commit 78db77f64f

src/slab.c (42 lines changed)
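In summary, this commit widens every pointer-to-integer cast in the slab allocator from the fixed 32-bit rt_uint32_t to the pointer-wide rt_ubase_t, and widens the heap_start/heap_end bounds to match, presumably so that src/slab.c also works on 64-bit targets, where an address does not fit in 32 bits.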
@@ -217,9 +217,9 @@ struct memusage
 };
 static struct memusage *memusage = RT_NULL;
 #define btokup(addr)    \
-    (&memusage[((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])
+    (&memusage[((rt_ubase_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])
 
-static rt_uint32_t heap_start, heap_end;
+static rt_ubase_t heap_start, heap_end;
 
 /* page allocator */
 struct rt_page_head
@@ -275,7 +275,7 @@ void rt_page_free(void *addr, rt_size_t npages)
     struct rt_page_head **prev;
 
     RT_ASSERT(addr != RT_NULL);
-    RT_ASSERT((rt_uint32_t)addr % RT_MM_PAGE_SIZE == 0);
+    RT_ASSERT((rt_ubase_t)addr % RT_MM_PAGE_SIZE == 0);
     RT_ASSERT(npages != 0);
 
     n = (struct rt_page_head *)addr;
@@ -348,13 +348,13 @@ void rt_system_heap_init(void *begin_addr, void *end_addr)
     RT_DEBUG_NOT_IN_INTERRUPT;
 
     /* align begin and end addr to page */
-    heap_start = RT_ALIGN((rt_uint32_t)begin_addr, RT_MM_PAGE_SIZE);
-    heap_end   = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_MM_PAGE_SIZE);
+    heap_start = RT_ALIGN((rt_ubase_t)begin_addr, RT_MM_PAGE_SIZE);
+    heap_end   = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_MM_PAGE_SIZE);
 
     if (heap_start >= heap_end)
     {
         rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n",
-                   (rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);
+                   (rt_ubase_t)begin_addr, (rt_ubase_t)end_addr);
 
         return;
     }
@@ -391,7 +391,7 @@ void rt_system_heap_init(void *begin_addr, void *end_addr)
     memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE);
 
     RT_DEBUG_LOG(RT_DEBUG_SLAB, ("memusage 0x%x, size 0x%x\n",
-                                 (rt_uint32_t)memusage, limsize));
+                                 (rt_ubase_t)memusage, limsize));
 }
 
 /*
@@ -401,7 +401,7 @@ void rt_system_heap_init(void *begin_addr, void *end_addr)
 rt_inline int zoneindex(rt_size_t *bytes)
 {
     /* unsigned for shift opt */
-    rt_uint32_t n = (rt_uint32_t)(*bytes);
+    rt_ubase_t n = (rt_ubase_t)(*bytes);
 
     if (n < 128)
     {
@@ -507,7 +507,7 @@ void *rt_malloc(rt_size_t size)
                      ("malloc a large memory 0x%x, page cnt %d, kup %d\n",
                       size,
                       size >> RT_MM_PAGE_BITS,
-                      ((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS));
+                      ((rt_ubase_t)chunk - heap_start) >> RT_MM_PAGE_BITS));
 
         /* lock heap */
         rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
@@ -610,7 +610,7 @@ void *rt_malloc(rt_size_t size)
         rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
 
         RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n",
-                                     (rt_uint32_t)z));
+                                     (rt_ubase_t)z));
 
         /* set message usage */
         for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++)
@@ -694,7 +694,7 @@ void *rt_realloc(void *ptr, rt_size_t size)
      * Get the original allocation's zone. If the new request winds up
      * using the same chunk size we do not have to do anything.
      */
-    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
+    kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
     if (kup->type == PAGE_TYPE_LARGE)
     {
        rt_size_t osize;
@@ -709,7 +709,7 @@ void *rt_realloc(void *ptr, rt_size_t size)
     }
     else if (kup->type == PAGE_TYPE_SMALL)
     {
-        z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
+        z = (slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
                           kup->size * RT_MM_PAGE_SIZE);
         RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
 
@@ -783,20 +783,20 @@ void rt_free(void *ptr)
     /* get memory usage */
 #if RT_DEBUG_SLAB
     {
-        rt_uint32_t addr = ((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
+        rt_ubase_t addr = ((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
         RT_DEBUG_LOG(RT_DEBUG_SLAB,
                      ("free a memory 0x%x and align to 0x%x, kup index %d\n",
-                      (rt_uint32_t)ptr,
-                      (rt_uint32_t)addr,
-                      ((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));
+                      (rt_ubase_t)ptr,
+                      (rt_ubase_t)addr,
+                      ((rt_ubase_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));
     }
 #endif
 
-    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
+    kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
     /* release large allocation */
     if (kup->type == PAGE_TYPE_LARGE)
     {
-        rt_uint32_t size;
+        rt_ubase_t size;
 
         /* lock heap */
         rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
@@ -811,7 +811,7 @@ void rt_free(void *ptr)
 
         RT_DEBUG_LOG(RT_DEBUG_SLAB,
                      ("free large memory block 0x%x, page count %d\n",
-                      (rt_uint32_t)ptr, size));
+                      (rt_ubase_t)ptr, size));
 
         /* free this page */
         rt_page_free(ptr, size);
@@ -823,7 +823,7 @@ void rt_free(void *ptr)
     rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
 
     /* zone case. get out zone. */
-    z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
+    z = (slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
                       kup->size * RT_MM_PAGE_SIZE);
     RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
 
@@ -857,7 +857,7 @@ void rt_free(void *ptr)
         slab_zone **pz;
 
         RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x\n",
-                                     (rt_uint32_t)z, z->z_zoneindex));
+                                     (rt_ubase_t)z, z->z_zoneindex));
 
         /* remove zone from zone array list */
         for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
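For context on why the cast width matters: rt_ubase_t is RT-Thread's pointer-wide unsigned integer (its width follows the CPU word size via rtdef.h), while rt_uint32_t is always 32 bits. On a 64-bit target, an address cast through a 32-bit integer loses its high bits, so any pointer stored that way and later converted back, as btokup() and the heap_start bookkeeping effectively do, is corrupted. A minimal, self-contained sketch of that failure mode, using standard C types and a made-up heap address in place of the RT-Thread definitions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical heap placed above the 4 GiB line, as can happen
       on a 64-bit target; the address is made up for illustration */
    uintptr_t heap_start = 0x100000000ULL;        /* 4 GiB */
    void *p = (void *)(heap_start + 0x1000);      /* block in the heap */

    /* old scheme: casting through a 32-bit integer drops the high
       bits, so the address no longer round-trips to the same pointer */
    uint32_t  addr32 = (uint32_t)(uintptr_t)p;    /* 0x00001000 */
    void     *q      = (void *)(uintptr_t)addr32;

    /* new scheme: a pointer-wide unsigned integer (what rt_ubase_t
       provides) preserves every bit of the address */
    uintptr_t addr = (uintptr_t)p;
    void     *r    = (void *)addr;

    printf("p = %p\n", p);
    printf("q = %p (%s)\n", q, q == p ? "ok" : "truncated");
    printf("r = %p (%s)\n", r, r == p ? "ok" : "truncated");
    return 0;
}

The same truncation would hit heap_start and heap_end themselves on such a target, which is why the commit widens their declarations and not only the casts.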