Merge pull request #2481 from lymzzyh/slab

fix slab at 64bits CPU
This commit is contained in:
Bernard Xiong 2019-03-20 21:28:20 +08:00 committed by GitHub
commit 78db77f64f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 21 additions and 21 deletions

View File

@@ -217,9 +217,9 @@ struct memusage
}; };
static struct memusage *memusage = RT_NULL; static struct memusage *memusage = RT_NULL;
#define btokup(addr) \ #define btokup(addr) \
(&memusage[((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS]) (&memusage[((rt_ubase_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])
static rt_uint32_t heap_start, heap_end; static rt_ubase_t heap_start, heap_end;
/* page allocator */ /* page allocator */
struct rt_page_head struct rt_page_head
@@ -275,7 +275,7 @@ void rt_page_free(void *addr, rt_size_t npages)
struct rt_page_head **prev; struct rt_page_head **prev;
RT_ASSERT(addr != RT_NULL); RT_ASSERT(addr != RT_NULL);
RT_ASSERT((rt_uint32_t)addr % RT_MM_PAGE_SIZE == 0); RT_ASSERT((rt_ubase_t)addr % RT_MM_PAGE_SIZE == 0);
RT_ASSERT(npages != 0); RT_ASSERT(npages != 0);
n = (struct rt_page_head *)addr; n = (struct rt_page_head *)addr;
@@ -348,13 +348,13 @@ void rt_system_heap_init(void *begin_addr, void *end_addr)
RT_DEBUG_NOT_IN_INTERRUPT; RT_DEBUG_NOT_IN_INTERRUPT;
/* align begin and end addr to page */ /* align begin and end addr to page */
heap_start = RT_ALIGN((rt_uint32_t)begin_addr, RT_MM_PAGE_SIZE); heap_start = RT_ALIGN((rt_ubase_t)begin_addr, RT_MM_PAGE_SIZE);
heap_end = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_MM_PAGE_SIZE); heap_end = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_MM_PAGE_SIZE);
if (heap_start >= heap_end) if (heap_start >= heap_end)
{ {
rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n", rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n",
(rt_uint32_t)begin_addr, (rt_uint32_t)end_addr); (rt_ubase_t)begin_addr, (rt_ubase_t)end_addr);
return; return;
} }
@@ -391,7 +391,7 @@ void rt_system_heap_init(void *begin_addr, void *end_addr)
memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE); memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE);
RT_DEBUG_LOG(RT_DEBUG_SLAB, ("memusage 0x%x, size 0x%x\n", RT_DEBUG_LOG(RT_DEBUG_SLAB, ("memusage 0x%x, size 0x%x\n",
(rt_uint32_t)memusage, limsize)); (rt_ubase_t)memusage, limsize));
} }
/* /*
@@ -401,7 +401,7 @@ void rt_system_heap_init(void *begin_addr, void *end_addr)
rt_inline int zoneindex(rt_size_t *bytes) rt_inline int zoneindex(rt_size_t *bytes)
{ {
/* unsigned for shift opt */ /* unsigned for shift opt */
rt_uint32_t n = (rt_uint32_t)(*bytes); rt_ubase_t n = (rt_ubase_t)(*bytes);
if (n < 128) if (n < 128)
{ {
@@ -507,7 +507,7 @@ void *rt_malloc(rt_size_t size)
("malloc a large memory 0x%x, page cnt %d, kup %d\n", ("malloc a large memory 0x%x, page cnt %d, kup %d\n",
size, size,
size >> RT_MM_PAGE_BITS, size >> RT_MM_PAGE_BITS,
((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS)); ((rt_ubase_t)chunk - heap_start) >> RT_MM_PAGE_BITS));
/* lock heap */ /* lock heap */
rt_sem_take(&heap_sem, RT_WAITING_FOREVER); rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
@@ -610,7 +610,7 @@ void *rt_malloc(rt_size_t size)
rt_sem_take(&heap_sem, RT_WAITING_FOREVER); rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n", RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n",
(rt_uint32_t)z)); (rt_ubase_t)z));
/* set message usage */ /* set message usage */
for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++) for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++)
@@ -694,7 +694,7 @@ void *rt_realloc(void *ptr, rt_size_t size)
* Get the original allocation's zone. If the new request winds up * Get the original allocation's zone. If the new request winds up
* using the same chunk size we do not have to do anything. * using the same chunk size we do not have to do anything.
*/ */
kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK); kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
if (kup->type == PAGE_TYPE_LARGE) if (kup->type == PAGE_TYPE_LARGE)
{ {
rt_size_t osize; rt_size_t osize;
@@ -709,7 +709,7 @@ void *rt_realloc(void *ptr, rt_size_t size)
} }
else if (kup->type == PAGE_TYPE_SMALL) else if (kup->type == PAGE_TYPE_SMALL)
{ {
z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) - z = (slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
kup->size * RT_MM_PAGE_SIZE); kup->size * RT_MM_PAGE_SIZE);
RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC); RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
@@ -783,20 +783,20 @@ void rt_free(void *ptr)
/* get memory usage */ /* get memory usage */
#if RT_DEBUG_SLAB #if RT_DEBUG_SLAB
{ {
rt_uint32_t addr = ((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK); rt_ubase_t addr = ((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
RT_DEBUG_LOG(RT_DEBUG_SLAB, RT_DEBUG_LOG(RT_DEBUG_SLAB,
("free a memory 0x%x and align to 0x%x, kup index %d\n", ("free a memory 0x%x and align to 0x%x, kup index %d\n",
(rt_uint32_t)ptr, (rt_ubase_t)ptr,
(rt_uint32_t)addr, (rt_ubase_t)addr,
((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS)); ((rt_ubase_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));
} }
#endif #endif
kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK); kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
/* release large allocation */ /* release large allocation */
if (kup->type == PAGE_TYPE_LARGE) if (kup->type == PAGE_TYPE_LARGE)
{ {
rt_uint32_t size; rt_ubase_t size;
/* lock heap */ /* lock heap */
rt_sem_take(&heap_sem, RT_WAITING_FOREVER); rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
@@ -811,7 +811,7 @@ void rt_free(void *ptr)
RT_DEBUG_LOG(RT_DEBUG_SLAB, RT_DEBUG_LOG(RT_DEBUG_SLAB,
("free large memory block 0x%x, page count %d\n", ("free large memory block 0x%x, page count %d\n",
(rt_uint32_t)ptr, size)); (rt_ubase_t)ptr, size));
/* free this page */ /* free this page */
rt_page_free(ptr, size); rt_page_free(ptr, size);
@@ -823,7 +823,7 @@ void rt_free(void *ptr)
rt_sem_take(&heap_sem, RT_WAITING_FOREVER); rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
/* zone case. get out zone. */ /* zone case. get out zone. */
z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) - z = (slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
kup->size * RT_MM_PAGE_SIZE); kup->size * RT_MM_PAGE_SIZE);
RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC); RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
@@ -857,7 +857,7 @@ void rt_free(void *ptr)
slab_zone **pz; slab_zone **pz;
RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x\n", RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x\n",
(rt_uint32_t)z, z->z_zoneindex)); (rt_ubase_t)z, z->z_zoneindex));
/* remove zone from zone array list */ /* remove zone from zone array list */
for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next) for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)