/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * File      : memheap.c
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-04-10     Bernard      first implementation
 * 2012-10-16     Bernard      add the mutex lock for heap object.
 * 2012-12-29     Bernard      memheap can be used as system heap.
 *                             change mutex lock to semaphore lock.
 * 2013-04-10     Bernard      add rt_memheap_realloc function.
 * 2013-05-24     Bernard      fix the rt_memheap_realloc issue.
 * 2013-07-11     Grissiom     fix the memory block splitting issue.
 * 2013-07-15     Grissiom     optimize rt_memheap_realloc
 */

#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_MEMHEAP

/* dynamic pool magic and mask */
#define RT_MEMHEAP_MAGIC        0x1ea01ea0
#define RT_MEMHEAP_MASK         0xfffffffe
#define RT_MEMHEAP_USED         0x01
#define RT_MEMHEAP_FREED        0x00

#define RT_MEMHEAP_IS_USED(i)   ((i)->magic & RT_MEMHEAP_USED)
#define RT_MEMHEAP_MINIALLOC    12

#define RT_MEMHEAP_SIZE         RT_ALIGN(sizeof(struct rt_memheap_item), RT_ALIGN_SIZE)
#define MEMITEM_SIZE(item)      ((rt_ubase_t)item->next - (rt_ubase_t)item - RT_MEMHEAP_SIZE)
#define MEMITEM(ptr)            (struct rt_memheap_item *)((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE)

#ifdef RT_USING_MEMTRACE
/* store a short tag (up to 2 * sizeof(void *) characters) in the next_free and
 * prev_free fields of a used block; those fields are unused while the block is
 * allocated, so no extra memory is needed for the tag */
rt_inline void rt_memheap_setname(struct rt_memheap_item *item, const char *name)
{
    int index;
    rt_uint8_t *ptr;

    ptr = (rt_uint8_t *)&(item->next_free);
    for (index = 0; index < sizeof(void *); index ++)
    {
        if (name[index] == '\0') break;
        ptr[index] = name[index];
    }

    if (name[index] == '\0') ptr[index] = '\0';
    else
    {
        ptr = (rt_uint8_t *)&(item->prev_free);
        for (index = 0; index < sizeof(void *) && (index + sizeof(void *)) < RT_NAME_MAX; index ++)
        {
            if (name[sizeof(void *) + index] == '\0') break;
            ptr[index] = name[sizeof(void *) + index];
        }

        if (name[sizeof(void *) + index] == '\0') ptr[index] = '\0';
    }
}

void rt_mem_set_tag(void *ptr, const char *name)
{
    struct rt_memheap_item *item;

    if (ptr && name)
    {
        item = MEMITEM(ptr);
        rt_memheap_setname(item, name);
    }
}
#endif
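
/*
 * Usage sketch (illustrative only, not part of the original file): with
 * RT_USING_MEMTRACE enabled, rt_mem_set_tag() overrides the default tag
 * (the allocating thread's name) on a block returned by rt_malloc(), so
 * the block is easier to identify in the memtrace dump. The buffer name
 * "rx_ring" is a hypothetical example.
 *
 *     void *buf = rt_malloc(256);
 *     if (buf != RT_NULL)
 *     {
 *         rt_mem_set_tag(buf, "rx_ring");
 *     }
 */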

/*
 * The initialized memory pool will be:
 * +-----------------------------------+---------------------------+
 * | whole freed memory block          | Used Memory Block Trailer |
 * +-----------------------------------+---------------------------+
 *
 * block_list --> whole freed memory block
 *
 * The length of the Used Memory Block Trailer is 0,
 * which prevents block merging across the end of the block list.
 */
rt_err_t rt_memheap_init(struct rt_memheap *memheap,
                         const char        *name,
                         void              *start_addr,
                         rt_size_t          size)
{
    struct rt_memheap_item *item;

    RT_ASSERT(memheap != RT_NULL);

    /* initialize pool object */
    rt_object_init(&(memheap->parent), RT_Object_Class_MemHeap, name);

    memheap->start_addr     = start_addr;
    memheap->pool_size      = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);
    memheap->available_size = memheap->pool_size - (2 * RT_MEMHEAP_SIZE);
    memheap->max_used_size  = memheap->pool_size - memheap->available_size;

    /* initialize the free list header */
    item            = &(memheap->free_header);
    item->magic     = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    item->pool_ptr  = memheap;
    item->next      = RT_NULL;
    item->prev      = RT_NULL;
    item->next_free = item;
    item->prev_free = item;

    /* set the free list to free list header */
    memheap->free_list = item;

    /* initialize the first big memory block */
    item            = (struct rt_memheap_item *)start_addr;
    item->magic     = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    item->pool_ptr  = memheap;
    item->next      = RT_NULL;
    item->prev      = RT_NULL;
    item->next_free = item;
    item->prev_free = item;

    item->next = (struct rt_memheap_item *)
                 ((rt_uint8_t *)item + memheap->available_size + RT_MEMHEAP_SIZE);
    item->prev = item->next;

    /* block list header */
    memheap->block_list = item;

    /* place the big memory block to free list */
    item->next_free = memheap->free_list->next_free;
    item->prev_free = memheap->free_list;
    memheap->free_list->next_free->prev_free = item;
    memheap->free_list->next_free            = item;

    /* move to the end of memory pool to build a small trailer block,
     * which prevents block merging
     */
    item = item->next;
    /* it's a used memory block */
    item->magic    = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED);
    item->pool_ptr = memheap;
    item->next     = (struct rt_memheap_item *)start_addr;
    item->prev     = (struct rt_memheap_item *)start_addr;
    /* not in free list */
    item->next_free = item->prev_free = RT_NULL;

    /* initialize semaphore lock */
    rt_sem_init(&(memheap->lock), name, 1, RT_IPC_FLAG_FIFO);

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                 ("memory heap: start addr 0x%08x, size %d, free list header 0x%08x\n",
                  start_addr, size, &(memheap->free_header)));

    return RT_EOK;
}
RTM_EXPORT(rt_memheap_init);
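
/*
 * Usage sketch (illustrative only, not part of the original file): create a
 * named memheap on top of a dedicated memory region. The region here is a
 * hypothetical static array; on real hardware it is typically a separate
 * SRAM/SDRAM address range.
 *
 *     static rt_uint8_t sram_pool[4096];
 *     static struct rt_memheap sram_heap;
 *
 *     void sram_heap_setup(void)
 *     {
 *         rt_memheap_init(&sram_heap, "sram", sram_pool, sizeof(sram_pool));
 *     }
 */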

rt_err_t rt_memheap_detach(struct rt_memheap *heap)
{
    RT_ASSERT(heap);
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);
    RT_ASSERT(rt_object_is_systemobject(&heap->parent));

    rt_sem_detach(&heap->lock);
    rt_object_detach(&(heap->parent));

    /* Return a successful completion. */
    return RT_EOK;
}
RTM_EXPORT(rt_memheap_detach);

void *rt_memheap_alloc(struct rt_memheap *heap, rt_size_t size)
{
    rt_err_t result;
    rt_uint32_t free_size;
    struct rt_memheap_item *header_ptr;

    RT_ASSERT(heap != RT_NULL);
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    /* align allocated size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);
    if (size < RT_MEMHEAP_MINIALLOC)
        size = RT_MEMHEAP_MINIALLOC;

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate %d on heap:%8.*s",
                                    size, RT_NAME_MAX, heap->parent.name));

    if (size < heap->available_size)
    {
        /* search on free list */
        free_size = 0;

        /* lock memheap */
        result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
        if (result != RT_EOK)
        {
            rt_set_errno(result);

            return RT_NULL;
        }

        /* get the first free memory block */
        header_ptr = heap->free_list->next_free;
        while (header_ptr != heap->free_list && free_size < size)
        {
            /* get current freed memory block size */
            free_size = MEMITEM_SIZE(header_ptr);
            if (free_size < size)
            {
                /* move to next free memory block */
                header_ptr = header_ptr->next_free;
            }
        }

        /* determine if the memory is available. */
        if (free_size >= size)
        {
            /* a block that satisfies the request has been found. */

            /* determine if the block needs to be split. */
            if (free_size >= (size + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC))
            {
                struct rt_memheap_item *new_ptr;

                /* split the block. */
                new_ptr = (struct rt_memheap_item *)
                          (((rt_uint8_t *)header_ptr) + size + RT_MEMHEAP_SIZE);

                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]\n",
                              header_ptr,
                              header_ptr->next,
                              header_ptr->prev,
                              new_ptr));

                /* mark the new block as a memory block and freed. */
                new_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);

                /* put the pool pointer into the new block. */
                new_ptr->pool_ptr = heap;

                /* break down the block list */
                new_ptr->prev          = header_ptr;
                new_ptr->next          = header_ptr->next;
                header_ptr->next->prev = new_ptr;
                header_ptr->next       = new_ptr;

                /* remove header ptr from free list */
                header_ptr->next_free->prev_free = header_ptr->prev_free;
                header_ptr->prev_free->next_free = header_ptr->next_free;
                header_ptr->next_free = RT_NULL;
                header_ptr->prev_free = RT_NULL;

                /* insert new_ptr to free list */
                new_ptr->next_free = heap->free_list->next_free;
                new_ptr->prev_free = heap->free_list;
                heap->free_list->next_free->prev_free = new_ptr;
                heap->free_list->next_free            = new_ptr;
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: next_free 0x%08x, prev_free 0x%08x\n",
                                                new_ptr->next_free,
                                                new_ptr->prev_free));

                /* decrement the available byte count. */
                heap->available_size = heap->available_size -
                                       size -
                                       RT_MEMHEAP_SIZE;
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;
            }
            else
            {
                /* decrement the entire free size from the available bytes count. */
                heap->available_size = heap->available_size - free_size;
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;

                /* remove header_ptr from free list */
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("one block: block[0x%08x], next_free 0x%08x, prev_free 0x%08x\n",
                              header_ptr,
                              header_ptr->next_free,
                              header_ptr->prev_free));

                header_ptr->next_free->prev_free = header_ptr->prev_free;
                header_ptr->prev_free->next_free = header_ptr->next_free;
                header_ptr->next_free = RT_NULL;
                header_ptr->prev_free = RT_NULL;
            }

            /* Mark the allocated block as not available. */
            header_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED);

            /* release lock */
            rt_sem_release(&(heap->lock));

            /* Return a memory address to the caller. */
            RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                         ("alloc mem: memory[0x%08x], heap[0x%08x], size: %d\n",
                          (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE),
                          header_ptr,
                          size));

            return (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE);
        }

        /* release lock */
        rt_sem_release(&(heap->lock));
    }

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate memory: failed\n"));

    /* Return the completion status. */
    return RT_NULL;
}
RTM_EXPORT(rt_memheap_alloc);
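
/*
 * Usage sketch (illustrative only, not part of the original file): allocate
 * a buffer from a specific memheap and release it with rt_memheap_free()
 * when it is no longer needed. "sram_heap" is the hypothetical heap from
 * the rt_memheap_init() example above.
 *
 *     void *dma_buf = rt_memheap_alloc(&sram_heap, 512);
 *     if (dma_buf != RT_NULL)
 *     {
 *         rt_memset(dma_buf, 0, 512);
 *         rt_memheap_free(dma_buf);
 *     }
 */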

void *rt_memheap_realloc(struct rt_memheap *heap, void *ptr, rt_size_t newsize)
{
    rt_err_t result;
    rt_size_t oldsize;
    struct rt_memheap_item *header_ptr;
    struct rt_memheap_item *new_ptr;

    RT_ASSERT(heap);
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    if (newsize == 0)
    {
        rt_memheap_free(ptr);

        return RT_NULL;
    }
    /* align allocated size */
    newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
    if (newsize < RT_MEMHEAP_MINIALLOC)
        newsize = RT_MEMHEAP_MINIALLOC;

    if (ptr == RT_NULL)
    {
        return rt_memheap_alloc(heap, newsize);
    }

    /* get memory block header and get the size of memory block */
    header_ptr = (struct rt_memheap_item *)
                 ((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE);
    oldsize = MEMITEM_SIZE(header_ptr);
    /* re-allocate memory */
    if (newsize > oldsize)
    {
        void *new_ptr;
        struct rt_memheap_item *next_ptr;

        /* lock memheap */
        result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
        if (result != RT_EOK)
        {
            rt_set_errno(result);
            return RT_NULL;
        }

        next_ptr = header_ptr->next;

        /* header_ptr should not be the tail */
        RT_ASSERT(next_ptr > header_ptr);

        /* check whether the following free space is enough to expand */
        if (!RT_MEMHEAP_IS_USED(next_ptr))
        {
            rt_int32_t nextsize;

            nextsize = MEMITEM_SIZE(next_ptr);
            RT_ASSERT(nextsize > 0);

            /* Here is the ASCII art of the situation that we can make use of
             * the next free node without alloc/memcpy, |*| is the control
             * block:
             *
             *      oldsize          free node
             * |*|-----------|*|----------------------|*|
             *      newsize        >= minialloc
             * |*|----------------|*|-----------------|*|
             */
            if (nextsize + oldsize > newsize + RT_MEMHEAP_MINIALLOC)
            {
                /* decrement the entire free size from the available bytes count. */
                heap->available_size = heap->available_size - (newsize - oldsize);
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;

                /* remove next_ptr from free list */
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("remove block: block[0x%08x], next_free 0x%08x, prev_free 0x%08x",
                              next_ptr,
                              next_ptr->next_free,
                              next_ptr->prev_free));

                next_ptr->next_free->prev_free = next_ptr->prev_free;
                next_ptr->prev_free->next_free = next_ptr->next_free;
                next_ptr->next->prev = next_ptr->prev;
                next_ptr->prev->next = next_ptr->next;

                /* build a new one on the right place */
                next_ptr = (struct rt_memheap_item *)((char *)ptr + newsize);

                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("new free block: block[0x%08x] nextm[0x%08x] prevm[0x%08x]",
                              next_ptr,
                              next_ptr->next,
                              next_ptr->prev));

                /* mark the new block as a memory block and freed. */
                next_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);

                /* put the pool pointer into the new block. */
                next_ptr->pool_ptr = heap;

                next_ptr->prev          = header_ptr;
                next_ptr->next          = header_ptr->next;
                header_ptr->next->prev = next_ptr;
                header_ptr->next       = next_ptr;

                /* insert next_ptr to free list */
                next_ptr->next_free = heap->free_list->next_free;
                next_ptr->prev_free = heap->free_list;
                heap->free_list->next_free->prev_free = next_ptr;
                heap->free_list->next_free            = next_ptr;
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: next_free 0x%08x, prev_free 0x%08x",
                                                next_ptr->next_free,
                                                next_ptr->prev_free));

                /* release lock */
                rt_sem_release(&(heap->lock));

                return ptr;
            }
        }

        /* release lock */
        rt_sem_release(&(heap->lock));

        /* re-allocate a memory block */
        new_ptr = (void *)rt_memheap_alloc(heap, newsize);
        if (new_ptr != RT_NULL)
        {
            rt_memcpy(new_ptr, ptr, oldsize < newsize ? oldsize : newsize);
            rt_memheap_free(ptr);
        }

        return new_ptr;
    }

    /* don't split when there is less than one node space left */
    if (newsize + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC >= oldsize)
        return ptr;

    /* lock memheap */
    result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        rt_set_errno(result);

        return RT_NULL;
    }

    /* split the block. */
    new_ptr = (struct rt_memheap_item *)
              (((rt_uint8_t *)header_ptr) + newsize + RT_MEMHEAP_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                 ("split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]\n",
                  header_ptr,
                  header_ptr->next,
                  header_ptr->prev,
                  new_ptr));

    /* mark the new block as a memory block and freed. */
    new_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    /* put the pool pointer into the new block. */
    new_ptr->pool_ptr = heap;

    /* break down the block list */
    new_ptr->prev          = header_ptr;
    new_ptr->next          = header_ptr->next;
    header_ptr->next->prev = new_ptr;
    header_ptr->next       = new_ptr;

    /* determine if the block can be merged with the next neighbor. */
    if (!RT_MEMHEAP_IS_USED(new_ptr->next))
    {
        struct rt_memheap_item *free_ptr;

        /* merge block with next neighbor. */
        free_ptr = new_ptr->next;
        heap->available_size = heap->available_size - MEMITEM_SIZE(free_ptr);

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("merge: right node 0x%08x, next_free 0x%08x, prev_free 0x%08x\n",
                      header_ptr, header_ptr->next_free, header_ptr->prev_free));

        free_ptr->next->prev = new_ptr;
        new_ptr->next        = free_ptr->next;

        /* remove free ptr from free list */
        free_ptr->next_free->prev_free = free_ptr->prev_free;
        free_ptr->prev_free->next_free = free_ptr->next_free;
    }

    /* insert the split block to free list */
    new_ptr->next_free = heap->free_list->next_free;
    new_ptr->prev_free = heap->free_list;
    heap->free_list->next_free->prev_free = new_ptr;
    heap->free_list->next_free            = new_ptr;
    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new free ptr: next_free 0x%08x, prev_free 0x%08x\n",
                                    new_ptr->next_free,
                                    new_ptr->prev_free));

    /* increment the available byte count. */
    heap->available_size = heap->available_size + MEMITEM_SIZE(new_ptr);

    /* release lock */
    rt_sem_release(&(heap->lock));

    /* return the old memory block */
    return ptr;
}
RTM_EXPORT(rt_memheap_realloc);
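
/*
 * Usage sketch (illustrative only, not part of the original file): grow an
 * existing allocation; rt_memheap_realloc() expands in place when the
 * following block is free and large enough, and otherwise falls back to
 * allocate-copy-free. "sram_heap" is the hypothetical heap from the
 * examples above.
 *
 *     char *msg = rt_memheap_alloc(&sram_heap, 32);
 *     if (msg != RT_NULL)
 *     {
 *         char *bigger = rt_memheap_realloc(&sram_heap, msg, 128);
 *         if (bigger != RT_NULL)
 *             msg = bigger;
 *         rt_memheap_free(msg);
 *     }
 */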

void rt_memheap_free(void *ptr)
{
    rt_err_t result;
    struct rt_memheap *heap;
    struct rt_memheap_item *header_ptr, *new_ptr;
    rt_uint32_t insert_header;

    /* NULL check */
    if (ptr == RT_NULL) return;

    /* set initial status as OK */
    insert_header = 1;
    new_ptr       = RT_NULL;
    header_ptr    = (struct rt_memheap_item *)
                    ((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("free memory: memory[0x%08x], block[0x%08x]\n",
                                    ptr, header_ptr));

    /* check magic */
    if (header_ptr->magic != (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED))
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("bad magic:0x%08x @ memheap\n",
                                        header_ptr->magic));
    }
    RT_ASSERT(header_ptr->magic == (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED));
    /* check whether this block of memory has been over-written. */
    RT_ASSERT((header_ptr->next->magic & RT_MEMHEAP_MASK) == RT_MEMHEAP_MAGIC);

    /* get pool ptr */
    heap = header_ptr->pool_ptr;

    RT_ASSERT(heap);
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    /* lock memheap */
    result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        rt_set_errno(result);

        return;
    }

    /* Mark the memory as available. */
    header_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    /* Adjust the available number of bytes. */
    heap->available_size += MEMITEM_SIZE(header_ptr);

    /* Determine if the block can be merged with the previous neighbor. */
    if (!RT_MEMHEAP_IS_USED(header_ptr->prev))
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("merge: left node 0x%08x\n",
                                        header_ptr->prev));

        /* adjust the available number of bytes. */
        heap->available_size += RT_MEMHEAP_SIZE;

        /* yes, merge block with previous neighbor. */
        (header_ptr->prev)->next = header_ptr->next;
        (header_ptr->next)->prev = header_ptr->prev;

        /* move header pointer to previous. */
        header_ptr = header_ptr->prev;
        /* don't insert header to free list */
        insert_header = 0;
    }

    /* determine if the block can be merged with the next neighbor. */
    if (!RT_MEMHEAP_IS_USED(header_ptr->next))
    {
        /* adjust the available number of bytes. */
        heap->available_size += RT_MEMHEAP_SIZE;

        /* merge block with next neighbor. */
        new_ptr = header_ptr->next;

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("merge: right node 0x%08x, next_free 0x%08x, prev_free 0x%08x\n",
                      new_ptr, new_ptr->next_free, new_ptr->prev_free));

        new_ptr->next->prev = header_ptr;
        header_ptr->next    = new_ptr->next;

        /* remove new ptr from free list */
        new_ptr->next_free->prev_free = new_ptr->prev_free;
        new_ptr->prev_free->next_free = new_ptr->next_free;
    }

    if (insert_header)
    {
        /* no left merge, insert to free list */
        header_ptr->next_free = heap->free_list->next_free;
        header_ptr->prev_free = heap->free_list;
        heap->free_list->next_free->prev_free = header_ptr;
        heap->free_list->next_free            = header_ptr;

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("insert to free list: next_free 0x%08x, prev_free 0x%08x\n",
                      header_ptr->next_free, header_ptr->prev_free));
    }

    /* release lock */
    rt_sem_release(&(heap->lock));
}
RTM_EXPORT(rt_memheap_free);

#ifdef RT_USING_FINSH
static void _memheap_dump_tag(struct rt_memheap_item *item)
{
    rt_uint8_t name[2 * sizeof(void *)];
    rt_uint8_t *ptr;

    ptr = (rt_uint8_t *)&(item->next_free);
    rt_memcpy(name, ptr, sizeof(void *));
    ptr = (rt_uint8_t *)&(item->prev_free);
    rt_memcpy(&name[sizeof(void *)], ptr, sizeof(void *));

    rt_kprintf("%.*s", 2 * sizeof(void *), name);
}

int rt_memheap_dump(struct rt_memheap *heap)
{
    struct rt_memheap_item *item, *end;

    if (heap == RT_NULL) return 0;
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    rt_kprintf("\n[%.*s] [0x%08x - 0x%08x]->\n", RT_NAME_MAX, heap->parent.name,
               (rt_ubase_t)heap->start_addr, (rt_ubase_t)heap->start_addr + heap->pool_size);
    rt_kprintf("------------------------------\n");

    /* lock memheap */
    rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
    item = heap->block_list;

    end = (struct rt_memheap_item *)((rt_uint8_t *)heap->start_addr + heap->pool_size - RT_MEMHEAP_SIZE);

    /* for each memory block */
    while ((rt_ubase_t)item < ((rt_ubase_t)end))
    {
        if (RT_MEMHEAP_IS_USED(item) && ((item->magic & RT_MEMHEAP_MASK) != RT_MEMHEAP_MAGIC))
            rt_kprintf("0x%08x", item + 1);

        if (item->magic == (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED))
        {
            rt_kprintf("0x%08x: %-8d ", item + 1, MEMITEM_SIZE(item));
            _memheap_dump_tag(item);
            rt_kprintf("\n");
        }
        else
        {
            rt_kprintf("0x%08x: %-8d <F>\n", item + 1, MEMITEM_SIZE(item));
        }

        item = item->next;
    }
    rt_sem_release(&(heap->lock));

    return 0;
}

int memtrace(void)
{
    int count = rt_object_get_length(RT_Object_Class_MemHeap);
    struct rt_memheap **heaps;

    if (count > 0)
    {
        int index;
        extern int list_memheap(void);

        heaps = (struct rt_memheap **)rt_malloc(sizeof(struct rt_memheap *) * count);
        if (heaps == RT_NULL) return 0;

        list_memheap();

        rt_kprintf("memheap header size: %d\n", RT_MEMHEAP_SIZE);
        count = rt_object_get_pointers(RT_Object_Class_MemHeap, (rt_object_t *)heaps, count);
        for (index = 0; index < count; index++)
        {
            rt_memheap_dump(heaps[index]);
        }

        rt_free(heaps);
    }

    return 0;
}
MSH_CMD_EXPORT(memtrace, dump memory trace information);
#endif
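
/*
 * Usage sketch (illustrative only, not part of the original file): with
 * RT_USING_FINSH enabled, the block map of every memheap can be printed
 * from the shell with the exported command, or for a single heap from C
 * code. "sram_heap" is the hypothetical heap from the examples above.
 *
 *     msh> memtrace
 *
 *     rt_memheap_dump(&sram_heap);
 */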

#ifdef RT_USING_MEMHEAP_AS_HEAP
static struct rt_memheap _heap;

void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    /* initialize a default heap in the system */
    rt_memheap_init(&_heap,
                    "heap",
                    begin_addr,
                    (rt_ubase_t)end_addr - (rt_ubase_t)begin_addr);
}

void *rt_malloc(rt_size_t size)
{
    void *ptr;

    /* try to allocate in system heap */
    ptr = rt_memheap_alloc(&_heap, size);
    if (ptr == RT_NULL)
    {
        struct rt_object *object;
        struct rt_list_node *node;
        struct rt_memheap *heap;
        struct rt_object_information *information;

        /* try to allocate on other memory heap */
        information = rt_object_get_information(RT_Object_Class_MemHeap);
        RT_ASSERT(information != RT_NULL);
        for (node  = information->object_list.next;
             node != &(information->object_list);
             node  = node->next)
        {
            object = rt_list_entry(node, struct rt_object, list);
            heap   = (struct rt_memheap *)object;

            RT_ASSERT(heap);
            RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

            /* not allocate in the default system heap */
            if (heap == &_heap)
                continue;

            ptr = rt_memheap_alloc(heap, size);
            if (ptr != RT_NULL)
                break;
        }
    }

#ifdef RT_USING_MEMTRACE
    if (ptr == RT_NULL)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("malloc[%d] => NULL", size));
    }
    else
    {
        struct rt_memheap_item *item = MEMITEM(ptr);
        if (rt_thread_self())
            rt_memheap_setname(item, rt_thread_self()->name);
        else
            rt_memheap_setname(item, "<null>");

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("malloc => 0x%08x : %d", ptr, size));
    }
#endif

    return ptr;
}
RTM_EXPORT(rt_malloc);

void rt_free(void *rmem)
{
    rt_memheap_free(rmem);
}
RTM_EXPORT(rt_free);

void *rt_realloc(void *rmem, rt_size_t newsize)
{
    void *new_ptr;
    struct rt_memheap_item *header_ptr;

    if (rmem == RT_NULL)
        return rt_malloc(newsize);

    if (newsize == 0)
    {
        rt_free(rmem);
        return RT_NULL;
    }

    /* get old memory item */
    header_ptr = (struct rt_memheap_item *)
                 ((rt_uint8_t *)rmem - RT_MEMHEAP_SIZE);

    new_ptr = rt_memheap_realloc(header_ptr->pool_ptr, rmem, newsize);
    if (new_ptr == RT_NULL && newsize != 0)
    {
        /* allocate memory block from other memheap */
        new_ptr = rt_malloc(newsize);
        if (new_ptr != RT_NULL && rmem != RT_NULL)
        {
            rt_size_t oldsize;

            /* get the size of old memory block */
            oldsize = MEMITEM_SIZE(header_ptr);
            if (newsize > oldsize)
                rt_memcpy(new_ptr, rmem, oldsize);
            else
                rt_memcpy(new_ptr, rmem, newsize);

            rt_free(rmem);
        }
    }

#ifdef RT_USING_MEMTRACE
    if (new_ptr == RT_NULL)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("realloc[%d] => NULL", newsize));
    }
    else
    {
        struct rt_memheap_item *item = MEMITEM(new_ptr);
        if (rt_thread_self())
            rt_memheap_setname(item, rt_thread_self()->name);
        else
            rt_memheap_setname(item, "<null>");

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("realloc => 0x%08x : %d", new_ptr, newsize));
    }
#endif

    return new_ptr;
}
RTM_EXPORT(rt_realloc);

void *rt_calloc(rt_size_t count, rt_size_t size)
{
    void *ptr;
    rt_size_t total_size;

    total_size = count * size;
    ptr = rt_malloc(total_size);
    if (ptr != RT_NULL)
    {
        /* clean memory */
        rt_memset(ptr, 0, total_size);
    }

#ifdef RT_USING_MEMTRACE
    if (ptr == RT_NULL)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("calloc[%d x %d] => NULL", count, size));
    }
    else
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("calloc => 0x%08x : %d", ptr, count * size));
    }
#endif

    return ptr;
}
RTM_EXPORT(rt_calloc);

void rt_memory_info(rt_uint32_t *total,
                    rt_uint32_t *used,
                    rt_uint32_t *max_used)
{
    if (total != RT_NULL)
        *total = _heap.pool_size;

    if (used != RT_NULL)
        *used = _heap.pool_size - _heap.available_size;

    if (max_used != RT_NULL)
        *max_used = _heap.max_used_size;
}
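
/*
 * Usage sketch (illustrative only, not part of the original file): when
 * RT_USING_MEMHEAP_AS_HEAP is enabled, board startup code calls
 * rt_system_heap_init() once with the heap region, and application code
 * then uses the standard kernel allocator and rt_memory_info().
 * HEAP_BEGIN and HEAP_END are hypothetical board-specific addresses.
 *
 *     rt_uint32_t total, used, max_used;
 *
 *     rt_system_heap_init((void *)HEAP_BEGIN, (void *)HEAP_END);
 *     rt_memory_info(&total, &used, &max_used);
 *     rt_kprintf("heap: %d total, %d used, %d max used\n",
 *                total, used, max_used);
 */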

#endif

#endif