/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2008-7-12      Bernard      the first version
 * 2010-06-09     Bernard      fix the end stub of heap
 *                             fix memory check in rt_realloc function
 * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
 * 2010-10-14     Bernard      fix rt_realloc issue when realloc a NULL pointer.
 * 2017-07-14     armink       fix rt_realloc issue when new size is 0
 * 2018-10-02     Bernard      Add 64bit support
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include <rthw.h>
#include <rtthread.h>

#ifndef RT_USING_MEMHEAP_AS_HEAP

/* #define RT_MEM_DEBUG */
#define RT_MEM_STATS

#if defined (RT_USING_HEAP) && defined (RT_USING_SMALL_MEM)
#ifdef RT_USING_HOOK
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is allocated from heap memory.
 *
 * @param hook the hook function
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    rt_malloc_hook = hook;
}

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is released to heap memory.
 *
 * @param hook the hook function
 */
void rt_free_sethook(void (*hook)(void *ptr))
{
    rt_free_hook = hook;
}
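
/*
 * Usage sketch (illustrative only, not part of the kernel): an application
 * built with RT_USING_HOOK could register simple logging hooks to trace every
 * allocation and free. The hook names and log format below are assumptions.
 *
 *     static void trace_malloc_hook(void *ptr, rt_size_t size)
 *     {
 *         rt_kprintf("heap: alloc %d bytes at 0x%08x\n", size, ptr);
 *     }
 *
 *     static void trace_free_hook(void *ptr)
 *     {
 *         rt_kprintf("heap: free 0x%08x\n", ptr);
 *     }
 *
 *     rt_malloc_sethook(trace_malloc_hook);
 *     rt_free_sethook(trace_free_hook);
 */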

/**@}*/

#endif

#define HEAP_MAGIC 0x1ea0
struct heap_mem
{
    /* magic and used flag */
    rt_uint16_t magic;
    rt_uint16_t used;
#ifdef ARCH_CPU_64BIT
    rt_uint32_t resv;
#endif

    rt_size_t next, prev;

#ifdef RT_USING_MEMTRACE
#ifdef ARCH_CPU_64BIT
    rt_uint8_t thread[8];
#else
    rt_uint8_t thread[4];   /* thread name */
#endif
#endif
};

/** pointer to the heap: for alignment, heap_ptr is now a pointer instead of an array */
static rt_uint8_t *heap_ptr;

/** the last entry, always unused! */
static struct heap_mem *heap_end;

#ifdef ARCH_CPU_64BIT
#define MIN_SIZE 24
#else
#define MIN_SIZE 12
#endif

#define MIN_SIZE_ALIGNED     RT_ALIGN(MIN_SIZE, RT_ALIGN_SIZE)
#define SIZEOF_STRUCT_MEM    RT_ALIGN(sizeof(struct heap_mem), RT_ALIGN_SIZE)
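
/*
 * Layout sketch (derived from the definitions above; block sizes are only an
 * example): the heap is one contiguous region in which every block starts
 * with a struct heap_mem header followed by its payload, and 'next'/'prev'
 * hold byte offsets from heap_ptr rather than pointers.
 *
 *     heap_ptr                                            heap_end
 *     |                                                   |
 *     v                                                   v
 *     +--------+---------+--------+---------+--       --+--------+
 *     | header | payload | header | payload |    ...    | header |
 *     +--------+---------+--------+---------+--       --+--------+
 *
 * For a block whose header sits at offset 'ptr', the usable payload size is
 * mem->next - ptr - SIZEOF_STRUCT_MEM, which is the expression used by
 * rt_malloc and rt_realloc below.
 */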

static struct heap_mem *lfree;   /* pointer to the lowest free block */

static struct rt_semaphore heap_sem;
static rt_size_t mem_size_aligned;

#ifdef RT_MEM_STATS
static rt_size_t used_mem, max_mem;
#endif
#ifdef RT_USING_MEMTRACE
rt_inline void rt_mem_setname(struct heap_mem *mem, const char *name)
{
    int index;
    for (index = 0; index < sizeof(mem->thread); index ++)
    {
        if (name[index] == '\0') break;
        mem->thread[index] = name[index];
    }

    for (; index < sizeof(mem->thread); index ++)
    {
        mem->thread[index] = ' ';
    }
}
#endif

static void plug_holes(struct heap_mem *mem)
{
    struct heap_mem *nmem;
    struct heap_mem *pmem;

    RT_ASSERT((rt_uint8_t *)mem >= heap_ptr);
    RT_ASSERT((rt_uint8_t *)mem < (rt_uint8_t *)heap_end);
    RT_ASSERT(mem->used == 0);

    /* plug hole forward */
    nmem = (struct heap_mem *)&heap_ptr[mem->next];
    if (mem != nmem &&
        nmem->used == 0 &&
        (rt_uint8_t *)nmem != (rt_uint8_t *)heap_end)
    {
        /* if mem->next is unused and not end of heap_ptr,
         * combine mem and mem->next
         */
        if (lfree == nmem)
        {
            lfree = mem;
        }
        mem->next = nmem->next;
        ((struct heap_mem *)&heap_ptr[nmem->next])->prev = (rt_uint8_t *)mem - heap_ptr;
    }

    /* plug hole backward */
    pmem = (struct heap_mem *)&heap_ptr[mem->prev];
    if (pmem != mem && pmem->used == 0)
    {
        /* if mem->prev is unused, combine mem and mem->prev */
        if (lfree == mem)
        {
            lfree = pmem;
        }
        pmem->next = mem->next;
        ((struct heap_mem *)&heap_ptr[mem->next])->prev = (rt_uint8_t *)pmem - heap_ptr;
    }
}
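
/*
 * Worked example for plug_holes (offsets are assumptions, with
 * SIZEOF_STRUCT_MEM taken as 12 for illustration): suppose block A starts at
 * offset 0 with A->next == 60, block B starts at offset 60 with
 * B->next == 120, and both are free. plug_holes(A) merges forward: A->next
 * becomes 120 and the block at offset 120 gets prev = 0, so A and B form one
 * free block with 120 - 0 - 12 = 108 payload bytes. The backward pass does
 * the same merge with A's predecessor when that block is also free.
 */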

/**
 * @ingroup SystemInit
 *
 * This function will initialize system heap memory.
 *
 * @param begin_addr the beginning address of system heap memory.
 * @param end_addr the end address of system heap memory.
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    struct heap_mem *mem;
    rt_ubase_t begin_align = RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
    rt_ubase_t end_align   = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_ALIGN_SIZE);

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment addr */
    if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
        ((end_align - 2 * SIZEOF_STRUCT_MEM) >= begin_align))
    {
        /* calculate the aligned memory size */
        mem_size_aligned = end_align - begin_align - 2 * SIZEOF_STRUCT_MEM;
    }
    else
    {
        rt_kprintf("mem init, error begin address 0x%x, and end address 0x%x\n",
                   (rt_ubase_t)begin_addr, (rt_ubase_t)end_addr);

        return;
    }

    /* point to begin address of heap */
    heap_ptr = (rt_uint8_t *)begin_align;

    RT_DEBUG_LOG(RT_DEBUG_MEM, ("mem init, heap begin address 0x%x, size %d\n",
                                (rt_ubase_t)heap_ptr, mem_size_aligned));

    /* initialize the start of the heap */
    mem        = (struct heap_mem *)heap_ptr;
    mem->magic = HEAP_MAGIC;
    mem->next  = mem_size_aligned + SIZEOF_STRUCT_MEM;
    mem->prev  = 0;
    mem->used  = 0;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(mem, "INIT");
#endif

    /* initialize the end of the heap */
    heap_end        = (struct heap_mem *)&heap_ptr[mem->next];
    heap_end->magic = HEAP_MAGIC;
    heap_end->used  = 1;
    heap_end->next  = mem_size_aligned + SIZEOF_STRUCT_MEM;
    heap_end->prev  = mem_size_aligned + SIZEOF_STRUCT_MEM;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(heap_end, "INIT");
#endif

    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);

    /* initialize the lowest-free pointer to the start of the heap */
    lfree = (struct heap_mem *)heap_ptr;
}
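
/*
 * Usage sketch (illustrative only): a BSP normally calls
 * rt_system_heap_init() once during board initialization, handing it the
 * otherwise unused RAM region. HEAP_BEGIN and HEAP_END below are assumed,
 * board-specific symbols, not names defined in this file.
 *
 *     void rt_hw_board_init(void)
 *     {
 *         ...
 *     #ifdef RT_USING_HEAP
 *         rt_system_heap_init((void *)HEAP_BEGIN, (void *)HEAP_END);
 *     #endif
 *     }
 */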

/**
 * @addtogroup MM
 */

/**@{*/

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 *
 * @return pointer to allocated memory or NULL if no free memory was found.
 */
void *rt_malloc(rt_size_t size)
{
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;

    if (size == 0)
        return RT_NULL;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (size != RT_ALIGN(size, RT_ALIGN_SIZE))
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d, but align to %d\n",
                                    size, RT_ALIGN(size, RT_ALIGN_SIZE)));
    else
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d\n", size));

    /* alignment size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);

    if (size > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("no memory\n"));

        return RT_NULL;
    }

    /* every data block must be at least MIN_SIZE_ALIGNED long */
    if (size < MIN_SIZE_ALIGNED)
        size = MIN_SIZE_ALIGNED;

    /* take memory semaphore */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    for (ptr = (rt_uint8_t *)lfree - heap_ptr;
         ptr < mem_size_aligned - size;
         ptr = ((struct heap_mem *)&heap_ptr[ptr])->next)
    {
        mem = (struct heap_mem *)&heap_ptr[ptr];

        if ((!mem->used) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size)
        {
            /* mem is not used and at least perfect fit is possible:
             * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

            if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >=
                (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED))
            {
                /* (in addition to the above, we test if another struct heap_mem (SIZEOF_STRUCT_MEM) containing
                 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
                 * -> split large block, create empty remainder,
                 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
                 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
                 * struct heap_mem would fit in but no data between mem2 and mem2->next
                 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
                 * region that couldn't hold data, but when mem->next gets freed,
                 * the 2 regions would be combined, resulting in more free memory
                 */
                ptr2 = ptr + SIZEOF_STRUCT_MEM + size;

                /* create mem2 struct */
                mem2        = (struct heap_mem *)&heap_ptr[ptr2];
                mem2->magic = HEAP_MAGIC;
                mem2->used  = 0;
                mem2->next  = mem->next;
                mem2->prev  = ptr;
#ifdef RT_USING_MEMTRACE
                rt_mem_setname(mem2, "    ");
#endif

                /* and insert it between mem and mem->next */
                mem->next = ptr2;
                mem->used = 1;

                if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
                {
                    ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
                }
#ifdef RT_MEM_STATS
                used_mem += (size + SIZEOF_STRUCT_MEM);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif
            }
            else
            {
                /* (a mem2 struct does not fit into the user data space of mem, and mem->next will
                 * always be used at this point: if not, we would have 2 unused structs in a row and
                 * plug_holes should have taken care of this).
                 * -> near fit or exact fit: do not split, no mem2 creation
                 * also can't move mem->next directly behind mem, since mem->next
                 * will always be used at this point!
                 */
                mem->used = 1;
#ifdef RT_MEM_STATS
                used_mem += mem->next - ((rt_uint8_t *)mem - heap_ptr);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif
            }
            /* set memory block magic */
            mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
            if (rt_thread_self())
                rt_mem_setname(mem, rt_thread_self()->name);
            else
                rt_mem_setname(mem, "NONE");
#endif

            if (mem == lfree)
            {
                /* Find next free block after mem and update lowest free pointer */
                while (lfree->used && lfree != heap_end)
                    lfree = (struct heap_mem *)&heap_ptr[lfree->next];

                RT_ASSERT(((lfree == heap_end) || (!lfree->used)));
            }

            rt_sem_release(&heap_sem);
            RT_ASSERT((rt_ubase_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_ubase_t)heap_end);
            RT_ASSERT((rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
            RT_ASSERT((((rt_ubase_t)mem) & (RT_ALIGN_SIZE - 1)) == 0);

            RT_DEBUG_LOG(RT_DEBUG_MEM,
                         ("allocate memory at 0x%x, size: %d\n",
                          (rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM),
                          (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

            RT_OBJECT_HOOK_CALL(rt_malloc_hook,
                                (((void *)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM)), size));

            /* return the memory data except mem struct */
            return (rt_uint8_t *)mem + SIZEOF_STRUCT_MEM;
        }
    }

    rt_sem_release(&heap_sem);

    return RT_NULL;
}
RTM_EXPORT(rt_malloc);
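
/*
 * Usage sketch (illustrative only): rt_malloc() returns RT_NULL on failure,
 * so callers should always check the result before use and release the block
 * with rt_free() when done. The buffer name and size are assumptions.
 *
 *     rt_uint8_t *buffer = rt_malloc(128);
 *     if (buffer != RT_NULL)
 *     {
 *         rt_memset(buffer, 0, 128);
 *         ...
 *         rt_free(buffer);
 *     }
 */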

/**
 * This function will change the size of a previously allocated memory block.
 *
 * @param rmem pointer to memory allocated by rt_malloc
 * @param newsize the required new size
 *
 * @return the changed memory block address
 */
void *rt_realloc(void *rmem, rt_size_t newsize)
{
    rt_size_t size;
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;
    void *nmem;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment size */
    newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
    if (newsize > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("realloc: out of memory\n"));

        return RT_NULL;
    }
    else if (newsize == 0)
    {
        rt_free(rmem);
        return RT_NULL;
    }

    /* allocate a new memory block */
    if (rmem == RT_NULL)
        return rt_malloc(newsize);

    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        /* illegal memory */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    ptr = (rt_uint8_t *)mem - heap_ptr;
    size = mem->next - ptr - SIZEOF_STRUCT_MEM;
    if (size == newsize)
    {
        /* the size is the same as before */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size)
    {
        /* split memory block */
#ifdef RT_MEM_STATS
        used_mem -= (size - newsize);
#endif

        ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
        mem2 = (struct heap_mem *)&heap_ptr[ptr2];
        mem2->magic = HEAP_MAGIC;
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
        rt_mem_setname(mem2, "    ");
#endif
        mem->next = ptr2;
        if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
        {
            ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
        }

        if (mem2 < lfree)
        {
            /* the split struct is now the lowest */
            lfree = mem2;
        }

        plug_holes(mem2);

        rt_sem_release(&heap_sem);

        return rmem;
    }
    rt_sem_release(&heap_sem);

    /* expand memory */
    nmem = rt_malloc(newsize);
    if (nmem != RT_NULL) /* check memory */
    {
        rt_memcpy(nmem, rmem, size < newsize ? size : newsize);
        rt_free(rmem);
    }

    return nmem;
}
RTM_EXPORT(rt_realloc);
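
/*
 * Usage sketch (illustrative only): rt_realloc() returns RT_NULL when it
 * cannot satisfy the request and, as the code above shows, the original block
 * is left untouched in that case, so callers should keep the old pointer
 * until the call succeeds. 'buffer' and the new size are assumptions.
 *
 *     void *larger = rt_realloc(buffer, 256);
 *     if (larger != RT_NULL)
 *         buffer = larger;
 */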

/**
 * This function will contiguously allocate enough space for count objects
 * that are size bytes of memory each and returns a pointer to the allocated
 * memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 *
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *rt_calloc(rt_size_t count, rt_size_t size)
{
    void *p;

    /* allocate 'count' objects of size 'size' */
    p = rt_malloc(count * size);

    /* zero the memory */
    if (p)
        rt_memset(p, 0, count * size);

    return p;
}
RTM_EXPORT(rt_calloc);
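
/*
 * Usage sketch (illustrative only): rt_calloc() is a thin wrapper that
 * multiplies count by size, allocates with rt_malloc() and zero-fills the
 * result, so a zeroed array can be requested directly. The element type and
 * count below are assumptions.
 *
 *     struct sample { rt_uint32_t id; rt_uint32_t value; };
 *     struct sample *table = rt_calloc(16, sizeof(struct sample));
 *     if (table != RT_NULL)
 *     {
 *         ...
 *         rt_free(table);
 *     }
 */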

/**
 * This function will release the previously allocated memory block by
 * rt_malloc. The released memory block is taken back to system heap.
 *
 * @param rmem the address of memory which will be released
 */
void rt_free(void *rmem)
{
    struct heap_mem *mem;

    if (rmem == RT_NULL)
        return;

    RT_DEBUG_NOT_IN_INTERRUPT;

    RT_ASSERT((((rt_ubase_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
    RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)heap_ptr &&
              (rt_uint8_t *)rmem < (rt_uint8_t *)heap_end);

    RT_OBJECT_HOOK_CALL(rt_free_hook, (rmem));

    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("illegal memory\n"));

        return;
    }

    /* Get the corresponding struct heap_mem ... */
    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    RT_DEBUG_LOG(RT_DEBUG_MEM,
                 ("release memory 0x%x, size: %d\n",
                  (rt_ubase_t)rmem,
                  (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

    /* protect the heap from concurrent access */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* ... which has to be in a used state ... */
    if (!mem->used || mem->magic != HEAP_MAGIC)
    {
        rt_kprintf("to free a bad data block:\n");
        rt_kprintf("mem: 0x%08x, used flag: %d, magic code: 0x%04x\n", mem, mem->used, mem->magic);
    }
    RT_ASSERT(mem->used);
    RT_ASSERT(mem->magic == HEAP_MAGIC);
    /* ... and is now unused. */
    mem->used  = 0;
    mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(mem, "    ");
#endif

    if (mem < lfree)
    {
        /* the newly freed struct is now the lowest */
        lfree = mem;
    }

#ifdef RT_MEM_STATS
    used_mem -= (mem->next - ((rt_uint8_t *)mem - heap_ptr));
#endif

    /* finally, see if prev or next are free also */
    plug_holes(mem);
    rt_sem_release(&heap_sem);
}
RTM_EXPORT(rt_free);

#ifdef RT_MEM_STATS
void rt_memory_info(rt_uint32_t *total,
                    rt_uint32_t *used,
                    rt_uint32_t *max_used)
{
    if (total != RT_NULL)
        *total = mem_size_aligned;
    if (used  != RT_NULL)
        *used = used_mem;
    if (max_used != RT_NULL)
        *max_used = max_mem;
}
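
/*
 * Usage sketch (illustrative only): rt_memory_info() reports the aligned heap
 * size, the bytes currently in use and the high-water mark, which makes it a
 * convenient periodic health check. The variable names are assumptions.
 *
 *     rt_uint32_t total, used, max_used;
 *
 *     rt_memory_info(&total, &used, &max_used);
 *     rt_kprintf("heap: %d/%d bytes used, peak %d\n", used, total, max_used);
 */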

#ifdef RT_USING_FINSH
#include <finsh.h>

void list_mem(void)
{
    rt_kprintf("total memory: %d\n", mem_size_aligned);
    rt_kprintf("used memory : %d\n", used_mem);
    rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)

#ifdef RT_USING_MEMTRACE
int memcheck(void)
{
    int position;
    rt_ubase_t level;
    struct heap_mem *mem;
    level = rt_hw_interrupt_disable();
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        if (position < 0) goto __exit;
        if (position > (int)mem_size_aligned) goto __exit;
        if (mem->magic != HEAP_MAGIC) goto __exit;
        if (mem->used != 0 && mem->used != 1) goto __exit;
    }
    rt_hw_interrupt_enable(level);

    return 0;
__exit:
    rt_kprintf("Memory block wrong:\n");
    rt_kprintf("address: 0x%08x\n", mem);
    rt_kprintf("  magic: 0x%04x\n", mem->magic);
    rt_kprintf("   used: %d\n", mem->used);
    rt_kprintf("   size: %d\n", mem->next - position - SIZEOF_STRUCT_MEM);
    rt_hw_interrupt_enable(level);

    return 0;
}
MSH_CMD_EXPORT(memcheck, check memory data);

int memtrace(int argc, char **argv)
{
    struct heap_mem *mem;

    list_mem();

    rt_kprintf("\nmemory heap address:\n");
    rt_kprintf("heap_ptr: 0x%08x\n", heap_ptr);
    rt_kprintf("lfree   : 0x%08x\n", lfree);
    rt_kprintf("heap_end: 0x%08x\n", heap_end);

    rt_kprintf("\n--memory item information --\n");
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        int position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        int size;

        rt_kprintf("[0x%08x - ", mem);

        size = mem->next - position - SIZEOF_STRUCT_MEM;
        if (size < 1024)
            rt_kprintf("%5d", size);
        else if (size < 1024 * 1024)
            rt_kprintf("%4dK", size / 1024);
        else
            rt_kprintf("%4dM", size / (1024 * 1024));

        rt_kprintf("] %c%c%c%c", mem->thread[0], mem->thread[1], mem->thread[2], mem->thread[3]);
        if (mem->magic != HEAP_MAGIC)
            rt_kprintf(": ***\n");
        else
            rt_kprintf("\n");
    }

    return 0;
}
MSH_CMD_EXPORT(memtrace, dump memory trace information);
#endif /* end of RT_USING_MEMTRACE */
#endif /* end of RT_USING_FINSH */

#endif

/**@}*/

#endif /* end of RT_USING_HEAP */
#endif /* end of RT_USING_MEMHEAP_AS_HEAP */