/*
 * Copyright (c) 2012, 2013 ARM Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Implementation of <<malloc>> <<free>> <<calloc>> <<realloc>>, optionally
 * reentrant.
 *
 * For interface documentation, refer to malloc.c.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <malloc.h>

#if DEBUG
#include <assert.h>
#else
#undef assert
#define assert(x) ((void)0)
#endif

#ifndef MAX
#define MAX(a,b) ((a) >= (b) ? (a) : (b))
#endif

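/* Every request for more heap goes through this macro; in the reentrant
 * (_LIBC) build below, the RCALL macro supplies the struct _reent pointer
 * as the leading argument of _sbrk_r. */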
#define _SBRK_R(X) _sbrk_r(X)

#ifdef _LIBC

#include <sys/config.h>
#include <reent.h>

#define RARG struct _reent *reent_ptr,
#define RONEARG struct _reent *reent_ptr
#define RCALL reent_ptr,
#define RONECALL reent_ptr

#define MALLOC_LOCK __malloc_lock(reent_ptr)
#define MALLOC_UNLOCK __malloc_unlock(reent_ptr)

#define RERRNO reent_ptr->_errno

#define nano_malloc _malloc_r
#define nano_free _free_r
#define nano_realloc _realloc_r
#define nano_memalign _memalign_r
#define nano_valloc _valloc_r
#define nano_pvalloc _pvalloc_r
#define nano_calloc _calloc_r
#define nano_cfree _cfree_r
#define nano_malloc_usable_size _malloc_usable_size_r
#define nano_malloc_stats _malloc_stats_r
#define nano_mallinfo _mallinfo_r
#define nano_mallopt _mallopt_r

#else /* ! _LIBC */

#define RARG
#define RONEARG
#define RCALL
#define RONECALL

#define MALLOC_LOCK
#define MALLOC_UNLOCK
#define RERRNO errno

#define nano_malloc malloc
#define nano_free free
#define nano_realloc realloc
#define nano_memalign memalign
#define nano_valloc valloc
#define nano_pvalloc pvalloc
#define nano_calloc calloc
#define nano_cfree cfree
#define nano_malloc_usable_size malloc_usable_size
#define nano_malloc_stats malloc_stats
#define nano_mallinfo mallinfo
#define nano_mallopt mallopt

#endif /* ! _LIBC */
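
/* Illustration: with _LIBC defined, a declaration written as
 *     void * nano_malloc(RARG malloc_size_t);
 * expands to the reentrant entry point
 *     void * _malloc_r(struct _reent *reent_ptr, malloc_size_t);
 * whereas without _LIBC the RARG/RCALL macros expand to nothing and the
 * plain names (malloc, free, ...) are defined directly. */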

/* Redefine names to avoid conflict with user names */
#define free_list __malloc_free_list
#define sbrk_start __malloc_sbrk_start
#define current_mallinfo __malloc_current_mallinfo

#define ALIGN_PTR(ptr, align) \
    (((ptr) + (align) - (intptr_t)1) & ~((align) - (intptr_t)1))
#define ALIGN_SIZE(size, align) \
    (((size) + (align) - (size_t)1) & ~((align) - (size_t)1))
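/* For example, ALIGN_SIZE(13, 8) rounds 13 up to 16; values that are
 * already multiples of the (power-of-two) alignment are unchanged. */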

/* Alignment of allocated block */
#define MALLOC_ALIGN (8U)
#define CHUNK_ALIGN (sizeof(void*))
#define MALLOC_PADDING ((MAX(MALLOC_ALIGN, CHUNK_ALIGN)) - CHUNK_ALIGN)

/* as well as the minimal allocation size
 * to hold a free pointer */
#define MALLOC_MINSIZE (sizeof(void *))
#define MALLOC_PAGE_ALIGN (0x1000)
#define MAX_ALLOC_SIZE (0x80000000U)

typedef size_t malloc_size_t;

typedef struct malloc_chunk
{
    /*          --------------------------------------
     * chunk -> | size                               |
     *          --------------------------------------
     *          | Padding for alignment              |
     *          | This includes padding inserted by  |
     *          | the compiler (to align fields) and |
     *          | explicit padding inserted by this  |
     *          | implementation. If any explicit    |
     *          | padding is being used then the     |
     *          | sizeof (size) bytes at             |
     *          | mem_ptr - CHUNK_OFFSET must be     |
     *          | initialized with the negative      |
     *          | offset to size.                    |
     *          --------------------------------------
     * mem_ptr->| When allocated: data               |
     *          | When freed: pointer to next free   |
     *          | chunk                              |
     *          --------------------------------------
     */
    /* size of the allocated payload area, including size before
       CHUNK_OFFSET */
    long size;

    /* From here on, the memory is either the next free chunk or the data
     * payload */
    struct malloc_chunk * next;
} chunk;

#define CHUNK_OFFSET ((malloc_size_t)(&(((struct malloc_chunk *)0)->next)))

/* size of smallest possible chunk. A memory piece smaller than this size
 * won't be able to create a chunk */
#define MALLOC_MINCHUNK (CHUNK_OFFSET + MALLOC_PADDING + MALLOC_MINSIZE)
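/* Worked example (illustrative, assuming an ILP32 target with 4-byte
 * pointers and a 4-byte long): CHUNK_OFFSET = 4 (offset of 'next' after
 * 'size'), MALLOC_PADDING = MAX(8, 4) - 4 = 4 and MALLOC_MINSIZE = 4, so
 * MALLOC_MINCHUNK = 4 + 4 + 4 = 12 bytes; anything smaller cannot hold a
 * chunk header plus an aligned payload. */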

/* Forward data declarations */
extern chunk * free_list;
extern char * sbrk_start;
extern struct mallinfo current_mallinfo;

/* Forward function declarations */
extern void * nano_malloc(RARG malloc_size_t);
extern void nano_free (RARG void * free_p);
extern void nano_cfree(RARG void * ptr);
extern void * nano_calloc(RARG malloc_size_t n, malloc_size_t elem);
extern void nano_malloc_stats(RONEARG);
extern malloc_size_t nano_malloc_usable_size(RARG void * ptr);
extern void * nano_realloc(RARG void * ptr, malloc_size_t size);
extern void * nano_memalign(RARG size_t align, size_t s);
extern int nano_mallopt(RARG int parameter_number, int parameter_value);
extern void * nano_valloc(RARG size_t s);
extern void * nano_pvalloc(RARG size_t s);

static inline chunk * get_chunk_from_ptr(void * ptr)
{
    /* Assume that there is no explicit padding in the
       chunk, and that the chunk starts at ptr - CHUNK_OFFSET. */
    chunk * c = (chunk *)((char *)ptr - CHUNK_OFFSET);

    /* c->size being negative indicates that there is explicit padding in
       the chunk. In which case, c->size is currently the negative offset to
       the true size. */
    if (c->size < 0) c = (chunk *)((char *)c + c->size);
    return c;
}

#ifdef DEFINE_MALLOC
/* List header of free blocks */
chunk * free_list = NULL;

/* Starting point of memory allocated from system */
char * sbrk_start = NULL;

/** Function sbrk_aligned
  * Algorithm:
  *   Use sbrk() to obtain more memory and ensure it is CHUNK_ALIGN aligned
  *   Optimise for the case that it is already aligned - only ask for extra
  *   padding after we know we need it
  */
static void* sbrk_aligned(RARG malloc_size_t s)
{
    char *p, *align_p;

    if (sbrk_start == NULL) sbrk_start = _SBRK_R(RCALL 0);

    p = _SBRK_R(RCALL s);

    /* sbrk returns -1 if fail to allocate */
    if (p == (void *)-1)
        return p;

    align_p = (char*)ALIGN_PTR((uintptr_t)p, CHUNK_ALIGN);
    if (align_p != p)
    {
        /* p is not aligned, ask for a few more bytes so that we have s
         * bytes reserved from align_p. */
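        /* (This relies on consecutive sbrk calls returning contiguous
         * memory: the extra bytes requested here extend the region that
         * already starts at p.) */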
        p = _SBRK_R(RCALL align_p - p);
        if (p == (void *)-1)
            return p;
    }
    return align_p;
}

/** Function nano_malloc
  * Algorithm:
  *   Walk through the free list to find the first match. If it fails to
  *   find one, call sbrk to allocate a new chunk.
  */
void * nano_malloc(RARG malloc_size_t s)
{
    chunk *p, *r;
    char * ptr, * align_ptr;
    int offset;

    malloc_size_t alloc_size;

    alloc_size = ALIGN_SIZE(s, CHUNK_ALIGN); /* size of aligned data load */
    alloc_size += MALLOC_PADDING; /* padding */
    alloc_size += CHUNK_OFFSET; /* size of chunk head */
    alloc_size = MAX(alloc_size, MALLOC_MINCHUNK);
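    /* Example (illustrative, 4-byte CHUNK_ALIGN/CHUNK_OFFSET and 8-byte
     * MALLOC_ALIGN): a request of s = 10 becomes 12 (aligned payload)
     * + 4 (padding budget) + 4 (chunk header) = 20 bytes. */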

    if (alloc_size >= MAX_ALLOC_SIZE || alloc_size < s)
    {
        RERRNO = ENOMEM;
        return NULL;
    }

    MALLOC_LOCK;

    p = free_list;
    r = p;

    while (r)
    {
        int rem = r->size - alloc_size;
        if (rem >= 0)
        {
            if (rem >= MALLOC_MINCHUNK)
            {
                if (p == r)
                {
                    /* First item in the list, break it into two chunks
                     * and return the first one */
                    r->size = alloc_size;
                    free_list = (chunk *)((char *)r + alloc_size);
                    free_list->size = rem;
                    free_list->next = r->next;
                } else {
                    /* Any other item in the list. Split and return
                     * the first one */
                    r->size = alloc_size;
                    p->next = (chunk *)((char *)r + alloc_size);
                    p->next->size = rem;
                    p->next->next = r->next;
                }
            }
            /* Found a chunk that is exactly the size or slightly bigger
             * than the requested size; just return this chunk */
            else if (p == r)
            {
                /* Now it implies p==r==free_list. Move the free_list
                 * to next chunk */
                free_list = r->next;
            }
            else
            {
                /* Normal case. Remove it from free_list */
                p->next = r->next;
            }
            break;
        }
        p = r;
        r = r->next;
    }

    /* Failed to find an appropriate chunk. Ask for more memory */
    if (r == NULL)
    {
        r = sbrk_aligned(RCALL alloc_size);

        /* sbrk returns -1 if fail to allocate */
        if (r == (void *)-1)
        {
            /* sbrk didn't have the requested amount. Let's check
             * if the last item in the free list is adjacent to the
             * current heap end (sbrk(0)). In that case, only ask
             * for the difference in size and merge them */
            p = free_list;
            r = p;

            while (r)
            {
                p = r;
                r = r->next;
            }

            if (p != NULL && (char *)p + p->size == (char *)_SBRK_R(RCALL 0))
            {
                /* The last free item has the heap end as neighbour.
                 * Let's ask for a smaller amount and merge */
                alloc_size -= p->size;
                alloc_size = ALIGN_SIZE(alloc_size, CHUNK_ALIGN); /* size of aligned data load */
                alloc_size += MALLOC_PADDING; /* padding */
                alloc_size += CHUNK_OFFSET; /* size of chunk head */
                alloc_size = MAX(alloc_size, MALLOC_MINCHUNK);

                if (sbrk_aligned(RCALL alloc_size) != (void *)-1)
                {
                    p->size += alloc_size;
                    r = p;
                }
                else
                {
                    RERRNO = ENOMEM;
                    MALLOC_UNLOCK;
                    return NULL;
                }
            }
            else
            {
                RERRNO = ENOMEM;
                MALLOC_UNLOCK;
                return NULL;
            }
        }
        else
        {
            r->size = alloc_size;
        }
    }
    MALLOC_UNLOCK;

    ptr = (char *)r + CHUNK_OFFSET;

    align_ptr = (char *)ALIGN_PTR((uintptr_t)ptr, MALLOC_ALIGN);
    offset = align_ptr - ptr;

    if (offset)
    {
        /* Initialize sizeof (malloc_chunk.size) bytes at
           align_ptr - CHUNK_OFFSET with negative offset to the
           size field (at the start of the chunk).

           The negative offset to size from align_ptr - CHUNK_OFFSET is
           the size of any remaining padding minus CHUNK_OFFSET.  This is
           equivalent to the total size of the padding, because the size of
           any remaining padding is the total size of the padding minus
           CHUNK_OFFSET.

           Note that the size of the padding must be at least CHUNK_OFFSET.

           The rest of the padding is not initialized.  */
        *(long *)((char *)r + offset) = -offset;
    }

    assert(align_ptr + s <= (char *)r + alloc_size);
    return align_ptr;
}
#endif /* DEFINE_MALLOC */

#ifdef DEFINE_FREE
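/* When MALLOC_CHECK_DOUBLE_FREE is defined, nano_free checks whether the
 * block being freed overlaps a chunk that is already on the free list and,
 * if so, reports the fault through errno instead of corrupting the list. */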
#define MALLOC_CHECK_DOUBLE_FREE

/** Function nano_free
  * Implementation of libc free.
  * Algorithm:
  *  Maintain a global singly linked list of free chunks, headed by the
  *  global variable free_list.
  *  On free, insert the chunk into the free list at a position that keeps
  *  all chunks sorted by address from low to high, then merge with
  *  neighbouring chunks if they are adjacent.
  */
void nano_free (RARG void * free_p)
{
    chunk * p_to_free;
    chunk * p, * q;

    if (free_p == NULL) return;

    p_to_free = get_chunk_from_ptr(free_p);

    MALLOC_LOCK;
    if (free_list == NULL)
    {
        /* Set first free list element */
        p_to_free->next = free_list;
        free_list = p_to_free;
        MALLOC_UNLOCK;
        return;
    }

    if (p_to_free < free_list)
    {
        if ((char *)p_to_free + p_to_free->size == (char *)free_list)
        {
            /* Chunk to free is just before the first element of
             * free list */
            p_to_free->size += free_list->size;
            p_to_free->next = free_list->next;
        }
        else
        {
            /* Insert before current free_list */
            p_to_free->next = free_list;
        }
        free_list = p_to_free;
        MALLOC_UNLOCK;
        return;
    }

    q = free_list;
    /* Walk through the free list to find the place for insert. */
    do
    {
        p = q;
        q = q->next;
    } while (q && q <= p_to_free);

    /* Now p <= p_to_free and either q == NULL or q > p_to_free
     * Try to merge with chunks immediately before/after it. */

    if ((char *)p + p->size == (char *)p_to_free)
    {
        /* Chunk to be freed is adjacent
         * to a free chunk before it */
        p->size += p_to_free->size;
        /* If the merged chunk is also adjacent
         * to the chunk after it, merge again */
        if ((char *)p + p->size == (char *) q)
        {
            p->size += q->size;
            p->next = q->next;
        }
    }
#ifdef MALLOC_CHECK_DOUBLE_FREE
    else if ((char *)p + p->size > (char *)p_to_free)
    {
        /* Report double free fault */
        RERRNO = ENOMEM;
        MALLOC_UNLOCK;
        return;
    }
#endif
    else if ((char *)p_to_free + p_to_free->size == (char *) q)
    {
        /* Chunk to be freed is adjacent
         * to a free chunk after it */
        p_to_free->size += q->size;
        p_to_free->next = q->next;
        p->next = p_to_free;
    }
    else
    {
        /* Not adjacent to any chunk. Just insert it, leaving a
         * fragment. */
        p_to_free->next = q;
        p->next = p_to_free;
    }
    MALLOC_UNLOCK;
}
#endif /* DEFINE_FREE */

#ifdef DEFINE_CFREE
void nano_cfree(RARG void * ptr)
{
    nano_free(RCALL ptr);
}
#endif /* DEFINE_CFREE */

#ifdef DEFINE_CALLOC
/* Function nano_calloc
 * Implement calloc by calling malloc and zeroing the returned memory */
void * nano_calloc(RARG malloc_size_t n, malloc_size_t elem)
{
    malloc_size_t bytes;
    void * mem;
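
    /* __builtin_mul_overflow (a GCC/Clang builtin) computes n * elem and
     * reports whether the multiplication wrapped around, so oversized
     * requests fail cleanly instead of allocating a too-small block. */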
    if (__builtin_mul_overflow (n, elem, &bytes))
    {
        RERRNO = ENOMEM;
        return NULL;
    }
    mem = nano_malloc(RCALL bytes);
    if (mem != NULL) memset(mem, 0, bytes);
    return mem;
}
#endif /* DEFINE_CALLOC */

#ifdef DEFINE_REALLOC
/* Function nano_realloc
 * Implement realloc by malloc + memcpy */
void * nano_realloc(RARG void * ptr, malloc_size_t size)
{
    void * mem;
    chunk * p_to_realloc;
    malloc_size_t old_size;

    if (ptr == NULL) return nano_malloc(RCALL size);

    if (size == 0)
    {
        nano_free(RCALL ptr);
        return NULL;
    }

    old_size = nano_malloc_usable_size(RCALL ptr);
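
    /* Shrink in place when the existing block already fits and is not
     * wastefully large: keep it if the new size is at most the usable
     * size but still more than half of it. */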
    if (size <= old_size && (old_size >> 1) < size)
        return ptr;

    mem = nano_malloc(RCALL size);
    if (mem != NULL)
    {
        if (old_size > size)
            old_size = size;
        memcpy(mem, ptr, old_size);
        nano_free(RCALL ptr);
    }
    return mem;
}
#endif /* DEFINE_REALLOC */

#ifdef DEFINE_MALLINFO
struct mallinfo current_mallinfo={0,0,0,0,0,0,0,0,0,0};

struct mallinfo nano_mallinfo(RONEARG)
{
    char * sbrk_now;
    chunk * pf;
    size_t free_size = 0;
    size_t total_size;

    MALLOC_LOCK;

    if (sbrk_start == NULL) total_size = 0;
    else {
        sbrk_now = _SBRK_R(RCALL 0);

        if (sbrk_now == (void *)-1)
            total_size = (size_t)-1;
        else
            total_size = (size_t) (sbrk_now - sbrk_start);
    }

    for (pf = free_list; pf; pf = pf->next)
        free_size += pf->size;
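
    /* arena: total bytes obtained from the system via sbrk;
     * fordblks: bytes currently held on the free list;
     * uordblks: the remainder, i.e. bytes currently in use
     * (including chunk headers and padding). */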
    current_mallinfo.arena = total_size;
    current_mallinfo.fordblks = free_size;
    current_mallinfo.uordblks = total_size - free_size;

    MALLOC_UNLOCK;
    return current_mallinfo;
}
#endif /* DEFINE_MALLINFO */

#ifdef DEFINE_MALLOC_STATS
void nano_malloc_stats(RONEARG)
{
    nano_mallinfo(RONECALL);
    fiprintf(stderr, "max system bytes = %10u\n",
             current_mallinfo.arena);
    fiprintf(stderr, "system bytes     = %10u\n",
             current_mallinfo.arena);
    fiprintf(stderr, "in use bytes     = %10u\n",
             current_mallinfo.uordblks);
}
#endif /* DEFINE_MALLOC_STATS */

#ifdef DEFINE_MALLOC_USABLE_SIZE
malloc_size_t nano_malloc_usable_size(RARG void * ptr)
{
    chunk * c = (chunk *)((char *)ptr - CHUNK_OFFSET);
    int size_or_offset = c->size;

    if (size_or_offset < 0)
    {
        /* Padding is in use; exclude the padding from the usable size */
        c = (chunk *)((char *)c + c->size);
        return c->size - CHUNK_OFFSET + size_or_offset;
    }
    return c->size - CHUNK_OFFSET;
}
#endif /* DEFINE_MALLOC_USABLE_SIZE */

#ifdef DEFINE_MEMALIGN
/* Function nano_memalign
 * Allocate memory block aligned at specific boundary.
 *   align: required alignment. Must be a power of 2; return NULL if it
 *          is not. Behavior is undefined if align is larger than the
 *          range of a pointer value.
 *   s: required size.
 * Return: allocated memory pointer aligned to align
 * Algorithm: Malloc a big enough block, pad the pointer up to the aligned
 *            address, then truncate and free the tail if it is too big.
 *            Record the offset between the aligned pointer and the
 *            original pointer in the padding area.
 */
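/* Illustration (not part of the algorithm; assumes 4-byte CHUNK_ALIGN and
 * 8-byte MALLOC_ALIGN): nano_memalign(64, 100) computes ma_size = 100 and
 * size_with_padding = 100 + (64 - 8) = 156, mallocs that, rounds the
 * payload pointer up to the next 64-byte boundary, then either frees the
 * leading slack as its own chunk or records it as a negative offset, and
 * finally trims any oversized tail. */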
void * nano_memalign(RARG size_t align, size_t s)
{
    chunk * chunk_p;
    malloc_size_t size_allocated, offset, ma_size, size_with_padding;
    char * allocated, * aligned_p;

    /* Return NULL if align isn't power of 2 */
    if ((align & (align-1)) != 0) return NULL;

    align = MAX(align, MALLOC_ALIGN);

    /* Make sure ma_size does not overflow */
    if (s > __SIZE_MAX__ - CHUNK_ALIGN)
    {
        RERRNO = ENOMEM;
        return NULL;
    }
    ma_size = ALIGN_SIZE(MAX(s, MALLOC_MINSIZE), CHUNK_ALIGN);

    /* Make sure size_with_padding does not overflow */
    if (ma_size > __SIZE_MAX__ - (align - MALLOC_ALIGN))
    {
        RERRNO = ENOMEM;
        return NULL;
    }
    size_with_padding = ma_size + (align - MALLOC_ALIGN);

    allocated = nano_malloc(RCALL size_with_padding);
    if (allocated == NULL) return NULL;

    chunk_p = get_chunk_from_ptr(allocated);
    aligned_p = (char *)ALIGN_PTR(
                    (uintptr_t)((char *)chunk_p + CHUNK_OFFSET),
                    (uintptr_t)align);
    offset = aligned_p - ((char *)chunk_p + CHUNK_OFFSET);

    if (offset)
    {
        if (offset >= MALLOC_MINCHUNK)
        {
            /* Padding is too large, free it */
            chunk * front_chunk = chunk_p;
            chunk_p = (chunk *)((char *)chunk_p + offset);
            chunk_p->size = front_chunk->size - offset;
            front_chunk->size = offset;
            nano_free(RCALL (char *)front_chunk + CHUNK_OFFSET);
        }
        else
        {
            /* Padding is used. Need to set a jump offset for aligned pointer
             * to get back to chunk head */
            assert(offset >= sizeof(int));
            *(long *)((char *)chunk_p + offset) = -offset;
        }
    }

    size_allocated = chunk_p->size;
    if ((char *)chunk_p + size_allocated >
         (aligned_p + ma_size + MALLOC_MINCHUNK))
    {
        /* allocated much more than what's required for padding, free
         * tail part */
        chunk * tail_chunk = (chunk *)(aligned_p + ma_size);
        chunk_p->size = aligned_p + ma_size - (char *)chunk_p;
        tail_chunk->size = size_allocated - chunk_p->size;
        nano_free(RCALL (char *)tail_chunk + CHUNK_OFFSET);
    }
    return aligned_p;
}
#endif /* DEFINE_MEMALIGN */

#ifdef DEFINE_MALLOPT
int nano_mallopt(RARG int parameter_number, int parameter_value)
{
    return 0;
}
#endif /* DEFINE_MALLOPT */

#ifdef DEFINE_VALLOC
void * nano_valloc(RARG size_t s)
{
    return nano_memalign(RCALL MALLOC_PAGE_ALIGN, s);
}
#endif /* DEFINE_VALLOC */

#ifdef DEFINE_PVALLOC
void * nano_pvalloc(RARG size_t s)
{
    /* Make sure size given to nano_valloc does not overflow */
    if (s > __SIZE_MAX__ - MALLOC_PAGE_ALIGN)
    {
        RERRNO = ENOMEM;
        return NULL;
    }
    return nano_valloc(RCALL ALIGN_SIZE(s, MALLOC_PAGE_ALIGN));
}
#endif /* DEFINE_PVALLOC */