[Kernel] Remove module-specific memory ops when SLAB and module are enabled

Bernard Xiong 2018-06-10 18:46:11 +08:00
parent 564df04da1
commit df64a297ab
6 changed files with 10 additions and 86 deletions
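The delete paths touched here (idle-thread stack cleanup, mailbox, message queue, memory pool, and generic object delete) all drop the same conditional: when RT_USING_MODULE and RT_USING_SLAB were both enabled, memory owned by an application module was returned through rt_module_free() instead of the kernel allocator. A minimal before/after sketch of that pattern, using the thread-stack case from the first hunk below (everything outside the shown lines is unchanged):

    /* before: pick the allocator based on the owner of the stack */
#if defined(RT_USING_MODULE) && defined(RT_USING_SLAB)
    if (thread->flags & RT_OBJECT_FLAG_MODULE)
        rt_module_free((rt_module_t)thread->module_id, thread->stack_addr);
    else
#endif
    RT_KERNEL_FREE(thread->stack_addr);

    /* after: the stack is always freed by the kernel allocator */
    RT_KERNEL_FREE(thread->stack_addr);

The slab.c hunks at the end remove the matching rt_module_self() redirection inside rt_malloc(), rt_realloc() and rt_free() itself, so allocation and free stay symmetric.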

View File

@@ -164,12 +164,6 @@ void rt_thread_idle_excute(void)
        rt_hw_interrupt_enable(lock);

#ifdef RT_USING_HEAP
#if defined(RT_USING_MODULE) && defined(RT_USING_SLAB)
        /* the thread belongs to an application module */
        if (thread->flags & RT_OBJECT_FLAG_MODULE)
            rt_module_free((rt_module_t)thread->module_id, thread->stack_addr);
        else
#endif
        /* release thread's stack */
        RT_KERNEL_FREE(thread->stack_addr);

        /* delete thread object */

View File

@@ -1380,13 +1380,6 @@ rt_err_t rt_mb_delete(rt_mailbox_t mb)
    /* also resume all mailbox private suspended thread */
    rt_ipc_list_resume_all(&(mb->suspend_sender_thread));

#if defined(RT_USING_MODULE) && defined(RT_USING_SLAB)
    /* the mb object belongs to an application module */
    if (mb->parent.parent.flag & RT_OBJECT_FLAG_MODULE)
        rt_module_free(mb->parent.parent.module_id, mb->msg_pool);
    else
#endif
    /* free mailbox pool */
    RT_KERNEL_FREE(mb->msg_pool);

@@ -1900,13 +1893,6 @@ rt_err_t rt_mq_delete(rt_mq_t mq)
    /* resume all suspended thread */
    rt_ipc_list_resume_all(&(mq->parent.suspend_thread));

#if defined(RT_USING_MODULE) && defined(RT_USING_SLAB)
    /* the mq object belongs to an application module */
    if (mq->parent.parent.flag & RT_OBJECT_FLAG_MODULE)
        rt_module_free(mq->parent.parent.module_id, mq->msg_pool);
    else
#endif
    /* free message queue pool */
    RT_KERNEL_FREE(mq->msg_pool);

View File

@@ -292,13 +292,6 @@ rt_err_t rt_mp_delete(rt_mp_t mp)
        rt_hw_interrupt_enable(temp);
    }

#if defined(RT_USING_MODULE) && defined(RT_USING_SLAB)
    /* the mp object belongs to an application module */
    if (mp->parent.flag & RT_OBJECT_FLAG_MODULE)
        rt_module_free(mp->parent.module_id, mp->start_address);
    else
#endif
    /* release allocated room */
    rt_free(mp->start_address);

View File

@@ -65,30 +65,6 @@
#define RT_USING_MODULE_PRIO (RT_THREAD_PRIORITY_MAX - 2)
#endif

#ifdef RT_USING_SLAB
#define PAGE_COUNT_MAX 256

/* module memory allocator */
struct rt_mem_head
{
    rt_size_t size;              /* size of memory block */
    struct rt_mem_head *next;    /* next valid memory block */
};

struct rt_page_info
{
    rt_uint32_t *page_ptr;
    rt_uint32_t npage;
};

static void *rt_module_malloc_page(rt_size_t npages);
static void rt_module_free_page(rt_module_t module,
                                void *page_ptr,
                                rt_size_t npages);

static struct rt_semaphore mod_sem;
#endif

static struct rt_module_symtab *_rt_module_symtab_begin = RT_NULL;
static struct rt_module_symtab *_rt_module_symtab_end = RT_NULL;
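With these declarations removed, a module built with RT_USING_SLAB no longer carries its own page allocator (the rt_mem_head free list, the rt_page_info page table, and the mod_sem lock); together with the slab.c change below, its memory simply comes from the kernel heap. A hypothetical fragment of module code, only to illustrate where an allocation ends up after this commit (the buffer size and variable name are made up):

    /* served directly by the kernel SLAB allocator after this commit */
    rt_uint8_t *buf = (rt_uint8_t *)rt_malloc(128);
    if (buf != RT_NULL)
    {
        /* ... use the buffer ... */
        rt_free(buf);
    }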

View File

@@ -387,12 +387,6 @@ void rt_object_delete(rt_object_t object)
    /* unlock interrupt */
    rt_hw_interrupt_enable(temp);

#if defined(RT_USING_MODULE) && defined(RT_USING_SLAB)
    if (object->flag & RT_OBJECT_FLAG_MODULE)
        rt_module_free((rt_module_t)object->module_id, object);
    else
#endif
    /* free the memory of object */
    RT_KERNEL_FREE(object);
}

View File

@@ -496,11 +496,6 @@ void *rt_malloc(rt_size_t size)
    if (size == 0)
        return RT_NULL;

#ifdef RT_USING_MODULE
    if (rt_module_self() != RT_NULL)
        return rt_module_malloc(size);
#endif

    /*
     * Handle large allocations directly. There should not be very many of
     * these so performance is not a big issue.
@@ -705,11 +700,6 @@ void *rt_realloc(void *ptr, rt_size_t size)
        return RT_NULL;
    }

#ifdef RT_USING_MODULE
    if (rt_module_self() != RT_NULL)
        return rt_module_realloc(ptr, size);
#endif

    /*
     * Get the original allocation's zone. If the new request winds up
     * using the same chunk size we do not have to do anything.
@@ -800,15 +790,6 @@ void rt_free(void *ptr)
    RT_OBJECT_HOOK_CALL(rt_free_hook, (ptr));

#ifdef RT_USING_MODULE
    if (rt_module_self() != RT_NULL)
    {
        rt_module_free(rt_module_self(), ptr);
        return;
    }
#endif

    /* get memory usage */
#if RT_DEBUG_SLAB
    {
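After these three hunks, the SLAB entry points no longer ask rt_module_self() whether the caller is an application module. A sketch of how the top of rt_malloc() reads after the change, showing only the context lines visible above and eliding the rest of the function:

    void *rt_malloc(rt_size_t size)
    {
        /* zero-size requests are still rejected up front */
        if (size == 0)
            return RT_NULL;

        /* the RT_USING_MODULE redirection used to sit here */

        /*
         * Handle large allocations directly. There should not be very many of
         * these so performance is not a big issue.
         */
        /* ... unchanged remainder of the function ... */
    }

rt_realloc() and rt_free() are simplified in exactly the same way: the early return into rt_module_realloc()/rt_module_free() disappears, and every pointer goes through the normal SLAB zone and page logic instead.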