From 49fa5c44d7c293e85dc17be2063f1f1c14956f30 Mon Sep 17 00:00:00 2001
From: ardafu
Date: Wed, 22 Apr 2015 11:19:50 +0800
Subject: [PATCH] [libcpu][arm926] Optimize code

1. Combine code for IAR and GCC in mmu.c and cpuport.c
2. Remove the remap code from start_xxx.S. Users should configure the MMU to
   map the vector table to virtual address 0x0
---
 libcpu/arm/arm926/context_gcc.S |  26 +-
 libcpu/arm/arm926/cpuport.c     |  77 ++----
 libcpu/arm/arm926/mmu.c         | 416 ++++++++------------------------
 libcpu/arm/arm926/start_gcc.S   |  43 ++--
 libcpu/arm/arm926/start_iar.S   |  32 ++-
 libcpu/arm/arm926/start_rvds.S  |  75 ++----
 6 files changed, 192 insertions(+), 477 deletions(-)

diff --git a/libcpu/arm/arm926/context_gcc.S b/libcpu/arm/arm926/context_gcc.S
index 419c5565de..5152618d24 100644
--- a/libcpu/arm/arm926/context_gcc.S
+++ b/libcpu/arm/arm926/context_gcc.S
@@ -49,15 +49,15 @@ rt_hw_interrupt_enable:
  */
 .globl rt_hw_context_switch
 rt_hw_context_switch:
-    stmfd sp!, {lr}              @; push pc (lr should be pushed in place of pc)
-    stmfd sp!, {r0-r12, lr}      @; push lr & register file
-    mrs r4, cpsr
-    stmfd sp!, {r4}              @; push cpsr
-    str sp, [r0]                 @; store sp in preempted tasks tcb
-    ldr sp, [r1]                 @; get new task stack pointer
-    ldmfd sp!, {r4}              @; pop new task spsr
-    msr spsr_cxsf, r4
-    ldmfd sp!, {r0-r12, lr, pc}^ @; pop new task r0-r12, lr & pc
+    STMFD SP!, {LR}              @; push pc (lr should be pushed in place of pc)
+    STMFD SP!, {R0-R12, LR}      @; push lr & register file
+    MRS R4, CPSR
+    STMFD SP!, {R4}              @; push cpsr
+    STR SP, [R0]                 @; store sp in preempted tasks tcb
+    LDR SP, [R1]                 @; get new task stack pointer
+    LDMFD SP!, {R4}              @; pop new task spsr
+    MSR SPSR_cxsf, R4
+    LDMFD SP!, {R0-R12, LR, PC}^ @; pop new task r0-r12, lr & pc

 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
@@ -65,10 +65,10 @@ rt_hw_context_switch:
  */
 .globl rt_hw_context_switch_to
 rt_hw_context_switch_to:
-    ldr sp, [r0]                 @; get new task stack pointer
-    ldmfd sp!, {r4}              @; pop new task cpsr
-    msr spsr_cxsf, r4
-    ldmfd sp!, {r0-r12, lr, pc}^ @; pop new task r0-r12, lr & pc
+    LDR SP, [R0]                 @; get new task stack pointer
+    LDMFD SP!, {R4}              @; pop new task cpsr
+    MSR SPSR_cxsf, R4
+    LDMFD SP!, {R0-R12, LR, PC}^ @; pop new task r0-r12, lr & pc

 /*
  * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
diff --git a/libcpu/arm/arm926/cpuport.c b/libcpu/arm/arm926/cpuport.c
index 8d5c21660c..fb90df9e9f 100644
--- a/libcpu/arm/arm926/cpuport.c
+++ b/libcpu/arm/arm926/cpuport.c
@@ -32,18 +32,18 @@
 extern void machine_reset(void);
 extern void machine_shutdown(void);

-#ifdef __GNUC__
+#if defined(__GNUC__) || defined(__ICCARM__)
 rt_inline rt_uint32_t cp15_rd(void)
 {
     rt_uint32_t i;

-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
+    __asm volatile("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
     return i;
 }

 rt_inline void cache_enable(rt_uint32_t bit)
 {
-    __asm__ __volatile__(\
+    __asm volatile(\
         "mrc p15,0,r0,c1,c0,0\n\t" \
         "orr r0,r0,%0\n\t" \
         "mcr p15,0,r0,c1,c0,0" \
@@ -54,7 +54,7 @@ rt_inline void cache_enable(rt_uint32_t bit)

 rt_inline void cache_disable(rt_uint32_t bit)
 {
-    __asm__ __volatile__(\
+    __asm volatile(\
         "mrc p15,0,r0,c1,c0,0\n\t" \
         "bic r0,r0,%0\n\t" \
         "mcr p15,0,r0,c1,c0,0" \
@@ -64,12 +64,12 @@ rt_inline void cache_disable(rt_uint32_t bit)
 }
 #endif

-#ifdef __CC_ARM
+#if defined(__CC_ARM)
 rt_inline rt_uint32_t cp15_rd(void)
 {
     rt_uint32_t i;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, i, c1, c0, 0
     }
@@ -81,7 +81,7 @@ rt_inline void cache_enable(rt_uint32_t bit)
 {
     rt_uint32_t value;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, value, c1, c0, 0
         orr value, value, bit
@@ -93,7 +93,7 @@ rt_inline void cache_disable(rt_uint32_t bit)
 {
     rt_uint32_t value;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, value, c1, c0, 0
         bic value, value, bit
@@ -102,38 +102,6 @@ rt_inline void cache_disable(rt_uint32_t bit)
 }
 #endif

-#ifdef __ICCARM__
-rt_inline rt_uint32_t cp15_rd(void)
-{
-    rt_uint32_t i;
-
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-    return i;
-}
-
-rt_inline void cache_enable(rt_uint32_t bit)
-{
-    asm volatile(\
-        "mrc p15,0,r0,c1,c0,0\n\t" \
-        "orr r0,r0,%0\n\t" \
-        "mcr p15,0,r0,c1,c0,0" \
-        : \
-        :"r" (bit) \
-        :"memory");
-}
-
-rt_inline void cache_disable(rt_uint32_t bit)
-{
-    asm volatile(\
-        "mrc p15,0,r0,c1,c0,0\n\t" \
-        "bic r0,r0,%0\n\t" \
-        "mcr p15,0,r0,c1,c0,0" \
-        : \
-        :"r" (bit) \
-        :"memory");
-}
-#endif
-
 /**
  * enable I-Cache
  *
@@ -249,27 +217,24 @@ int __rt_ffs(int value)

     return x;
 }
-#elif defined(__ICCARM__)
+#elif defined(__GNUC__) || defined(__ICCARM__)
 int __rt_ffs(int value)
 {
+    register rt_uint32_t x;
+
     if (value == 0)
         return value;

-    __ASM("RSB r4, r0, #0");
-    __ASM("AND r4, r4, r0");
-    __ASM("CLZ r4, r4");
-    __ASM("RSB r0, r4, #32");
-}
-#elif defined(__GNUC__)
-int __rt_ffs(int value)
-{
-    if (value == 0)
-        return value;
-
-    value &= (-value);
-    asm ("clz %0, %1": "=r"(value) :"r"(value));
-
-    return (32 - value);
+    __asm
+    (
+        "rsb %[temp], %[val], #0\n"
+        "and %[temp], %[temp], %[val]\n"
+        "clz %[temp], %[temp]\n"
+        "rsb %[temp], %[temp], #32\n"
+        :[temp] "=r"(x)
+        :[val] "r"(value)
+    );
+    return x;
 }
 #endif

diff --git a/libcpu/arm/arm926/mmu.c b/libcpu/arm/arm926/mmu.c
index b0b1d428bd..b8cfd31a1c 100644
--- a/libcpu/arm/arm926/mmu.c
+++ b/libcpu/arm/arm926/mmu.c
@@ -24,6 +24,7 @@

 #include "mmu.h"

+/*----- Keil -----------------------------------------------------------------*/
 #ifdef __CC_ARM
 void mmu_setttbase(rt_uint32_t i)
 {
@@ -35,32 +36,22 @@ void mmu_setttbase(rt_uint32_t i)
      * set by page table entry
      */
     value = 0;
-    __asm
-    {
-        mcr p15, 0, value, c8, c7, 0
-    }
-
+    __asm volatile{ mcr p15, 0, value, c8, c7, 0 }
     value = 0x55555555;
-    __asm
-    {
-        mcr p15, 0, value, c3, c0, 0
-        mcr p15, 0, i, c2, c0, 0
-    }
+    __asm volatile { mcr p15, 0, value, c3, c0, 0 }
+    __asm volatile { mcr p15, 0, i, c2, c0, 0 }
 }

 void mmu_set_domain(rt_uint32_t i)
 {
-    __asm
-    {
-        mcr p15,0, i, c3, c0, 0
-    }
+    __asm volatile { mcr p15, 0, i, c3, c0, 0 }
 }

 void mmu_enable()
 {
     register rt_uint32_t value;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, value, c1, c0, 0
         orr value, value, #0x01
@@ -72,7 +63,7 @@ void mmu_disable()
 {
     register rt_uint32_t value;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, value, c1, c0, 0
         bic value, value, #0x01
@@ -84,7 +75,7 @@ void mmu_enable_icache()
 {
     register rt_uint32_t value;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, value, c1, c0, 0
         orr value, value, #0x1000
@@ -96,7 +87,7 @@ void mmu_enable_dcache()
 {
     register rt_uint32_t value;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, value, c1, c0, 0
         orr value, value, #0x04
@@ -108,7 +99,7 @@ void mmu_disable_icache()
 {
     register rt_uint32_t value;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, value, c1, c0, 0
         bic value, value, #0x1000
@@ -120,7 +111,7 @@ void mmu_disable_dcache()
 {
     register rt_uint32_t value;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, value, c1, c0, 0
         bic value, value, #0x04
@@ -132,7 +123,7 @@ void mmu_enable_alignfault()
 {
     register rt_uint32_t value;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, value, c1, c0, 0
         orr value, value, #0x02
@@ -144,7 +135,7 @@ void mmu_disable_alignfault()
 {
     register rt_uint32_t value;

-    __asm
+    __asm volatile
     {
         mrc p15, 0, value, c1, c0, 0
         bic value, value, #0x02
@@ -154,10 +145,7 @@ void mmu_disable_alignfault()

 void mmu_clean_invalidated_cache_index(int index)
 {
-    __asm
-    {
-        mcr p15, 0, index, c7, c14, 2
-    }
+    __asm volatile { mcr p15, 0, index, c7, c14, 2 }
 }

 void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
@@ -168,10 +156,7 @@ void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)

     while(ptr < buffer + size)
     {
-        __asm
-        {
-            MCR p15, 0, ptr, c7, c14, 1
-        }
+        __asm volatile { MCR p15, 0, ptr, c7, c14, 1 }
         ptr += CACHE_LINE_SIZE;
     }
 }
@@ -184,10 +169,7 @@ void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)

     while (ptr < buffer + size)
     {
-        __asm
-        {
-            MCR p15, 0, ptr, c7, c10, 1
-        }
+        __asm volatile { MCR p15, 0, ptr, c7, c10, 1 }
         ptr += CACHE_LINE_SIZE;
     }
 }
@@ -200,10 +182,7 @@ void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)

     while (ptr < buffer + size)
     {
-        __asm
-        {
-            MCR p15, 0, ptr, c7, c6, 1
-        }
+        __asm volatile { MCR p15, 0, ptr, c7, c6, 1 }
         ptr += CACHE_LINE_SIZE;
     }
 }
@@ -213,10 +192,7 @@ void mmu_invalidate_tlb()
     register rt_uint32_t value;

     value = 0;
-    __asm
-    {
-        mcr p15, 0, value, c8, c7, 0
-    }
+    __asm volatile { mcr p15, 0, value, c8, c7, 0 }
 }

 void mmu_invalidate_icache()
@@ -225,10 +201,7 @@ void mmu_invalidate_icache()

     value = 0;

-    __asm
-    {
-        mcr p15, 0, value, c7, c5, 0
-    }
+    __asm volatile { mcr p15, 0, value, c7, c5, 0 }
 }


@@ -238,12 +211,10 @@ void mmu_invalidate_dcache_all()

     value = 0;

-    __asm
-    {
-        mcr p15, 0, value, c7, c6, 0
-    }
+    __asm volatile { mcr p15, 0, value, c7, c6, 0 }
 }
-#elif defined(__GNUC__)
+/*----- GNU ------------------------------------------------------------------*/
+#elif defined(__GNUC__) || defined(__ICCARM__)
 void mmu_setttbase(register rt_uint32_t i)
 {
     register rt_uint32_t value;
@@ -254,125 +225,117 @@ void mmu_setttbase(register rt_uint32_t i)
      * set by page table entry
      */
     value = 0;
-    asm ("mcr p15, 0, %0, c8, c7, 0"::"r"(value));
+    asm volatile ("mcr p15, 0, %0, c8, c7, 0"::"r"(value));

     value = 0x55555555;
-    asm ("mcr p15, 0, %0, c3, c0, 0"::"r"(value));
-    asm ("mcr p15, 0, %0, c2, c0, 0"::"r"(i));
+    asm volatile ("mcr p15, 0, %0, c3, c0, 0"::"r"(value));
+
+    asm volatile ("mcr p15, 0, %0, c2, c0, 0"::"r"(i));
+
 }

 void mmu_set_domain(register rt_uint32_t i)
 {
-    asm ("mcr p15,0, %0, c3, c0, 0": :"r" (i));
+    asm volatile ("mcr p15,0, %0, c3, c0, 0": :"r" (i));
 }

 void mmu_enable()
 {
-    register rt_uint32_t i;
-
-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i |= 0x1;
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
+    asm volatile
+    (
+        "mrc p15, 0, r0, c1, c0, 0 \n"
+        "orr r0, r0, #0x1 \n"
+        "mcr p15, 0, r0, c1, c0, 0 \n"
+        :::"r0"
+    );
 }

 void mmu_disable()
 {
-    register rt_uint32_t i;
+    asm volatile
+    (
+        "mrc p15, 0, r0, c1, c0, 0 \n"
+        "bic r0, r0, #0x1 \n"
+        "mcr p15, 0, r0, c1, c0, 0 \n"
+        :::"r0"
+    );

-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i &= ~0x1;
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
 }

 void mmu_enable_icache()
 {
-    register rt_uint32_t i;
-
-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i |= (1 << 12);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
+    asm volatile
+    (
+        "mrc p15, 0, r0, c1, c0, 0 \n"
+        "orr r0, r0, #(1<<12) \n"
+        "mcr p15, 0, r0, c1, c0, 0 \n"
+        :::"r0"
+    );
 }

 void mmu_enable_dcache()
 {
-    register rt_uint32_t i;
+    asm volatile
+    (
+        "mrc p15, 0, r0, c1, c0, 0 \n"
+        "orr r0, r0, #(1<<2) \n"
+        "mcr p15, 0, r0, c1, c0, 0 \n"
+        :::"r0"
+    );

-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i |= (1 << 2);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
 }

 void mmu_disable_icache()
 {
-    register rt_uint32_t i;
+    asm volatile
+    (
+        "mrc p15, 0, r0, c1, c0, 0 \n"
+        "bic r0, r0, #(1<<12) \n"
+        "mcr p15, 0, r0, c1, c0, 0 \n"
+        :::"r0"
+    );

-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i &= ~(1 << 12);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
 }

 void mmu_disable_dcache()
 {
-    register rt_uint32_t i;
+    asm volatile
+    (
+        "mrc p15, 0, r0, c1, c0, 0 \n"
+        "bic r0, r0, #(1<<2) \n"
+        "mcr p15, 0, r0, c1, c0, 0 \n"
+        :::"r0"
+    );

-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i &= ~(1 << 2);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
 }

 void mmu_enable_alignfault()
 {
-    register rt_uint32_t i;
+    asm volatile
+    (
+        "mrc p15, 0, r0, c1, c0, 0 \n"
+        "orr r0, r0, #(1<<1) \n"
+        "mcr p15, 0, r0, c1, c0, 0 \n"
+        :::"r0"
+    );

-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i |= (1 << 1);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
 }

 void mmu_disable_alignfault()
 {
-    register rt_uint32_t i;
+    asm volatile
+    (
+        "mrc p15, 0, r0, c1, c0, 0 \n"
+        "bic r0, r0, #(1<<1) \n"
+        "mcr p15, 0, r0, c1, c0, 0 \n"
+        :::"r0"
+    );

-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i &= ~(1 << 1);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
 }

 void mmu_clean_invalidated_cache_index(int index)
 {
-    asm ("mcr p15, 0, %0, c7, c14, 2": :"r" (index));
+    asm volatile ("mcr p15, 0, %0, c7, c14, 2": :"r" (index));
 }

 void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
@@ -383,7 +346,8 @@ void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)

     while(ptr < buffer + size)
     {
-        asm ("mcr p15, 0, %0, c7, c14, 1": :"r" (ptr));
+        asm volatile ("mcr p15, 0, %0, c7, c14, 1": :"r" (ptr));
+
         ptr += CACHE_LINE_SIZE;
     }
 }
@@ -397,7 +361,8 @@ void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)

     while (ptr < buffer + size)
     {
-        asm ("mcr p15, 0, %0, c7, c10, 1": :"r" (ptr));
+        asm volatile ("mcr p15, 0, %0, c7, c10, 1": :"r" (ptr));
+
         ptr += CACHE_LINE_SIZE;
     }
 }
@@ -410,221 +375,40 @@ void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)

     while (ptr < buffer + size)
     {
-        asm ("mcr p15, 0, %0, c7, c6, 1": :"r" (ptr));
+        asm volatile ("mcr p15, 0, %0, c7, c6, 1": :"r" (ptr));
+
         ptr += CACHE_LINE_SIZE;
     }
 }

 void mmu_invalidate_tlb()
 {
-    asm ("mcr p15, 0, %0, c8, c7, 0": :"r" (0));
+    asm volatile ("mcr p15, 0, %0, c8, c7, 0": :"r" (0));
+
 }

 void mmu_invalidate_icache()
 {
-    asm ("mcr p15, 0, %0, c7, c5, 0": :"r" (0));
+    asm volatile ("mcr p15, 0, %0, c7, c5, 0": :"r" (0));
+
 }

 void mmu_invalidate_dcache_all()
 {
-    asm ("mcr p15, 0, %0, c7, c6, 0": :"r" (0));
-}
-#elif defined(__ICCARM__)
-void mmu_setttbase(register rt_uint32_t i)
-{
-    register rt_uint32_t value;
+    asm volatile ("mcr p15, 0, %0, c7, c6, 0": :"r" (0));

-    /* Invalidates all TLBs.Domain access is selected as
-     * client by configuring domain access register,
-     * in that case access controlled by permission value
-     * set by page table entry
-     */
-    value = 0;
-    asm ("mcr p15, 0, %0, c8, c7, 0"::"r"(value));
-
-    value = 0x55555555;
-    asm ("mcr p15, 0, %0, c3, c0, 0"::"r"(value));
-    asm ("mcr p15, 0, %0, c2, c0, 0"::"r"(i));
-}
-
-void mmu_set_domain(register rt_uint32_t i)
-{
-    asm ("mcr p15,0, %0, c3, c0, 0": :"r" (i));
-}
-
-void mmu_enable()
-{
-    register rt_uint32_t i;
-
-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i |= 0x1;
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
-}
-
-void mmu_disable()
-{
-    register rt_uint32_t i;
-
-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i &= ~0x1;
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
-}
-
-void mmu_enable_icache()
-{
-    register rt_uint32_t i;
-
-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i |= (1 << 12);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
-}
-
-void mmu_enable_dcache()
-{
-    register rt_uint32_t i;
-
-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i |= (1 << 2);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
-}
-
-void mmu_disable_icache()
-{
-    register rt_uint32_t i;
-
-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i &= ~(1 << 12);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
-}
-
-void mmu_disable_dcache()
-{
-    register rt_uint32_t i;
-
-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i &= ~(1 << 2);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
-}
-
-void mmu_enable_alignfault()
-{
-    register rt_uint32_t i;
-
-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i |= (1 << 1);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
-}
-
-void mmu_disable_alignfault()
-{
-    register rt_uint32_t i;
-
-    /* read control register */
-    asm ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
-
-    i &= ~(1 << 1);
-
-    /* write back to control register */
-    asm ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
-}
-
-void mmu_clean_invalidated_cache_index(int index)
-{
-    asm ("mcr p15, 0, %0, c7, c14, 2": :"r" (index));
-}
-
-void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
-{
-    unsigned int ptr;
-
-    ptr = buffer & ~(CACHE_LINE_SIZE - 1);
-
-    while(ptr < buffer + size)
-    {
-        asm ("mcr p15, 0, %0, c7, c14, 1": :"r" (ptr));
-        ptr += CACHE_LINE_SIZE;
-    }
-}
-
-
-void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)
-{
-    unsigned int ptr;
-
-    ptr = buffer & ~(CACHE_LINE_SIZE - 1);
-
-    while (ptr < buffer + size)
-    {
-        asm ("mcr p15, 0, %0, c7, c10, 1": :"r" (ptr));
-        ptr += CACHE_LINE_SIZE;
-    }
-}
-
-void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)
-{
-    unsigned int ptr;
-
-    ptr = buffer & ~(CACHE_LINE_SIZE - 1);
-
-    while (ptr < buffer + size)
-    {
-        asm ("mcr p15, 0, %0, c7, c6, 1": :"r" (ptr));
-        ptr += CACHE_LINE_SIZE;
-    }
-}
-
-void mmu_invalidate_tlb()
-{
-    asm ("mcr p15, 0, %0, c8, c7, 0": :"r" (0));
-}
-
-void mmu_invalidate_icache()
-{
-    asm ("mcr p15, 0, %0, c7, c5, 0": :"r" (0));
-}
-
-void mmu_invalidate_dcache_all()
-{
-    asm ("mcr p15, 0, %0, c7, c6, 0": :"r" (0));
 }
 #endif

 /* level1 page table */
 #if defined(__ICCARM__)
 #pragma data_alignment=(16*1024)
-static volatile unsigned int _page_table[4*1024];;
+static volatile rt_uint32_t _page_table[4*1024];
 #else
-static volatile unsigned int _page_table[4*1024] \
-__attribute__((aligned(16*1024)));
+static volatile rt_uint32_t _page_table[4*1024] \
+    __attribute__((aligned(16*1024)));
 #endif
+
 void mmu_setmtt(rt_uint32_t vaddrStart, rt_uint32_t vaddrEnd, rt_uint32_t paddrStart, rt_uint32_t attr)
 {
diff --git a/libcpu/arm/arm926/start_gcc.S b/libcpu/arm/arm926/start_gcc.S
index f2cceeb8f9..aeeb37616b 100644
--- a/libcpu/arm/arm926/start_gcc.S
+++ b/libcpu/arm/arm926/start_gcc.S
@@ -21,6 +21,7 @@
  * Date           Author       Notes
  * 2011-01-13     weety        first version
  * 2015-04-15     ArdaFu       Split from AT91SAM9260 BSP
+ * 2015-04-21     ArdaFu       Remove remap code. Use the MMU to map the vector table
  */

 #define S_FRAME_SIZE    (18*4)  //72
@@ -63,31 +64,30 @@
     .global UND_STACK_START
 UND_STACK_START:

-    .space SVC_STK_SIZE
-    .align 2
-    .global SVC_STACK_START
-SVC_STACK_START:
-
     .space ABT_STK_SIZE
     .align 2
     .global ABT_STACK_START
 ABT_STACK_START:

-    .space IRQ_STK_SIZE
-    .align 2
-    .global IRQ_STACK_START
-IRQ_STACK_START:
-
     .space FIQ_STK_SIZE
     .align 2
     .global FIQ_STACK_START
 FIQ_STACK_START:

+    .space IRQ_STK_SIZE
+    .align 2
+    .global IRQ_STACK_START
+IRQ_STACK_START:
+
     .skip SYS_STK_SIZE
     .align 2
     .global SYS_STACK_START
 SYS_STACK_START:

+    .space SVC_STK_SIZE
+    .align 2
+    .global SVC_STACK_START
+SVC_STACK_START:

 @;--------------Jump vector table-----------------------------------------------

 .section .init, "ax"
@@ -132,24 +132,21 @@ Reset_Handler:
     MRS     R0, CPSR
     BIC     R0, R0, #MODEMASK
     ORR     R0, R0, #MODE_SVC|NOINT
-    MSR     CPSR, R0
-    LDR     SP, =SVC_STACK_START
+    MSR     CPSR_cxsf, R0
+
+    @; Set coprocessor CP15:
+    @; little-endian, disable I/D cache and MMU, vector table at 0x00000000
+    MRC     P15, 0, R0, C1, C0, 0    @; Read CP15
+    LDR     R1, =0x00003085          @; set clear bits
+    BIC     R0, R0, R1
+    MCR     P15, 0, R0, C1, C0, 0    @; Write CP15

     @; Call low level init function,
-    @; disable and clear all IRQs and remap internal ram to 0x00000000.
+    @; disable and clear all IRQs, initialize the MMU and interrupt controller, etc.
+    LDR     SP, =SVC_STACK_START
     LDR     R0, =rt_low_level_init
     BLX     R0

-    @; Copy Exception Vectors to Internal RAM
-    LDR     R8, =entry                @; Source
-    LDR     R9, =VECTOR_TABLE_START   @; Destination
-    CMP     R8, R9
-    BEQ     Setup_Stack
-    LDMIA   R8!, {R0-R7}              @; Load Vectors
-    STMIA   R9!, {R0-R7}              @; Store Vectors
-    LDMIA   R8!, {R0-R7}              @; Load Handler Addresses
-    STMIA   R9!, {R0-R7}              @; Store Handler Addresses
-
 Setup_Stack:
     @; Setup Stack for each mode
     MRS     R0, CPSR
diff --git a/libcpu/arm/arm926/start_iar.S b/libcpu/arm/arm926/start_iar.S
index 187ae35bbf..4e689236a5 100644
--- a/libcpu/arm/arm926/start_iar.S
+++ b/libcpu/arm/arm926/start_iar.S
@@ -21,6 +21,7 @@
 ; * Date           Author       Notes
 ; * 2011-01-13     weety        first version
 ; * 2015-04-15     ArdaFu       Split from AT91SAM9260 BSP
+; * 2015-04-21     ArdaFu       Remove remap code. Use the MMU to map the vector table
 ; */

 #define S_FRAME_SIZE    (18*4)  ;72
@@ -80,16 +81,16 @@ FIQ_STACK_START:
     PUBLIC IRQ_STACK_START
 IRQ_STACK_START:

-    ALIGNRAM 2
-    DS8 SVC_STK_SIZE
-    PUBLIC SVC_STACK_START
-SVC_STACK_START:
-
     ALIGNRAM 2
     DS8 SYS_STK_SIZE
     PUBLIC SYS_STACK_START
 SYS_STACK_START:

+    ALIGNRAM 2
+    DS8 SVC_STK_SIZE
+    PUBLIC SVC_STACK_START
+SVC_STACK_START:
+
 ;--------------Jump vector table------------------------------------------------
     SECTION .intvec:CODE:ROOT(2)
     ARM
@@ -134,22 +135,19 @@ Reset_Handler:
     BIC     R0, R0, #MODEMASK
     ORR     R0, R0, #MODE_SVC|NOINT
     MSR     CPSR_cxsf, R0
-    LDR     SP, =SVC_STACK_START
+
+    ; Set coprocessor CP15:
+    ; little-endian, disable I/D cache and MMU, vector table at 0x00000000
+    MRC     P15, 0, R0, C1, C0, 0    ; Read CP15
+    LDR     R1, =0x00003085          ; set clear bits
+    BIC     R0, R0, R1
+    MCR     P15, 0, R0, C1, C0, 0    ; Write CP15

     ; Call low level init function,
-    ; disable and clear all IRQs and remap internal ram to 0x00000000.
+    ; disable and clear all IRQs, initialize the MMU and interrupt controller, etc.
+    LDR     SP, =SVC_STACK_START
     LDR     R0, =rt_low_level_init
     BLX     R0
-
-    ; Copy Exception Vectors to Internal RAM
-    LDR     R8, =Entry_Point        ; Source
-    LDR     R9, =VECTOR_TABLE_START ; Destination
-    CMP     R8, R9
-    BEQ     Setup_Stack
-    LDMIA   R8!, {R0-R7}            ; Load Vectors
-    STMIA   R9!, {R0-R7}            ; Store Vectors
-    LDMIA   R8!, {R0-R7}            ; Load Handler Addresses
-    STMIA   R9!, {R0-R7}            ; Store Handler Addresses

 Setup_Stack:
     ; Setup Stack for each mode
diff --git a/libcpu/arm/arm926/start_rvds.S b/libcpu/arm/arm926/start_rvds.S
index edf2fcb96e..e086e394f1 100644
--- a/libcpu/arm/arm926/start_rvds.S
+++ b/libcpu/arm/arm926/start_rvds.S
@@ -20,7 +20,8 @@
 ; * Change Logs:
 ; * Date           Author       Notes
 ; * 2011-08-14     weety        first version
-; * 2015-04-15     ArdaFu       Split from AT91SAM9260 BSP
+; * 2015-04-15     ArdaFu       Split from AT91SAM9260 BSP
+; * 2015-04-21     ArdaFu       Remove remap code. Use the MMU to map the vector table
 ; */

 S_FRAME_SIZE    EQU     (18*4)  ;72
@@ -59,46 +60,38 @@ NOINT           EQU     0xC0
     GET rt_low_level_keil.inc

 ;----------------------- Stack and Heap Definitions ----------------------------
-    AREA    STACK, NOINIT, READWRITE, ALIGN=3
+    AREA    STACK, NOINIT, READWRITE, ALIGN=2
 Stack_Mem
     SPACE   UND_STK_SIZE
     EXPORT  UND_STACK_START
 UND_STACK_START

-    ALIGN   8
+    ALIGN   4
     SPACE   ABT_STK_SIZE
     EXPORT  ABT_STACK_START
 ABT_STACK_START

-    ALIGN   8
+    ALIGN   4
     SPACE   FIQ_STK_SIZE
     EXPORT  FIQ_STACK_START
 FIQ_STACK_START

-    ALIGN   8
+    ALIGN   4
     SPACE   IRQ_STK_SIZE
     EXPORT  IRQ_STACK_START
 IRQ_STACK_START

-    ALIGN   8
-    SPACE   SVC_STK_SIZE
-    EXPORT  SVC_STACK_START
-SVC_STACK_START
-
-    ALIGN   8
+    ALIGN   4
     SPACE   SYS_STK_SIZE
     EXPORT  SYS_STACK_START
 SYS_STACK_START
+
+    ALIGN   4
+    SPACE   SVC_STK_SIZE
+    EXPORT  SVC_STACK_START
+SVC_STACK_START
 Stack_Top
-
-Heap_Size       EQU     0x00000000
-    AREA    HEAP, NOINIT, READWRITE, ALIGN=3
-__heap_base
-Heap_Mem
-    SPACE   Heap_Size
-__heap_limit
-
     PRESERVE8

 ;--------------Jump vector table------------------------------------------------
     EXPORT Entry_Point
@@ -139,25 +132,22 @@ Reset_Handler
     ; set the cpu to SVC32 mode
     MRS     R0,CPSR
     BIC     R0,R0,#MODEMASK
-    ORR     R0,R0,#MODE_SVC
-    MSR     CPSR_CXSF,R0
-    LDR     SP, =SVC_STACK_START
+    ORR     R0,R0,#MODE_SVC:OR:NOINT
+    MSR     CPSR_cxsf,R0
+
+    ; Set coprocessor CP15:
+    ; little-endian, disable I/D cache and MMU, vector table at 0x00000000
+    MRC     p15, 0, R0, c1, c0, 0    ; Read CP15
+    LDR     R1, =0x00003085          ; set clear bits
+    BIC     R0, R0, R1
+    MCR     p15, 0, R0, c1, c0, 0    ; Write CP15

     ; Call low level init function,
-    ; disable and clear all IRQs and remap internal ram to 0x00000000.
+    ; disable and clear all IRQs, initialize the MMU and interrupt controller, etc.
+    LDR     SP, =SVC_STACK_START
     LDR     R0, =rt_low_level_init
     BLX     R0

-    ; Copy Exception Vectors to Internal RAM
-    LDR     R8, =Entry_Point        ; Source
-    LDR     R9, =VECTOR_TABLE_START ; Destination
-    CMP     R8, R9
-    BEQ     Setup_Stack
-    LDMIA   R8!, {R0-R7}            ; Load Vectors
-    STMIA   R9!, {R0-R7}            ; Store Vectors
-    LDMIA   R8!, {R0-R7}            ; Load Handler Addresses
-    STMIA   R9!, {R0-R7}            ; Store Handler Addresses
-
 Setup_Stack
     ; Setup Stack for each mode
     MRS     R0, CPSR
@@ -301,23 +291,4 @@ rt_hw_context_switch_interrupt_do PROC
     LDMFD   SP!, {R0-R12,LR,PC}^    ; pop new task's R0-R12,LR & PC SPSR to CPSR
     ENDP

-
-    IF      :DEF:__MICROLIB
-
-    EXPORT  __heap_base
-    EXPORT  __heap_limit
-
-    ELSE
-    ; User Initial Stack & Heap
-    AREA    |.text|, CODE, READONLY
-
-    IMPORT  __use_two_region_memory
-    EXPORT  __user_initial_stackheap
-__user_initial_stackheap
-    LDR     R0, = Heap_Mem
-    LDR     R1, = (Stack_Mem + SYS_STK_SIZE)
-    LDR     R2, = (Heap_Mem + Heap_Size)
-    LDR     R3, = Stack_Mem
-    BX      LR
-    ENDIF
     END
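
Note (editor's addition, not part of the commit above): because the start-up files no longer copy the exception vectors to address 0x0, each BSP is now expected to create that mapping itself when it builds the page table, typically inside rt_low_level_init() or its MMU setup code. The sketch below shows one way to do this with mmu_setmtt() from the mmu.c in this patch. It is only an illustration: the physical base address 0x20000000 and the attribute macro RW_CB are assumptions, so substitute the RAM section that actually holds your vector table and the section-attribute macro provided by your board's mmu.h.

    #include <rtthread.h>
    #include "mmu.h"

    /* Hypothetical helper for a BSP's low-level init.
     * Assumptions: the image (and its vector table at 'entry') lives in RAM at
     * physical 0x20000000, and mmu.h defines RW_CB as a cached/bufferable
     * section descriptor attribute. Adjust both for the real board. */
    void bsp_map_vector_table(void)
    {
        /* Map one 1MB section so that virtual 0x00000000..0x000FFFFF points at
         * the RAM that contains the exception vectors. */
        mmu_setmtt(0x00000000, 0x000FFFFF, 0x20000000, RW_CB);

        /* Drop any stale translations before the MMU is (re)enabled. */
        mmu_invalidate_tlb();
    }

Such a call belongs in the page-table setup that rt_low_level_init() performs before mmu_enable(); once the MMU is on, the vectors are reachable at virtual address 0x0 without the old remap/copy step.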