From 1523e4680d6b17a788314cb38386fd391daad819 Mon Sep 17 00:00:00 2001 From: zhouji <956133287@qq.com> Date: Sat, 1 May 2021 19:15:37 +0800 Subject: [PATCH] =?UTF-8?q?[add]=20=E6=B7=BB=E5=8A=A0gicv3=E4=B8=AD?= =?UTF-8?q?=E6=96=AD=E6=8E=A7=E5=88=B6=E5=99=A8=E4=BB=A3=E7=A0=81=EF=BC=8C?= =?UTF-8?q?=E6=9B=B4=E6=96=B0menuconfig=E9=85=8D=E7=BD=AE=E9=80=89?= =?UTF-8?q?=E9=A1=B9=E4=B8=8Eutest=E7=9A=84config.h?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- examples/utest/configs/utest_self/config.h | 1 + libcpu/Kconfig | 20 + libcpu/arm/cortex-a/SConscript | 23 +- libcpu/arm/cortex-a/gicv3.c | 708 +++++++++++++++++++++ libcpu/arm/cortex-a/gicv3.h | 194 ++++++ libcpu/arm/cortex-a/interrupt.c | 40 +- 6 files changed, 983 insertions(+), 3 deletions(-) create mode 100644 libcpu/arm/cortex-a/gicv3.c create mode 100644 libcpu/arm/cortex-a/gicv3.h diff --git a/examples/utest/configs/utest_self/config.h b/examples/utest/configs/utest_self/config.h index 287f0f539d..2080a544b0 100644 --- a/examples/utest/configs/utest_self/config.h +++ b/examples/utest/configs/utest_self/config.h @@ -55,6 +55,7 @@ #define RT_VER_NUM 0x40004 #define ARCH_ARM #define ARCH_ARM_CORTEX_A +#define RT_USING_GIC_V2 #define ARCH_ARM_CORTEX_A9 /* RT-Thread Components */ diff --git a/libcpu/Kconfig b/libcpu/Kconfig index 974c421138..bb54b487c7 100644 --- a/libcpu/Kconfig +++ b/libcpu/Kconfig @@ -58,6 +58,26 @@ config ARCH_ARM_CORTEX_A bool select ARCH_ARM + if ARCH_ARM_CORTEX_A + config RT_SMP_AUTO_BOOT + bool + default n + + choice + prompt "GIC controller selection" + default RT_USING_GIC_V2 + + config RT_USING_GIC_V2 + bool " Gic version 2 " + + config RT_USING_GIC_V3 + bool " Gic version 3 " + + config RT_NO_USING_GIC + bool " GIC controller is not used " + endchoice + endif + config ARCH_ARM_CORTEX_A5 bool select ARCH_ARM_CORTEX_A diff --git a/libcpu/arm/cortex-a/SConscript b/libcpu/arm/cortex-a/SConscript index 28af7ba651..a9fe6f0bc9 100644 --- a/libcpu/arm/cortex-a/SConscript +++ b/libcpu/arm/cortex-a/SConscript @@ -5,9 +5,27 @@ from building import * Import('rtconfig') cwd = GetCurrentDir() -src = Glob('*.c') + Glob('*.cpp') +src = Split(''' +cache.c +cpu.c +gtimer.c +mmu.c +pmu.c +stack.c + +''') CPPPATH = [cwd] +if GetDepend('RT_USING_GIC_V2'): + src += ['interrupt.c'] + src += ['gic.c'] + src += ['trap.c'] + +if GetDepend('RT_USING_GIC_V3'): + src += ['interrupt.c'] + src += ['gicv3.c'] + src += ['trap.c'] + if rtconfig.PLATFORM == 'armcc': src += Glob('*_rvds.S') @@ -18,6 +36,9 @@ if rtconfig.PLATFORM == 'gcc': if rtconfig.PLATFORM == 'iar': src += Glob('*_iar.S') +if rtconfig.PLATFORM == 'iar': + src += Glob('*_iar.S') + group = DefineGroup('CPU', src, depend = [''], CPPPATH = CPPPATH) Return('group') diff --git a/libcpu/arm/cortex-a/gicv3.c b/libcpu/arm/cortex-a/gicv3.c new file mode 100644 index 0000000000..a3fb0e767b --- /dev/null +++ b/libcpu/arm/cortex-a/gicv3.c @@ -0,0 +1,708 @@ +/* + * Copyright (c) 2006-2021, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2013-07-20 Bernard first version + * 2014-04-03 Grissiom many enhancements + * 2018-11-22 Jesven add rt_hw_ipi_send() + * add rt_hw_ipi_handler_install() + */ + +#include +#include + +#include "gicv3.h" +#include "cp15.h" + +#ifndef RT_CPUS_NR +#define RT_CPUS_NR 1 +#endif + +struct arm_gic_v3 +{ + rt_uint32_t offset; /* the first interrupt index in the vector table */ + rt_uint32_t redist_hw_base[RT_CPUS_NR]; /* the pointer of the gic 
redistributor */ + rt_uint32_t dist_hw_base; /* the base address of the gic distributor */ + rt_uint32_t cpu_hw_base[RT_CPUS_NR]; /* the base addrees of the gic cpu interface */ +}; + +/* 'ARM_GIC_MAX_NR' is the number of cores */ +static struct arm_gic_v3 _gic_table[ARM_GIC_MAX_NR]; +static unsigned int _gic_max_irq; + +/** + * @name: arm_gic_cpumask_to_affval + * @msg: + * @in param cpu_mask: + * @out param cluster_id: aff1 [0:7],aff2 [8:15],aff3 [16:23] + * @out param target_list: Target List. The set of PEs for which SGI interrupts will be generated. Each bit corresponds to the + * PE within a cluster with an Affinity 0 value equal to the bit number. + * @return {rt_uint32_t} 0 is finish , 1 is data valid + */ +RT_WEAK rt_uint32_t arm_gic_cpumask_to_affval(rt_uint32_t *cpu_mask, rt_uint32_t *cluster_id, rt_uint32_t *target_list) +{ + return 0; +} + +RT_WEAK rt_uint64_t get_main_cpu_affval(void) +{ + return 0; +} + +int arm_gic_get_active_irq(rt_uint32_t index) +{ + int irq; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + __get_gicv3_reg(ICC_IAR1, irq); + + irq = (irq & 0x1FFFFFF) + _gic_table[index].offset; + return irq; +} + +void arm_gic_ack(rt_uint32_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + RT_ASSERT(irq >= 0U); + + __asm__ volatile("dsb 0xF" :: + : "memory"); + __set_gicv3_reg(ICC_EOIR1, irq); +} + +void arm_gic_mask(rt_uint32_t index, int irq) +{ + rt_uint32_t mask = 1U << (irq % 32U); + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + if (irq < 32U) + { + rt_int32_t cpu_id = rt_hw_cpu_id(); + RT_ASSERT((cpu_id) < RT_CPUS_NR); + GIC_RDISTSGI_ICENABLER0(_gic_table[index].redist_hw_base[cpu_id]) = mask; + } + else + { + GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask; + } +} + +void arm_gic_umask(rt_uint32_t index, int irq) +{ + rt_uint32_t mask = 1U << (irq % 32U); + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + if (irq < 32U) + { + rt_int32_t cpu_id = rt_hw_cpu_id(); + RT_ASSERT((cpu_id) < RT_CPUS_NR); + GIC_RDISTSGI_ISENABLER0(_gic_table[index].redist_hw_base[cpu_id]) = mask; + } + else + { + GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask; + } +} + +rt_uint32_t arm_gic_get_pending_irq(rt_uint32_t index, int irq) +{ + rt_uint32_t pend; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + if (irq >= 16U) + { + pend = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL; + } + else + { + /* INTID 0-15 Software Generated Interrupt */ + pend = (GIC_DIST_SPENDSGI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL; + /* No CPU identification offered */ + if (pend != 0U) + { + pend = 1U; + } + else + { + pend = 0U; + } + } + + return (pend); +} + +void arm_gic_set_pending_irq(rt_uint32_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + if (irq >= 16U) + { + GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) = 1U << (irq % 32U); + } + else + { + /* INTID 0-15 Software Generated Interrupt */ + /* Forward the interrupt to the CPU interface that requested it */ + GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) = (irq | 0x02000000U); + } +} + +void arm_gic_clear_pending_irq(rt_uint32_t index, int irq) +{ + rt_uint32_t mask; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + if (irq >= 
16U) + { + mask = 1U << (irq % 32U); + GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask; + } + else + { + mask = 1U << ((irq % 4U) * 8U); + GIC_DIST_CPENDSGI(_gic_table[index].dist_hw_base, irq) = mask; + } +} + +void arm_gic_set_configuration(rt_uint32_t index, int irq, uint32_t config) +{ + rt_uint32_t icfgr; + rt_uint32_t shift; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + icfgr = GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq); + shift = (irq % 16U) << 1U; + + icfgr &= (~(3U << shift)); + icfgr |= (config << shift); + + GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) = icfgr; +} + +rt_uint32_t arm_gic_get_configuration(rt_uint32_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + return (GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) >> ((irq % 16U) >> 1U)); +} + +void arm_gic_clear_active(rt_uint32_t index, int irq) +{ + rt_uint32_t mask = 1U << (irq % 32U); + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask; +} + +/* Set up the cpu mask for the specific interrupt */ +void arm_gic_set_cpu(rt_uint32_t index, int irq, unsigned int cpumask) +{ + rt_uint32_t old_tgt; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + old_tgt = GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq); + + old_tgt &= ~(0x0FFUL << ((irq % 4U) * 8U)); + old_tgt |= cpumask << ((irq % 4U) * 8U); + + GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) = old_tgt; +} + +rt_uint32_t arm_gic_get_target_cpu(rt_uint32_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + return (GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL; +} + +void arm_gic_set_priority(rt_uint32_t index, int irq, rt_uint32_t priority) +{ + rt_uint32_t mask; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + if (irq < 32U) + { + rt_int32_t cpu_id = rt_hw_cpu_id(); + RT_ASSERT((cpu_id) < RT_CPUS_NR); + + mask = GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq); + mask &= ~(0xFFUL << ((irq % 4U) * 8U)); + mask |= ((priority & 0xFFUL) << ((irq % 4U) * 8U)); + GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq) = mask; + } + else + { + mask = GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq); + mask &= ~(0xFFUL << ((irq % 4U) * 8U)); + mask |= ((priority & 0xFFUL) << ((irq % 4U) * 8U)); + GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) = mask; + } +} + +rt_uint32_t arm_gic_get_priority(rt_uint32_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + if (irq < 32U) + { + rt_int32_t cpu_id = rt_hw_cpu_id(); + + RT_ASSERT((cpu_id) < RT_CPUS_NR); + return (GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq) >> ((irq % 4U) * 8U)) & 0xFFUL; + } + else + { + return (GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL; + } +} + +void arm_gic_set_system_register_enable_mask(rt_uint32_t index, rt_uint32_t value) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + value &= 0xFFUL; + /* set priority mask */ + __set_gicv3_reg(ICC_SRE, value); + __asm__ volatile ("isb 0xF":: + :"memory"); +} + 
+rt_uint32_t arm_gic_get_system_register_enable_mask(rt_uint32_t index) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + rt_uint32_t value; + + __get_gicv3_reg(ICC_SRE, value); + return value; +} + +void arm_gic_set_interface_prior_mask(rt_uint32_t index, rt_uint32_t priority) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + priority &= 0xFFUL; + /* set priority mask */ + __set_gicv3_reg(ICC_PMR, priority); +} + +rt_uint32_t arm_gic_get_interface_prior_mask(rt_uint32_t index) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + rt_uint32_t priority; + + __get_gicv3_reg(ICC_PMR, priority); + return priority; +} + +void arm_gic_set_binary_point(rt_uint32_t index, rt_uint32_t binary_point) +{ + index = index; + binary_point &= 0x7U; + + __set_gicv3_reg(ICC_BPR1, binary_point); +} + +rt_uint32_t arm_gic_get_binary_point(rt_uint32_t index) +{ + rt_uint32_t binary_point; + + index = index; + __get_gicv3_reg(ICC_BPR1, binary_point); + return binary_point; +} + +rt_uint32_t arm_gic_get_irq_status(rt_uint32_t index, int irq) +{ + rt_uint32_t pending; + rt_uint32_t active; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + active = (GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL; + pending = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL; + + return ((active << 1U) | pending); +} + +void arm_gic_send_affinity_sgi(rt_uint32_t index, int irq, rt_uint32_t cpu_mask, rt_uint32_t routing_mode) +{ + rt_uint64_t sgi_val; + + if (routing_mode) + { + sgi_val = (1ULL << 40) | ((irq & 0x0FULL) << 24); //Interrupts routed to all PEs in the system, excluding "self". + /* Write the ICC_SGI1R registers */ + __asm__ volatile("dsb 0xF" :: + : "memory"); + __set_cp64(15, 0, sgi_val, 12); + __asm__ volatile("isb 0xF" :: + : "memory"); + } + else + { + rt_uint32_t cluster_id, target_list; + while (arm_gic_cpumask_to_affval(&cpu_mask, &cluster_id, &target_list)) + { + sgi_val = ((irq & 0x0FULL) << 24 | + target_list | + ((cluster_id >> 8) & 0xFFULL) << GIC_RSGI_AFF1_OFFSET | + ((cluster_id >> 16) & 0xFFULL) << GIC_RSGI_AFF2_OFFSET | + ((cluster_id >> 24) & 0xFFull) << GIC_RSGI_AFF3_OFFSET); + + __asm__ volatile("dsb 0xF" :: + : "memory"); + __set_cp64(15, 0, sgi_val, 12); + __asm__ volatile("isb 0xF" :: + : "memory"); + } + } +} + +rt_uint32_t arm_gic_get_high_pending_irq(rt_uint32_t index) +{ + rt_uint32_t irq; + RT_ASSERT(index < ARM_GIC_MAX_NR); + + index = index; + __get_gicv3_reg(ICC_HPPIR1, irq); + return irq; +} + +rt_uint32_t arm_gic_get_interface_id(rt_uint32_t index) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + return GIC_CPU_IIDR(_gic_table[index].cpu_hw_base); +} + +void arm_gic_set_group(rt_uint32_t index, int irq, rt_uint32_t group) +{ + uint32_t igroupr; + uint32_t shift; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + RT_ASSERT(group <= 1U); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + igroupr = GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq); + shift = (irq % 32U); + igroupr &= (~(1U << shift)); + igroupr |= ((group & 0x1U) << shift); + + GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) = igroupr; +} + +rt_uint32_t arm_gic_get_group(rt_uint32_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0U); + + return (GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL; +} + +static int arm_gicv3_wait_rwp(rt_uint32_t index, rt_uint32_t irq) +{ + rt_uint32_t rwp_bit; + rt_uint32_t base; + + 
RT_ASSERT(index < ARM_GIC_MAX_NR); + + if (irq < 32u) + { + rt_int32_t cpu_id = rt_hw_cpu_id(); + + RT_ASSERT((cpu_id) < RT_CPUS_NR); + base = _gic_table[index].redist_hw_base[cpu_id]; + rwp_bit = GICR_CTLR_RWP; + } + else + { + base = _gic_table[index].dist_hw_base; + rwp_bit = GICD_CTLR_RWP; + } + + while (__REG32(base) & rwp_bit) + { + ; + } + + return 0; +} + +int arm_gic_dist_init(rt_uint32_t index, rt_uint32_t dist_base, int irq_start) +{ + rt_uint64_t cpu0_affval; + unsigned int gic_type, i; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + _gic_table[index].dist_hw_base = dist_base; + _gic_table[index].offset = irq_start; + + /* Find out how many interrupts are supported. */ + gic_type = GIC_DIST_TYPE(dist_base); + _gic_max_irq = ((gic_type & 0x1fU) + 1U) * 32U; + + /* + * The GIC only supports up to 1020 interrupt sources. + * Limit this to either the architected maximum, or the + * platform maximum. + */ + if (_gic_max_irq > 1020U) + _gic_max_irq = 1020U; + if (_gic_max_irq > ARM_GIC_NR_IRQS) /* the platform maximum interrupts */ + _gic_max_irq = ARM_GIC_NR_IRQS; + + GIC_DIST_CTRL(dist_base) = 0x0U; + /* Wait for register write pending */ + arm_gicv3_wait_rwp(0, 32); + + /* Set all global interrupts to be level triggered, active low. */ + for (i = 32U; i < _gic_max_irq; i += 16U) + GIC_DIST_CONFIG(dist_base, i) = 0x0U; + + arm_gicv3_wait_rwp(0, 32); + + cpu0_affval = get_main_cpu_affval(); + /* Set all global interrupts to this CPU only. */ + for (i = 32U; i < _gic_max_irq; i++) + { + GIC_DIST_IROUTER_LOW(dist_base, i) = cpu0_affval; + GIC_DIST_IROUTER_HIGH(dist_base, i) = cpu0_affval >> 32; + } + + arm_gicv3_wait_rwp(0, 32); + + /* Set priority on spi interrupts. */ + for (i = 32U; i < _gic_max_irq; i += 4U) + GIC_DIST_PRI(dist_base, i) = 0xa0a0a0a0U; + + arm_gicv3_wait_rwp(0, 32); + /* Disable all interrupts. */ + for (i = 0U; i < _gic_max_irq; i += 32U) + { + GIC_DIST_PENDING_CLEAR(dist_base, i) = 0xffffffffU; + GIC_DIST_ENABLE_CLEAR(dist_base, i) = 0xffffffffU; + } + + arm_gicv3_wait_rwp(0, 32); + /* All interrupts defaults to IGROUP1(IRQ). */ + for (i = 0U; i < _gic_max_irq; i += 32U) + GIC_DIST_IGROUP(dist_base, i) = 0xffffffffU; + + arm_gicv3_wait_rwp(0, 32); + + /* + The Distributor control register (GICD_CTLR) must be configured to enable the interrupt groups and to set the routing mode. + Enable Affinity routing (ARE bits) The ARE bits in GICD_CTLR control whether affinity routing is enabled. + If affinity routing is not enabled, GICv3 can be configured for legacy operation. + Whether affinity routing is enabled or not can be controlled separately for Secure and Non-secure state. + Enables GICD_CTLR contains separate enable bits for Group 0, Secure Group 1 and Non-secure Group 1: + GICD_CTLR.EnableGrp1S enables distribution of Secure Group 1 interrupts. + GICD_CTLR.EnableGrp1NS enables distribution of Non-secure Group 1 interrupts. + GICD_CTLR.EnableGrp0 enables distribution of Group 0 interrupts. 
+ */ + GIC_DIST_CTRL(dist_base) = GICD_CTLR_ARE_NS | GICD_CTLR_ENGRP1NS; + + return 0; +} + +int arm_gic_redist_address_set(rt_uint32_t index, rt_uint32_t redist_addr, rt_uint32_t cpu_id) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + RT_ASSERT((cpu_id) < RT_CPUS_NR); + _gic_table[index].redist_hw_base[cpu_id] = redist_addr; + + return 0; +} + +int arm_gic_cpu_interface_address_set(rt_uint32_t index, rt_uint32_t interface_addr, rt_uint32_t cpu_id) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + RT_ASSERT((cpu_id) < RT_CPUS_NR); + _gic_table[index].cpu_hw_base[cpu_id] = interface_addr; + + return 0; +} + +int arm_gic_redist_init(rt_uint32_t index) +{ + unsigned int i; + rt_uint32_t base; + rt_int32_t cpu_id = rt_hw_cpu_id(); + + RT_ASSERT(index < ARM_GIC_MAX_NR); + RT_ASSERT((cpu_id) < RT_CPUS_NR); + + base = _gic_table[index].redist_hw_base[cpu_id]; + /* redistributor enable */ + GIC_RDIST_WAKER(base) &= ~(1U << 1); + while (GIC_RDIST_WAKER(base) & (1 << 2)) + { + ; + } + + /* Disable all sgi and ppi interrupt */ + GIC_RDISTSGI_ICENABLER0(base) = 0xFFFFFFFF; + arm_gicv3_wait_rwp(0, 0); + + /* Clear all inetrrupt pending */ + GIC_RDISTSGI_ICPENDR0(base) = 0xFFFFFFFF; + + /* the corresponding interrupt is Group 1 or Non-secure Group 1. */ + GIC_RDISTSGI_IGROUPR0(base, 0) = 0xFFFFFFFF; + GIC_RDISTSGI_IGRPMODR0(base, 0) = 0xFFFFFFFF; + + /* Configure default priorities for SGI 0:15 and PPI 16:31. */ + for (i = 0; i < 32; i += 4) + { + GIC_RDISTSGI_IPRIORITYR(base, i) = 0xa0a0a0a0U; + } + + /* Trigger level for PPI interrupts*/ + GIC_RDISTSGI_ICFGR1(base) = 0x0U; // PPI is level-sensitive. + return 0; +} + +int arm_gic_cpu_init(rt_uint32_t index) +{ + rt_uint32_t value; + RT_ASSERT(index < ARM_GIC_MAX_NR); + + value = arm_gic_get_system_register_enable_mask(index); + value |= (1U << 0); + arm_gic_set_system_register_enable_mask(index, value); + __set_gicv3_reg(ICC_CTLR, 0); + + arm_gic_set_interface_prior_mask(index, 0xFFU); + + /* Enable group1 interrupt */ + value = 0x1U; + __set_gicv3_reg(ICC_IGRPEN1, value); + + arm_gic_set_binary_point(0, 0); + + /* ICC_BPR0_EL1 determines the preemption group for both + Group 0 and Group 1 interrupts. + */ + value = 0x1U; + __set_gicv3_reg(ICC_CTLR, value); + + return 0; +} + +#ifdef RT_USING_SMP +void arm_gic_secondary_cpu_init(void) +{ + arm_gic_redist_init(0); + + arm_gic_cpu_init(0); +} +#endif + +void arm_gic_dump_type(rt_uint32_t index) +{ + unsigned int gic_type; + + gic_type = GIC_DIST_TYPE(_gic_table[index].dist_hw_base); + rt_kprintf("GICv%d on %p, max IRQs: %d, %s security extension(%08x)\n", + (GIC_DIST_ICPIDR2(_gic_table[index].dist_hw_base) >> 4U) & 0xfUL, + _gic_table[index].dist_hw_base, + _gic_max_irq, + gic_type & (1U << 10U) ? 
"has" : "no", + gic_type); +} + +void arm_gic_dump(rt_uint32_t index) +{ + unsigned int i, k; + + k = arm_gic_get_high_pending_irq(0); + rt_kprintf("--- high pending priority: %d(%08x)\n", k, k); + rt_kprintf("--- hw mask ---\n"); + for (i = 0U; i < _gic_max_irq / 32U; i++) + { + rt_kprintf("0x%08x, ", + GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, + i * 32U)); + } + rt_kprintf("\n--- hw pending ---\n"); + for (i = 0U; i < _gic_max_irq / 32U; i++) + { + rt_kprintf("0x%08x, ", + GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, + i * 32U)); + } + rt_kprintf("\n--- hw active ---\n"); + for (i = 0U; i < _gic_max_irq / 32U; i++) + { + rt_kprintf("0x%08x, ", + GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, + i * 32U)); + } + rt_kprintf("\n"); +} + +long gic_dump(void) +{ + arm_gic_dump_type(0); + arm_gic_dump(0); + + return 0; +} +MSH_CMD_EXPORT(gic_dump, show gic status); diff --git a/libcpu/arm/cortex-a/gicv3.h b/libcpu/arm/cortex-a/gicv3.h new file mode 100644 index 0000000000..813a4f4c8d --- /dev/null +++ b/libcpu/arm/cortex-a/gicv3.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2006-2021, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2013-07-20 Bernard first version + */ + +#ifndef __GIC_V3_H__ +#define __GIC_V3_H__ + +#include +#include + +#define __get_gicv3_reg(CR, Rt) __asm__ volatile("MRC " CR \ + : "=r"(Rt) \ + : \ + : "memory") +#define __set_gicv3_reg(CR, Rt) __asm__ volatile("MCR " CR \ + : \ + : "r"(Rt) \ + : "memory") + + +/* AArch32 System register interface to GICv3 */ +#define ICC_IAR0 "p15, 0, %0, c12, c8, 0" +#define ICC_IAR1 "p15, 0, %0, c12, c12, 0" +#define ICC_EOIR0 "p15, 0, %0, c12, c8, 1" +#define ICC_EOIR1 "p15, 0, %0, c12, c12, 1" +#define ICC_HPPIR0 "p15, 0, %0, c12, c8, 2" +#define ICC_HPPIR1 "p15, 0, %0, c12, c12, 2" +#define ICC_BPR0 "p15, 0, %0, c12, c8, 3" +#define ICC_BPR1 "p15, 0, %0, c12, c12, 3" +#define ICC_DIR "p15, 0, %0, c12, c11, 1" +#define ICC_PMR "p15, 0, %0, c4, c6, 0" +#define ICC_RPR "p15, 0, %0, c12, c11, 3" +#define ICC_CTLR "p15, 0, %0, c12, c12, 4" +#define ICC_MCTLR "p15, 6, %0, c12, c12, 4" +#define ICC_SRE "p15, 0, %0, c12, c12, 5" +#define ICC_HSRE "p15, 4, %0, c12, c9, 5" +#define ICC_MSRE "p15, 6, %0, c12, c12, 5" +#define ICC_IGRPEN0 "p15, 0, %0, c12, c12, 6" +#define ICC_IGRPEN1 "p15, 0, %0, c12, c12, 7" +#define ICC_MGRPEN1 "p15, 6, %0, c12, c12, 7" + +#define __REG32(x) (*((volatile unsigned int*)((rt_uint32_t)x))) + +#define ROUTED_TO_ALL (1) +#define ROUTED_TO_SPEC (0) + +/** Macro to access the Distributor Control Register (GICD_CTLR) +*/ +#define GICD_CTLR_RWP (1<<31) +#define GICD_CTLR_E1NWF (1<<7) +#define GICD_CTLR_DS (1<<6) +#define GICD_CTLR_ARE_NS (1<<5) +#define GICD_CTLR_ARE_S (1<<4) +#define GICD_CTLR_ENGRP1S (1<<2) +#define GICD_CTLR_ENGRP1NS (1<<1) +#define GICD_CTLR_ENGRP0 (1<<0) + +/** Macro to access the Redistributor Control Register (GICR_CTLR) +*/ +#define GICR_CTLR_UWP (1<<31) +#define GICR_CTLR_DPG1S (1<<26) +#define GICR_CTLR_DPG1NS (1<<25) +#define GICR_CTLR_DPG0 (1<<24) +#define GICR_CTLR_RWP (1<<3) +#define GICR_CTLR_IR (1<<2) +#define GICR_CTLR_CES (1<<1) +#define GICR_CTLR_EnableLPI (1<<0) + +/** Macro to access the Generic Interrupt Controller Interface (GICC) +*/ +#define GIC_CPU_CTRL(hw_base) __REG32((hw_base) + 0x00U) +#define GIC_CPU_PRIMASK(hw_base) __REG32((hw_base) + 0x04U) +#define GIC_CPU_BINPOINT(hw_base) __REG32((hw_base) + 0x08U) +#define GIC_CPU_INTACK(hw_base) __REG32((hw_base) + 0x0cU) +#define 
GIC_CPU_EOI(hw_base) __REG32((hw_base) + 0x10U) +#define GIC_CPU_RUNNINGPRI(hw_base) __REG32((hw_base) + 0x14U) +#define GIC_CPU_HIGHPRI(hw_base) __REG32((hw_base) + 0x18U) +#define GIC_CPU_IIDR(hw_base) __REG32((hw_base) + 0xFCU) + +/** Macro to access the Generic Interrupt Controller Distributor (GICD) +*/ +#define GIC_DIST_CTRL(hw_base) __REG32((hw_base) + 0x000U) +#define GIC_DIST_TYPE(hw_base) __REG32((hw_base) + 0x004U) +#define GIC_DIST_IGROUP(hw_base, n) __REG32((hw_base) + 0x080U + ((n)/32U) * 4U) +#define GIC_DIST_ENABLE_SET(hw_base, n) __REG32((hw_base) + 0x100U + ((n)/32U) * 4U) +#define GIC_DIST_ENABLE_CLEAR(hw_base, n) __REG32((hw_base) + 0x180U + ((n)/32U) * 4U) +#define GIC_DIST_PENDING_SET(hw_base, n) __REG32((hw_base) + 0x200U + ((n)/32U) * 4U) +#define GIC_DIST_PENDING_CLEAR(hw_base, n) __REG32((hw_base) + 0x280U + ((n)/32U) * 4U) +#define GIC_DIST_ACTIVE_SET(hw_base, n) __REG32((hw_base) + 0x300U + ((n)/32U) * 4U) +#define GIC_DIST_ACTIVE_CLEAR(hw_base, n) __REG32((hw_base) + 0x380U + ((n)/32U) * 4U) +#define GIC_DIST_PRI(hw_base, n) __REG32((hw_base) + 0x400U + ((n)/4U) * 4U) +#define GIC_DIST_TARGET(hw_base, n) __REG32((hw_base) + 0x800U + ((n)/4U) * 4U) +#define GIC_DIST_CONFIG(hw_base, n) __REG32((hw_base) + 0xc00U + ((n)/16U) * 4U) +#define GIC_DIST_SOFTINT(hw_base) __REG32((hw_base) + 0xf00U) +#define GIC_DIST_CPENDSGI(hw_base, n) __REG32((hw_base) + 0xf10U + ((n)/4U) * 4U) +#define GIC_DIST_SPENDSGI(hw_base, n) __REG32((hw_base) + 0xf20U + ((n)/4U) * 4U) +#define GIC_DIST_ICPIDR2(hw_base) __REG32((hw_base) + 0xfe8U) +#define GIC_DIST_IROUTER_LOW(hw_base, n) __REG32((hw_base) + 0x6000U + (n)*8U) +#define GIC_DIST_IROUTER_HIGH(hw_base, n) __REG32((hw_base) + 0x6000U + (n)*8U + 4) + +/* SGI base address is at 64K offset from Redistributor base address */ +#define GIC_RSGI_OFFSET 0x10000 + +/** Macro to access the Generic Interrupt Controller Redistributor (GICD) +*/ +#define GIC_RDIST_CTRL(hw_base) __REG32((hw_base) + 0x000U) +#define GIC_RDIST_IIDR(hw_base) __REG32((hw_base) + 0x004U) +#define GIC_RDIST_TYPER(hw_base) __REG32((hw_base) + 0x008U) +#define GIC_RDIST_TSTATUSR(hw_base) __REG32((hw_base) + 0x010U) +#define GIC_RDIST_WAKER(hw_base) __REG32((hw_base) + 0x014U) +#define GIC_RDIST_SETLPIR(hw_base) __REG32((hw_base) + 0x040U) +#define GIC_RDIST_CLRLPIR(hw_base) __REG32((hw_base) + 0x048U) +#define GIC_RDIST_PROPBASER(hw_base) __REG32((hw_base) + 0x070U) +#define GIC_RDIST_PENDBASER(hw_base) __REG32((hw_base) + 0x078U) +#define GIC_RDIST_INVLPIR(hw_base) __REG32((hw_base) + 0x0A0U) +#define GIC_RDIST_INVALLR(hw_base) __REG32((hw_base) + 0x0B0U) +#define GIC_RDIST_SYNCR(hw_base) __REG32((hw_base) + 0x0C0U) + +#define GIC_RDISTSGI_IGROUPR0(hw_base, n) __REG32((hw_base) + GIC_RSGI_OFFSET + 0x080U + (n)*4U) +#define GIC_RDISTSGI_ISENABLER0(hw_base) __REG32((hw_base) + GIC_RSGI_OFFSET + 0x100U) +#define GIC_RDISTSGI_ICENABLER0(hw_base) __REG32((hw_base) + GIC_RSGI_OFFSET + 0x180U) +#define GIC_RDISTSGI_ISPENDR0(hw_base) __REG32((hw_base) + GIC_RSGI_OFFSET + 0x200U) +#define GIC_RDISTSGI_ICPENDR0(hw_base) __REG32((hw_base) + GIC_RSGI_OFFSET + 0x280U) +#define GIC_RDISTSGI_ISACTIVER0(hw_base) __REG32((hw_base) + GIC_RSGI_OFFSET + 0x300U) +#define GIC_RDISTSGI_ICACTIVER0(hw_base) __REG32((hw_base) + GIC_RSGI_OFFSET + 0x380U) +#define GIC_RDISTSGI_IPRIORITYR(hw_base, n) __REG32((hw_base) + GIC_RSGI_OFFSET + 0x400U + ((n) / 4U) * 4U) +#define GIC_RDISTSGI_ICFGR0(hw_base) __REG32((hw_base) + GIC_RSGI_OFFSET + 0xC00U) +#define GIC_RDISTSGI_ICFGR1(hw_base) 
__REG32((hw_base) + GIC_RSGI_OFFSET + 0xC04U) +#define GIC_RDISTSGI_IGRPMODR0(hw_base, n) __REG32((hw_base) + GIC_RSGI_OFFSET + 0xD00U + (n)*4) +#define GIC_RDISTSGI_NSACR(hw_base) __REG32((hw_base) + GIC_RSGI_OFFSET + 0xE00U) + +#define GIC_RSGI_AFF1_OFFSET 16 +#define GIC_RSGI_AFF2_OFFSET 32 +#define GIC_RSGI_AFF3_OFFSET 48 + +rt_uint32_t arm_gic_cpumask_to_affval(rt_uint32_t *cpu_mask, rt_uint32_t *cluster_id, rt_uint32_t *target_list); +rt_uint64_t get_main_cpu_affval(void); +int arm_gic_get_active_irq(rt_uint32_t index); +void arm_gic_ack(rt_uint32_t index, int irq); + +void arm_gic_mask(rt_uint32_t index, int irq); +void arm_gic_umask(rt_uint32_t index, int irq); + +rt_uint32_t arm_gic_get_pending_irq(rt_uint32_t index, int irq); +void arm_gic_set_pending_irq(rt_uint32_t index, int irq); +void arm_gic_clear_pending_irq(rt_uint32_t index, int irq); + +void arm_gic_set_configuration(rt_uint32_t index, int irq, uint32_t config); +rt_uint32_t arm_gic_get_configuration(rt_uint32_t index, int irq); + +void arm_gic_clear_active(rt_uint32_t index, int irq); + +void arm_gic_set_cpu(rt_uint32_t index, int irq, unsigned int cpumask); +rt_uint32_t arm_gic_get_target_cpu(rt_uint32_t index, int irq); + +void arm_gic_set_priority(rt_uint32_t index, int irq, rt_uint32_t priority); +rt_uint32_t arm_gic_get_priority(rt_uint32_t index, int irq); + +void arm_gic_set_interface_prior_mask(rt_uint32_t index, rt_uint32_t priority); +rt_uint32_t arm_gic_get_interface_prior_mask(rt_uint32_t index); + +void arm_gic_set_binary_point(rt_uint32_t index, rt_uint32_t binary_point); +rt_uint32_t arm_gic_get_binary_point(rt_uint32_t index); + +rt_uint32_t arm_gic_get_irq_status(rt_uint32_t index, int irq); + +void arm_gic_send_affinity_sgi(rt_uint32_t index, int irq, rt_uint32_t cpu_mask, rt_uint32_t routing_mode); +rt_uint32_t arm_gic_get_high_pending_irq(rt_uint32_t index); + +rt_uint32_t arm_gic_get_interface_id(rt_uint32_t index); + +void arm_gic_set_group(rt_uint32_t index, int irq, rt_uint32_t group); +rt_uint32_t arm_gic_get_group(rt_uint32_t index, int irq); + +int arm_gic_redist_address_set(rt_uint32_t index, rt_uint32_t redist_addr, rt_uint32_t cpu_id); +int arm_gic_cpu_interface_address_set(rt_uint32_t index, rt_uint32_t interface_addr, rt_uint32_t cpu_id); +int arm_gic_dist_init(rt_uint32_t index, rt_uint32_t dist_base, int irq_start); +int arm_gic_cpu_init(rt_uint32_t index); +int arm_gic_redist_init(rt_uint32_t index); + +void arm_gic_dump_type(rt_uint32_t index); +void arm_gic_dump(rt_uint32_t index); + +void arm_gic_set_system_register_enable_mask(rt_uint32_t index, rt_uint32_t value); +rt_uint32_t arm_gic_get_system_register_enable_mask(rt_uint32_t index); +void arm_gic_secondary_cpu_init(void); +#endif + diff --git a/libcpu/arm/cortex-a/interrupt.c b/libcpu/arm/cortex-a/interrupt.c index 65e51fa3ae..474a23fee1 100644 --- a/libcpu/arm/cortex-a/interrupt.c +++ b/libcpu/arm/cortex-a/interrupt.c @@ -12,8 +12,12 @@ #include #include #include "interrupt.h" -#include "gic.h" +#ifdef RT_USING_GIC_V2 +#include "gic.h" +#else +#include "gicv3.h" +#endif /* exception and interrupt handler table */ struct rt_irq_desc isr_table[MAX_HANDLERS]; @@ -34,6 +38,7 @@ void rt_hw_vector_init(void) rt_cpu_vector_set_base((unsigned int)&system_vectors); } +#ifdef RT_USING_GIC_V2 /** * This function will initialize hardware interrupt */ @@ -58,6 +63,33 @@ void rt_hw_interrupt_init(void) arm_gic_dist_init(0, gic_dist_base, gic_irq_start); arm_gic_cpu_init(0, gic_cpu_base); } +#else +/** + * This function will initialize 
hardware interrupt + * Called by the primary cpu(cpu0) + */ +void rt_hw_interrupt_init(void) +{ + rt_uint32_t gic_dist_base; + rt_uint32_t gic_irq_start; + + /* initialize vector table */ + rt_hw_vector_init(); + + /* initialize exceptions table */ + rt_memset(isr_table, 0x00, sizeof(isr_table)); + + /* initialize ARM GIC */ + gic_dist_base = platform_get_gic_dist_base(); + gic_irq_start = GIC_IRQ_START; + + arm_gic_dist_init(0, gic_dist_base, gic_irq_start); + + arm_gic_cpu_init(0); + arm_gic_redist_init(0); +} + +#endif /** * This function will mask a interrupt. @@ -245,7 +277,7 @@ unsigned int rt_hw_interrupt_get_prior_group_bits(void) * @param old_handler the old interrupt service routine */ rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler, - void *param, const char *name) + void *param, const char *name) { rt_isr_handler_t old_handler = RT_NULL; @@ -269,7 +301,11 @@ rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler, #ifdef RT_USING_SMP void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask) { +#ifdef RT_USING_GIC_V2 arm_gic_send_sgi(0, ipi_vector, cpu_mask, 0); +#else + arm_gic_send_affinity_sgi(0, ipi_vector, cpu_mask, ROUTED_TO_SPEC); +#endif } void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler)
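
Porting note (not part of the patch itself): gicv3.c leaves arm_gic_cpumask_to_affval() and get_main_cpu_affval() as RT_WEAK stubs that return 0, and the new rt_hw_interrupt_init() expects the BSP to supply platform_get_gic_dist_base(), GIC_IRQ_START and the per-CPU redistributor frames via arm_gic_redist_address_set(). The sketch below shows one way a single-cluster BSP might provide these hooks; the base addresses (0x2F000000 / 0x2F100000), the 2 x 64 KiB redistributor stride and the helper name platform_gic_redist_register() are illustrative assumptions only, not values defined by this patch.

/*
 * BSP-side hooks for the GICv3 driver -- a minimal single-cluster sketch.
 * All addresses below are placeholders; take the real values from the SoC manual.
 */
#include <rtthread.h>
#include "gicv3.h"

#ifndef RT_CPUS_NR
#define RT_CPUS_NR 1
#endif

/* GICD base address consumed by rt_hw_interrupt_init() */
rt_uint32_t platform_get_gic_dist_base(void)
{
    return 0x2F000000U; /* hypothetical GICD base */
}

/* Affinity value of the boot CPU; arm_gic_dist_init() routes all SPIs here */
rt_uint64_t get_main_cpu_affval(void)
{
    return 0ULL; /* Aff3.Aff2.Aff1.Aff0 = 0.0.0.0 on this hypothetical SoC */
}

/*
 * Translate an RT-Thread cpu_mask into one (cluster_id, target_list) pair per
 * call, as arm_gic_send_affinity_sgi() expects: return 1 while data is valid,
 * 0 once the mask is consumed. With a single cluster, Aff0 equals the CPU
 * index, so the mask maps onto target_list directly.
 */
rt_uint32_t arm_gic_cpumask_to_affval(rt_uint32_t *cpu_mask, rt_uint32_t *cluster_id,
                                      rt_uint32_t *target_list)
{
    if (*cpu_mask == 0U)
        return 0U;

    *cluster_id  = 0U;         /* Aff1/Aff2/Aff3 all zero for one cluster */
    *target_list = *cpu_mask;  /* bit n selects the PE with Aff0 == n */
    *cpu_mask    = 0U;         /* whole mask handled in a single pass */
    return 1U;
}

/* Register each CPU's redistributor frame before rt_hw_interrupt_init() runs;
 * 0x2F100000 and the 2 x 64 KiB per-CPU stride are illustrative. */
void platform_gic_redist_register(void)
{
    rt_uint32_t cpu;

    for (cpu = 0U; cpu < RT_CPUS_NR; cpu++)
    {
        arm_gic_redist_address_set(0, 0x2F100000U + cpu * 0x20000U, cpu);
    }
}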
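
Usage note: once rt_hw_interrupt_init() has initialized the distributor, the local redistributor and the CPU interface, peripheral drivers keep using the interrupt.c API exactly as they did with GICv2. A minimal example is sketched below; the IRQ number UART0_IRQn, the handler uart0_isr() and the init hook uart0_irq_setup() are hypothetical names used only for illustration.

/* Hypothetical peripheral wiring on top of the GICv3-backed interrupt.c API. */
#include <rtthread.h>
#include <rthw.h>
#include "interrupt.h"

#define UART0_IRQn 42 /* hypothetical SPI number (>= 32) */

static void uart0_isr(int vector, void *param)
{
    /* clear the device-level interrupt here; the common IRQ path writes EOI */
}

static int uart0_irq_setup(void)
{
    rt_hw_interrupt_install(UART0_IRQn, uart0_isr, RT_NULL, "uart0");
    rt_hw_interrupt_umask(UART0_IRQn);
    return 0;
}
INIT_DEVICE_EXPORT(uart0_irq_setup);

On SMP builds, rt_hw_ipi_send() now forwards to arm_gic_send_affinity_sgi() with ROUTED_TO_SPEC, so the cpu_mask is converted to affinity values by the BSP's arm_gic_cpumask_to_affval(), as in the previous sketch.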