[components] remove vmm (#8269)

This commit is contained in:
Meco Man 2023-11-21 00:41:54 -05:00 committed by GitHub
parent eff3560d64
commit df29a13d89
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 2 additions and 2139 deletions

View File

@ -16,13 +16,7 @@
#include <mmu.h>
#include <interrupt.h>
#ifdef RT_USING_VMM
#include <vmm.h>
static rt_uint32_t DMTIMER = 0;
#define TIMER_HW_BASE (DMTIMER)
#else
#define TIMER_HW_BASE AM33XX_DMTIMER_7_REGS
#endif
#define DMTIMER_TCLR_AR (0x00000002u)
#define DMTIMER_TCLR_CE (0x00000040u)
@ -55,11 +49,7 @@ static void timer_clk_init(void)
{
unsigned long prcm_base;
#ifdef RT_USING_VMM
prcm_base = vmm_find_iomap("PRCM");
#else
prcm_base = AM33XX_PRCM_REGS;
#endif
/* software forced wakeup */
CM_PER_L4LS_CLKSTCTRL_REG(prcm_base) |= 0x2;
@ -100,10 +90,6 @@ int rt_hw_timer_init(void)
{
rt_uint32_t counter;
#ifdef RT_USING_VMM
DMTIMER = vmm_find_iomap("TIMER7");
#endif
timer_clk_init();
/* soft reset the timer */
@ -164,13 +150,8 @@ void rt_hw_board_init(void)
void rt_hw_cpu_reset(void)
{
unsigned long prcm_base;
unsigned long prcm_base = AM33XX_PRCM_REGS;
#ifdef RT_USING_VMM
prcm_base = vmm_find_iomap("PRCM");
#else
prcm_base = AM33XX_PRCM_REGS;
#endif
REG32(PRM_DEVICE(prcm_base)) = 0x1;
RT_ASSERT(0);
}

View File

@ -1,15 +0,0 @@
# RT-Thread building script for component
from building import *

# Directory containing this script; used to anchor the include paths below.
cwd = GetCurrentDir()

# All C sources in this directory plus the rshell utility.
src = Glob('*.c')
src += Glob('utilities/rshell.c')

# The remote file system helper is only compiled when RT_USING_VMM_RFS is set.
if GetDepend('RT_USING_VMM_RFS'):
    src += Glob('utilities/rfs.c')

# Expose this directory and its shared headers to the C preprocessor.
# NOTE(review): `os` is not imported here -- presumably re-exported by
# `from building import *`; confirm before reusing this pattern.
CPPPATH = [cwd, os.path.join(cwd, 'share_hdr')]

# The whole group is gated on RT_USING_VMM.
group = DefineGroup('VMM', src, depend = ['RT_USING_VMM'], CPPPATH = CPPPATH)

Return('group')

View File

@ -1,37 +0,0 @@
From 848bdea67f5fc201cd05687f207e5f8f42b0990d Mon Sep 17 00:00:00 2001
From: Grissiom <chaos.proton@gmail.com>
Date: Thu, 3 Apr 2014 16:51:58 +0800
Subject: [PATCH 2/2] arm: gic: correct the cpu map on gic_raise_softirq for UP
system
The CPU mask on UP system is empty, so if we want to raise softirq on UP
system, designate CPU0 to the map.
Maybe the more correct way is to fix the gic_get_cpumask.
Signed-off-by: Grissiom <chaos.proton@gmail.com>
---
arch/arm/common/gic.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index a9d7357..5da382b 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -858,6 +858,13 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
*/
dsb();
+ /*
+ * On UP system, realview-pb-a8 for example, the CPU mask is empty. The
+ * softirq are always handled on CPU0.
+ */
+ if (map == 0) {
+ map = 1;
+ }
/* this always happens on GIC0 */
writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
--
1.8.4

View File

@ -1,172 +0,0 @@
/*
* VMM startup file.
*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-06-15 Bernard the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include "board.h"
#include "vmm.h"
#include "vmm_context.h"
extern void rt_hw_interrupt_init(void);
extern void rt_application_init(void);
void vmm_entry(struct vmm_entry_param* param) rt_section(".vmm_init");
struct rt_thread vmm_thread rt_section(".bss.share.vmm");
extern rt_uint8_t vmm_stack_start;
extern rt_uint8_t vmm_stack_end;
/* Initialise the statically-allocated VMM thread, mark it ready, and make it
 * the scheduler's notion of the current thread.
 *
 * thread: storage for the thread control block (vmm_thread in this file)
 * name:   thread name shown in thread listings
 *
 * NOTE(review): entry/parameter are RT_NULL -- presumably because execution
 * is already running on the VMM stack when this is called and the TCB only
 * records that context for the scheduler; confirm against the loader.
 */
void vmm_thread_init(struct rt_thread *thread, const char *name)
{
    extern struct rt_thread *rt_current_thread;

    /* stack bounds come from linker-provided symbols */
    rt_thread_init(thread, name, RT_NULL, RT_NULL,
                   &vmm_stack_start, &vmm_stack_end - &vmm_stack_start,
                   RT_THREAD_PRIORITY_MAX - 1, 10);

    /* set thread to ready status but not switch to */
    rt_thread_startup(thread);

    /* set current thread as vmm thread */
    rt_current_thread = thread;
}
#ifdef VMM_VERIFY_GUEST
/* Monitor thread body: every quarter second, sanity-check the guest context
 * saved on the vmm thread's stack (see vmm_verify_guest_status). */
static void _verify_guest(void *p)
{
    while (1)
    {
        rt_thread_delay(RT_TICK_PER_SECOND/4);
        vmm_verify_guest_status(vmm_thread.sp);
    }
}

/* Spawn the "vmon" thread running _verify_guest.  Creation failure is
 * silently ignored: monitoring is best-effort. */
static void vmm_create_monitor(void)
{
    rt_thread_t tid;

    tid = rt_thread_create("vmon",
                           _verify_guest, RT_NULL,
                           1024, 8, 20);
    if (tid)
        rt_thread_startup(tid);
}
#endif
#ifdef RT_VMM_USING_DOMAIN
extern unsigned long guest_domain_val;
extern unsigned long vmm_domain_val;
#endif
/* Forward declaration: the function is placed in the dedicated .vmm_glue
 * section; noinline keeps the body out of callers so that placement has
 * effect. */
static void vmm_entry_glue(rt_uint32_t level,
                           unsigned int vmm_domain,
                           unsigned int kernel_domain)
/* inline would make the section setting meaningless */
__attribute__((noinline))
rt_section(".vmm_glue");
/* Final hand-off out of VMM initialisation: run the first schedule, then
 * (when domain protection is on) drop access bits and re-enable interrupts.
 *
 * level:         interrupt state saved by the caller, restored on exit
 * vmm_domain:    index of the VMM's own domain (2 bits per domain field)
 * kernel_domain: index of the guest kernel's domain
 */
static void vmm_entry_glue(rt_uint32_t level,
                           unsigned int vmm_domain,
                           unsigned int kernel_domain)
{
    rt_schedule();

#ifdef RT_VMM_USING_DOMAIN
    /* protect us from the guest code, but leave the shared region permission
     */
    guest_domain_val &= ~(0x3 << (vmm_domain * 2));
    /* don't touch the guest kernel space */
    vmm_domain_val &= ~(0x3 << (kernel_domain * 2));
#endif

    rt_hw_interrupt_enable(level);
}
/* VMM entry point, called from the startup assembly.  Mirrors a normal
 * rtthread_startup(): brings up the iomap, shared context, interrupts,
 * board, timers, heap, scheduler and the application, then performs the
 * first schedule via vmm_entry_glue().
 *
 * param: loader-supplied iomap table plus (optionally) the domain layout.
 */
void vmm_entry(struct vmm_entry_param *param)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();

    /* set iomap */
    vmm_iomap_init(param->iomap);

    /* set VMM context address */
    vmm_context_init(&RT_VMM_SHARE->ctx);

    /* init hardware interrupt */
    rt_hw_interrupt_init();

    vmm_vector_init();

    /* init board */
    rt_hw_board_init();

    /* show version */
    rt_show_version();

    rt_kprintf("share ctx: %p(%x)\n",
               &RT_VMM_SHARE->ctx, sizeof(RT_VMM_SHARE->ctx));

    /* init timer system */
    rt_system_timer_init();

    {
        rt_uint32_t ttbr;
        /* read the translation table base (cp15 c2) purely for diagnostics */
        asm volatile ("mrc p15, 0, %0, c2, c0, 0\n"
                      : "=r"(ttbr));
        rt_kprintf("Linux TTBR: 0x%08x\n", ttbr);
        /*
         *rt_hw_cpu_dump_page_table((void*)((ttbr & (0xffffc000))
         *                                  - 0x80000000 + 0xC0000000));
         */
        /*rt_hw_cpu_dump_page_table((void*)(0xc0004000));*/
    }

#ifdef RT_VMM_USING_DOMAIN
    vmm_context_init_domain(param->domain);
#endif

    rt_kprintf("heap: 0x%p - 0x%p, %dKi bytes\n",
               (void*)HEAP_BEGIN, (void*)HEAP_END,
               ((int)HEAP_END - (int)HEAP_BEGIN) / 1024);
    /* init heap memory system */
    rt_system_heap_init((void*)HEAP_BEGIN, (void*)HEAP_END);

    /* init scheduler system */
    rt_system_scheduler_init();

    rt_kprintf("user application init.\n");
    /* init application */
    rt_application_init();

#ifdef VMM_VERIFY_GUEST
    vmm_create_monitor();
#endif

    rt_system_timer_thread_init();

    vmm_thread_init(&vmm_thread, "vmm");

#ifdef RT_VMM_USING_DOMAIN
    rt_kprintf("domain protect present\n");
#endif

    /* start scheduler */
    rt_kprintf("do the first scheduling...\n");
    vmm_entry_glue(level,
                   param->domain->vmm,
                   param->domain->kernel);
}

View File

@ -1,44 +0,0 @@
/*
* VMM startup file.
*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-06-15 Bernard the first version
*/
#ifndef __VMM_H__
#define __VMM_H__

#ifndef __ASSEMBLY__
#include <stddef.h> // for size_t
#endif

/* Enable the periodic guest-status sanity checks (see vmm_context.c). */
#define VMM_VERIFY_GUEST

#include <rtt_api.h>

#ifndef __ASSEMBLY__
/* Record the loader-supplied IO map (see vmm_io.c). */
void vmm_iomap_init(struct vmm_iomap *iomap);
/* Look up a mapped virtual address by entry name; returns 0 if not found. */
unsigned long vmm_find_iomap(const char *name);
/* Look up a mapped virtual address by physical address; 0 if not found. */
unsigned long vmm_find_iomap_by_pa(unsigned long pa);

/* Install and unmask the virtual-IRQ trigger handler (see vmm_vector.c). */
void vmm_vector_init(void);

/* If the rshell is run, we could not rt_kprintf in some situation because
 * write to a vbus channel *Would BLOCK*. So we cannot use it in interrupt
 * context, we cannot use it within the context of idle(vmm). */
#define vmm_debug(fmt, ...)
#define vmm_verbose(fmt, ...)
#define vmm_info(fmt, ...)
#endif

#define ARRAY_SIZE(ar) (sizeof(ar)/sizeof(ar[0]))

#endif

View File

@ -1,317 +0,0 @@
/*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-11-04 Grissiom add comment
*/
#include <rthw.h>
#include <rtthread.h>
#include <interrupt.h>
#include <log_trace.h>
#include <vmm.h>
#include "vmm_context.h"
struct rt_vmm_share_layout rt_vmm_share rt_section(".vmm.share");
volatile struct vmm_context *_vmm_context = RT_NULL;
/* Bind the shared vmm_context structure and reset it to a known state.
 *
 * context_addr: address of the struct vmm_context inside the shared area.
 */
void vmm_context_init(void *context_addr)
{
    _vmm_context = (struct vmm_context *)context_addr;
    rt_memset((void *)_vmm_context, 0x00, sizeof(struct vmm_context));
    /* When loading RT-Thread, the IRQ on the guest should be disabled. */
    _vmm_context->virq_status = 1;
}
#ifdef RT_VMM_USING_DOMAIN
unsigned long guest_domain_val rt_section(".bss.share");
unsigned long vmm_domain_val rt_section(".bss.share");
/* some RT-Thread code need to be called in the guest
* context(rt_thread_idle_excute for example). To simplify the code, we need a
* "super" domain mode to have access of both side. The code executed in super
* domain mode is restricted and should be harmless. */
unsigned long super_domain_val rt_section(".bss.share");
/* Derive the three domain-access (cp15 c3) values used while switching
 * between worlds:
 *   guest_domain_val - the value the guest was running with (read from c3)
 *   vmm_domain_val   - guest value plus client access to the VMM domains
 *   super_domain_val - vmm value plus access to guest kernel/user domains
 *
 * If the guest shares a domain with the VMM no isolation is possible and
 * all three values collapse to the guest's value.
 */
void vmm_context_init_domain(struct vmm_domain *domain)
{
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (guest_domain_val));

    rt_kprintf("Linux domain: kernel: %d, user: %d, io: %d\n"
               "VMM domain: vmm: %d, share: %d\n",
               domain->kernel, domain->user, domain->io,
               domain->vmm, domain->vmm_share);

    if (domain->kernel == domain->vmm ||
        domain->io == domain->vmm)
    {
        rt_kprintf("VMM and the guest share the same domain\n");
        super_domain_val = vmm_domain_val = guest_domain_val;
        return;
    }

    vmm_domain_val = guest_domain_val;

    /* become client to our own territory */
    vmm_domain_val |= (1 << (domain->vmm * 2)) | (1 << (domain->vmm_share * 2));

    super_domain_val = vmm_domain_val;
    /* super domain has access to both side */
    super_domain_val |= (1 << (domain->kernel * 2)) | (1 << (domain->user * 2));

    rt_kprintf("Original DAC: 0x%08x\n", guest_domain_val);
}
/* Write domain_val into the domain access control register (cp15 c3) and
 * return the previous value so the caller can restore it later with
 * vmm_context_restore_domain(). */
unsigned long vmm_context_enter_domain(unsigned long domain_val)
{
    unsigned long old_domain;

    /* read the old value before overwriting it */
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");

    return old_domain;
}
/* Restore a domain access value previously returned by
 * vmm_context_enter_domain(). */
void vmm_context_restore_domain(unsigned long domain_val)
{
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");
}
#endif
/* Mark virtual IRQ `irq` pending for the guest and mask it on the host so
 * it cannot fire again before the guest has handled it. */
void vmm_virq_pending(int irq)
{
    /* when running this piece of code, the guest is already suspended. So it's
     * safe to set the bits without locks. */
    _vmm_context->virq_pending[irq / 32] |= (1 << (irq % 32));
    _vmm_context->virq_pended = 1;
    /* mask this IRQ in host */
    rt_hw_interrupt_mask(irq);
}
/* If the guest has virtual IRQs enabled (virq_status clear) and at least
 * one is pending, raise the trigger interrupt so the guest services it. */
void vmm_virq_update(void)
{
    if ((!_vmm_context->virq_status) &&
        ( _vmm_context->virq_pended))
    {
        rt_hw_interrupt_trigger(RT_VMM_VIRQ_TRIGGER);
    }
}
/** check the guest IRQ status
 *
 * @return 0 on guest should handle IRQ, -1 on should restore the guest context
 * normally.
 */
int vmm_virq_check(void)
{
    /* guest IRQs enabled (status clear) and something pending? */
    if (_vmm_context->virq_status == 0 && _vmm_context->virq_pended != 0)
        return 0;

    return -1;
}
/* 10 = len("%08x, ") */
/* File-scope scratch buffer for rendering the pending bitmap as text. */
static char _vmbuf[10*ARRAY_SIZE(_vmm_context->virq_pending)];
/* Dump the virtual IRQ state (status, pended flag, pending bitmap) via
 * vmm_info().  Note vmm_info expands to nothing by default (see vmm.h). */
void vmm_dump_virq(void)
{
    int i, s;

    vmm_info("---- virtual IRQ ----\n");
    vmm_info(" status: %08x, pended: %08x, pending:\n",
             _vmm_context->virq_status, _vmm_context->virq_pended);
    for (s = 0, i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++)
    {
        s += rt_snprintf(_vmbuf+s, sizeof(_vmbuf)-s,
                         "%08x, ", _vmm_context->virq_pending[i]);
    }
    /* NOTE(review): "%.*s" expects an int precision but sizeof yields
     * size_t; harmless while vmm_info is a no-op macro, but cast to int
     * if these traces are ever re-enabled. */
    vmm_info("%.*s\n", sizeof(_vmbuf), _vmbuf);
    vmm_info("---- virtual IRQ ----\n");
}
/* Consistency check: virq_pended must be set if and only if at least one
 * bit is set in the pending bitmap.  Returns non-zero when coherent. */
int vmm_virq_coherence_ok(void)
{
    int i, res;
    int should_pend = 0;

    /* OR together all pending words; non-zero means "should be pended" */
    for (i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++)
    {
        should_pend |= _vmm_context->virq_pending[i];
    }

    res = (_vmm_context->virq_pended == !!should_pend);
    if (!res)
    {
        vmm_info("--- %x %x, %x\n",
                 _vmm_context->virq_pended, should_pend, !!should_pend);
    }

    return res;
}
extern struct rt_thread vmm_thread;
/* Print the guest's saved CPSR/PC/LR and stack pointer.  The saved frame
 * lives just below sp+1, hence sp+1 is reported as the SP value.  Enters
 * the super domain first because the frame is stored in guest memory. */
void vmm_show_guest_reg(void)
{
    struct rt_hw_stack *sp = vmm_thread.sp;
#ifdef RT_VMM_USING_DOMAIN
    unsigned long old_domain;

    old_domain = vmm_context_enter_domain(super_domain_val);
#endif
    vmm_info("CPSR: %08x, PC: %08x, LR: %08x, SP: %08x\n",
             sp->cpsr, sp->pc, sp->lr, sp+1);
#ifdef RT_VMM_USING_DOMAIN
    vmm_context_restore_domain(old_domain);
#endif
}
/* Print the live domain access register plus, when domain protection is
 * enabled, the three precomputed domain values. */
void vmm_dump_domain(void)
{
    unsigned long dac;

    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (dac));
    vmm_info("current DAC: %08x\n", dac);
#ifdef RT_VMM_USING_DOMAIN
    vmm_info("guest DAC: %08x, RTT DAC: %08x, super DAC: %08x\n",
             guest_domain_val, vmm_domain_val, super_domain_val);
#endif
}
/* One-stop guest state dump: registers, virtual IRQ state and domains. */
void vmm_show_guest(void)
{
    vmm_show_guest_reg();
    vmm_dump_virq();
    vmm_dump_domain();
}

#ifdef RT_USING_FINSH
#include <finsh.h>
/* Expose the dump as the `vmm` shell command. */
FINSH_FUNCTION_EXPORT_ALIAS(vmm_show_guest, vmm, show vmm status);
#endif
/* Return 0 when the mode field of `cpsr` is one of the recognised ARM
 * processor modes, 1 otherwise. */
static int _bad_cpsr(unsigned long cpsr)
{
    switch (cpsr & MODEMASK)
    {
    case USERMODE:
    case FIQMODE:
    case IRQMODE:
    case SVCMODE:
#ifdef CPU_HAS_MONITOR_MODE
    case MONITORMODE:
#endif
    case ABORTMODE:
#ifdef CPU_HAS_HYP_MODE
    case HYPMODE:
#endif
    case UNDEFMODE:
    case MODEMASK:
        /* a known mode: not bad */
        return 0;
    default:
        return 1;
    }
}
/* Sanity-check the guest context saved at `sp` and dump the full guest
 * state when anything looks wrong.
 *
 * sp: pointer to the guest's saved register frame.
 *
 * Checks: the saved CPSR holds a valid mode; the A/I/F bits and mode/PC
 * combinations look sane for a guest.  Some checks are intentionally
 * disabled (`&& 0`, `#if 0`) because they can misfire while the guest is
 * suspended mid-transition.
 *
 * Fix vs. original: the log messages spelled "WARING"; corrected to
 * "WARNING" (vmm_info is a no-op macro by default, so this is inert
 * unless tracing is re-enabled).
 */
void vmm_verify_guest_status(struct rt_hw_stack *sp)
{
    int dump_vmm = 0;
    unsigned long cpsr;
#ifdef RT_VMM_USING_DOMAIN
    unsigned long old_domain;

    /* the saved frame lives in guest memory: enter the super domain */
    old_domain = vmm_context_enter_domain(super_domain_val);
#endif

    cpsr = sp->cpsr;
    if (_bad_cpsr(cpsr))
    {
        vmm_info("=================================\n");
        vmm_info("VMM WARNING: bad CPSR in guest\n");
        dump_vmm = 1;
    }
    else
    {
        /* disabled with `&& 0`: a set A bit is currently tolerated */
        if (cpsr & A_Bit && 0)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: A bit is set in guest\n");
            dump_vmm = 1;
        }
        if ((cpsr & I_Bit) && (sp->pc <= VMM_BEGIN))
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: IRQ disabled in guest\n");
            dump_vmm = 1;
        }
        if (cpsr & F_Bit)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: FIQ disabled in guest\n");
            dump_vmm = 1;
        }
        if ((cpsr & MODEMASK) == USERMODE)
        {
            if (_vmm_context->virq_status & 1)
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARNING: VIRQ disabled in user mode\n");
                dump_vmm = 1;
            }
            if ((sp->pc > 0xbf000000) && (sp->pc < 0xffff0000))
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARNING: executing kernel code in usr mode\n");
                dump_vmm = 1;
            }
            /* FIXME: when the guest is suspended in user mode and its
             * interrupts come, this can be misleading. */
#if 0
            if (_vmm_context->virq_pended)
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARNING: VIRQ pended in user mode\n");
                dump_vmm = 1;
            }
#endif
        }
        else if ((cpsr & MODEMASK) == SVCMODE && sp->pc < 0xbf000000)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: executing usr code in svc mode\n");
            dump_vmm = 1;
        }
    }

#if 0
    if (!vmm_virq_coherence_ok())
    {
        vmm_info("=================================\n");
        vmm_info("VMM WARNING: bad VIRQ status\n");
        dump_vmm = 1;
    }
#endif

    if (dump_vmm)
    {
        vmm_show_guest();
        vmm_info("=================================\n");
    }

#ifdef RT_VMM_USING_DOMAIN
    vmm_context_restore_domain(old_domain);
#endif
}

View File

@ -1,28 +0,0 @@
/*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-11-04 Grissiom add comment
*/
#ifndef __VMM_CONTEXT_H__
#define __VMM_CONTEXT_H__

#include <armv7.h> // for struct rt_hw_stack
#include "vmm.h"

/* Bind and reset the shared vmm_context structure. */
void vmm_context_init(void *context_addr);
#ifdef RT_VMM_USING_DOMAIN
/* Compute the guest/vmm/super domain values from the loader's layout. */
void vmm_context_init_domain(struct vmm_domain *domain);
#endif
/* Mark a virtual IRQ pending for the guest and mask it on the host. */
void vmm_virq_pending(int irq);
/* Validate the guest context saved at sp; dump state on anomalies. */
void vmm_verify_guest_status(struct rt_hw_stack *sp);
/* Dump guest registers, virtual IRQ state and domains. */
void vmm_show_guest(void);

#endif

View File

@ -1,49 +0,0 @@
/*
* VMM IO map table
*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-06-15 Bernard the first version
*/
#include <rtthread.h>
#include "vmm.h"
/* Private copy of the loader-supplied physical->virtual IO mapping table. */
static struct vmm_iomap _vmm_iomap[RT_VMM_IOMAP_MAXNR];

/* Copy the loader's iomap into the private table.  `iomap` must provide at
 * least RT_VMM_IOMAP_MAXNR entries: exactly sizeof(_vmm_iomap) bytes are
 * copied. */
void vmm_iomap_init(struct vmm_iomap *iomap)
{
    rt_memcpy(_vmm_iomap, iomap, sizeof(_vmm_iomap));
}
/* find virtual address according to name; returns 0 when no entry matches */
unsigned long vmm_find_iomap(const char *name)
{
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(_vmm_iomap); idx++)
    {
        if (rt_strcmp(_vmm_iomap[idx].name, name) != 0)
            continue;
        return (unsigned long)_vmm_iomap[idx].va;
    }

    return 0;
}
/* find the virtual address a physical address is mapped to; 0 if unmapped */
unsigned long vmm_find_iomap_by_pa(unsigned long pa)
{
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(_vmm_iomap); idx++)
    {
        if (_vmm_iomap[idx].pa == pa)
            return (unsigned long)_vmm_iomap[idx].va;
    }

    return 0;
}

View File

@ -1,31 +0,0 @@
/*
* VMM vector handle
*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-06-15 Bernard the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <interrupt.h>
#include "vmm.h"
/* Handler for the virtual-IRQ trigger line: only acknowledge the interrupt;
 * the actual servicing is left to the guest OS. */
void vmm_guest_isr(int irqno, void* parameter)
{
    /* nothing, let GuestOS to handle it */
    rt_hw_interrupt_clear(irqno);
}
/* Install vmm_guest_isr on the virtual-IRQ trigger line and unmask it. */
void vmm_vector_init(void)
{
    rt_hw_interrupt_install(RT_VMM_VIRQ_TRIGGER, vmm_guest_isr, RT_NULL,
                            "virq");
    rt_hw_interrupt_umask(RT_VMM_VIRQ_TRIGGER);
}

View File

@ -10,10 +10,6 @@
#include <rtconfig.h>
#ifdef RT_USING_VMM
#include <vmm.h>
#endif
.section .text, "ax"
/*
* rt_base_t rt_hw_interrupt_disable();
@ -71,67 +67,6 @@ rt_hw_context_switch:
str sp, [r0] @ store sp in preempted tasks TCB
ldr sp, [r1] @ get new task stack pointer
#ifdef RT_USING_VMM
#ifdef RT_VMM_USING_DOMAIN
@ need to make sure we are in vmm domain as we would use rt_current_thread
ldr r2, =vmm_domain_val
ldr r7, [r2]
mcr p15, 0, r7, c3, c0
#endif
/* check whether vmm thread, otherwise, update vIRQ */
ldr r3, =rt_current_thread
ldr r4, [r3]
ldr r5, =vmm_thread
cmp r4, r5
beq switch_to_guest
@ not falling into guest. Simple task ;-)
ldmfd sp!, {r6} @ pop new task cpsr to spsr
msr spsr_cxsf, r6
ldmfd sp!, {r0-r12, lr, pc}^
switch_to_guest:
#ifdef RT_VMM_USING_DOMAIN
@ the stack is saved in the guest domain so we need to
@ come back to the guest domain to get the registers.
ldr r1, =super_domain_val
ldr r0, [r1]
mcr p15, 0, r0, c3, c0
#endif
/* The user can do nearly anything in rt_thread_idle_excute because it will
call the thread->cleanup. One common thing is sending events and wake up
threads. So the guest thread will be preempted. This is the only point that
the guest thread would call rt_hw_context_switch and "yield".
More over, rt_schedule will call this function and this function *will*
reentrant. If that happens, we need to make sure that call the
rt_thread_idle_excute and vmm_virq_update again and we are in super domain.
I use a "reference count" to achieve such behaviour. If you have better
idea, tell me. */
ldr r4, =_guest_switch_lvl
ldr r5, [r4]
add r5, r5, #1
str r5, [r4]
cmp r5, #1
bne _switch_through
bl rt_thread_idle_excute
bl vmm_virq_update
/* we need _guest_switch_lvl to protect until _switch_through, but it's OK
* to cleanup the reference count here because the code below will not be
* reentrant. */
sub r5, r5, #1
str r5, [r4]
#ifdef RT_VMM_USING_DOMAIN
ldr r1, =guest_domain_val
ldr r0, [r1]
mcr p15, 0, r0, c3, c0
#endif
_switch_through:
#endif /* RT_USING_VMM */
ldmfd sp!, {r4} @ pop new task cpsr to spsr
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc, copy spsr to cpsr

View File

@ -217,9 +217,6 @@ int arm_gic_dist_init(rt_uint32_t index, rt_uint32_t dist_base, int irq_start)
if ((rt_cpu_get_smp_id() & 0xF) != 0)
return 0;
#endif
#ifdef RT_USING_VMM
return 0;
#endif
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
@ -263,9 +260,6 @@ int arm_gic_cpu_init(rt_uint32_t index, rt_uint32_t cpu_base)
if ((rt_cpu_get_smp_id() & 0xF) != 0)
return 0;
#endif
#ifdef RT_USING_VMM
return 0;
#endif
GIC_CPU_PRIMASK(cpu_base) = 0xf0;
/* Enable CPU interrupt */

View File

@ -14,10 +14,6 @@
#include "realview.h"
#include "gic.h"
#ifdef RT_USING_VMM
#include <vmm.h>
#endif
#define MAX_HANDLERS NR_IRQS_PBA8
extern volatile rt_uint8_t rt_interrupt_nest;
@ -36,13 +32,7 @@ extern int system_vectors;
static void rt_hw_vector_init(void)
{
#ifndef RT_USING_VMM
unsigned int *dest = (unsigned int *)VECTOR_BASE;
unsigned int *src = (unsigned int *)&system_vectors;
rt_memcpy(dest, src, 16 * 4);
rt_cpu_vector_set_base(VECTOR_BASE);
#endif
}
/**
@ -60,13 +50,9 @@ void rt_hw_interrupt_init(void)
rt_memset(isr_table, 0x00, sizeof(isr_table));
/* initialize ARM GIC */
#ifdef RT_USING_VMM
gic_dist_base = vmm_find_iomap("GIC_DIST");
gic_cpu_base = vmm_find_iomap("GIC_CPU");
#else
gic_dist_base = REALVIEW_GIC_DIST_BASE;
gic_cpu_base = REALVIEW_GIC_CPU_BASE;
#endif
arm_gic_dist_init(0, gic_dist_base, 0);
arm_gic_cpu_init(0, gic_cpu_base);
/*arm_gic_dump_type(0);*/

View File

@ -10,13 +10,6 @@
#include <rtconfig.h>
#ifdef RT_USING_VMM
#include <vmm.h>
.equ orig_irq_isr, LINUX_VECTOR_POS+0x18
#else
#undef RT_VMM_USING_DOMAIN
#endif
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
@ -28,7 +21,6 @@
.equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled
.equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled
#ifndef RT_USING_VMM
.equ UND_Stack_Size, 0x00000000
.equ SVC_Stack_Size, 0x00000100
.equ ABT_Stack_Size, 0x00000000
@ -38,9 +30,6 @@
#define ISR_Stack_Size (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
#else
#define ISR_Stack_Size (RT_FIQ_STACK_PGSZ + RT_IRQ_STACK_PGSZ)
#endif
.section .data.share.isr
/* stack */
@ -58,10 +47,6 @@ stack_top:
/* reset entry */
.globl _reset
_reset:
#ifdef RT_USING_VMM
/* save all the parameter and variable registers */
stmfd sp!, {r0-r12, lr}
#endif
/* set the cpu to SVC32 mode and disable interrupt */
mrs r0, cpsr
bic r0, r0, #0x1f
@ -81,18 +66,6 @@ bss_loop:
strlo r0,[r1],#4 /* clear 4 bytes */
blo bss_loop /* loop until done */
#ifdef RT_USING_VMM
/* clear .bss.share */
mov r0,#0 /* get a zero */
ldr r1,=__bss_share_start /* bss start */
ldr r2,=__bss_share_end /* bss end */
bss_share_loop:
cmp r1,r2 /* check if data to clear */
strlo r0,[r1],#4 /* clear 4 bytes */
blo bss_share_loop /* loop until done */
#endif
/* call C++ constructors of global objects */
ldr r0, =__ctors_start__
ldr r1, =__ctors_end__
@ -109,31 +82,16 @@ ctor_loop:
ctor_end:
/* start RT-Thread Kernel */
#ifdef RT_USING_VMM
/* restore the parameter */
ldmfd sp!, {r0-r3}
bl vmm_entry
ldmfd sp!, {r4-r12, pc}
#else
ldr pc, _rtthread_startup
_rtthread_startup:
.word rtthread_startup
#endif
stack_setup:
ldr r0, =stack_top
#ifdef RT_USING_VMM
@ Linux use stmia to save r0, lr and spsr. To align to 8 byte boundary,
@ just allocate 16 bytes for it.
sub r0, r0, #16
#endif
#ifndef RT_USING_VMM
@ Set the startup stack for svc
mov sp, r0
#endif
#ifndef RT_USING_VMM
@ Enter Undefined Instruction Mode and set its Stack Pointer
msr cpsr_c, #Mode_UND|I_Bit|F_Bit
mov sp, r0
@ -143,7 +101,6 @@ stack_setup:
msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #ABT_Stack_Size
#endif
@ Enter FIQ Mode and set its Stack Pointer
msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
@ -184,26 +141,10 @@ vector_fiq:
vector_irq:
stmfd sp!, {r0-r12,lr}
#ifdef RT_VMM_USING_DOMAIN
@ save the last domain
mrc p15, 0, r5, c3, c0
@ switch to vmm domain as we are going to call vmm codes
ldr r1, =vmm_domain_val
ldr r4, [r1]
mcr p15, 0, r4, c3, c0
#endif
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
#ifdef RT_VMM_USING_DOMAIN
@ restore the last domain. It do some redundant work but simplify the
@ logic. It might be the guest domain so rt_thread_switch_interrupt_flag
@ should lay in .bss.share
mcr p15, 0, r5, c3, c0
#endif
@ if rt_thread_switch_interrupt_flag set, jump to
@ rt_hw_context_switch_interrupt_do and don't return
ldr r0, =rt_thread_switch_interrupt_flag
@ -211,58 +152,8 @@ vector_irq:
cmp r1, #1
beq rt_hw_context_switch_interrupt_do
#ifndef RT_USING_VMM
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4
#else
#ifdef RT_VMM_USING_DOMAIN
@ r4 is vmm_domain_val
@ back to vmm domain as we need access rt_current_thread
mcr p15, 0, r4, c3, c0
#endif
/* check whether we need to do IRQ routing
* ensure the int is disabled. Or there will be an infinite loop. */
ldr r0, =rt_current_thread
ldr r0, [r0]
ldr r1, =vmm_thread
cmp r0, r1
beq switch_to_guest
#ifdef RT_VMM_USING_DOMAIN
@ r5 is domain of interrupted context
@ it might be super_domain_val or vmm_domain_val so we need to restore it.
mcr p15, 0, r5, c3, c0
#endif
@ switch back if the interrupted thread is not vmm
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4
switch_to_guest:
#ifdef RT_VMM_USING_DOMAIN
@ We are going to execute rt-thread code but accessing the content of the
@ guest. So switch to super domain.
ldr r1, =super_domain_val
ldr r0, [r1]
mcr p15, 0, r0, c3, c0
#endif
/* check whether there is a pending interrupt for Guest OS */
bl vmm_virq_check
#ifdef RT_VMM_USING_DOMAIN
@ All done, restore the guest domain.
mcr p15, 0, r5, c3, c0
#endif
cmp r0, #0x0
beq route_irq_to_guest
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4
route_irq_to_guest:
ldmfd sp!, {r0-r12,lr}
b orig_irq_isr
#endif /* RT_USING_VMM */
rt_hw_context_switch_interrupt_do:
mov r1, #0 @ clear flag
@ -289,13 +180,6 @@ rt_hw_context_switch_interrupt_do:
ldr r5, [r4]
str sp, [r5] @ store sp in preempted tasks's TCB
#ifdef RT_VMM_USING_DOMAIN
@ If a thread is wake up by interrupt, it should be RTT thread.
@ Make sure the domain is correct.
ldr r1, =vmm_domain_val
ldr r2, [r1]
mcr p15, 0, r2, c3, c0
#endif
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6]
ldr sp, [r6] @ get new task's stack pointer

View File

@ -14,10 +14,6 @@
#include "armv7.h"
#ifdef RT_USING_VMM
#include <vmm_context.h>
#endif
#include "gic.h"
extern struct rt_thread *rt_current_thread;
@ -162,15 +158,6 @@ void rt_hw_trap_irq(void)
/* turn to interrupt service routine */
isr_func(ir, param);
}
#ifdef RT_USING_VMM
else
{
/* We have to EOI before masking the interrupts */
arm_gic_ack(0, fullir);
vmm_virq_pending(ir);
return;
}
#endif
/* end of interrupt */
arm_gic_ack(0, fullir);