commit 46d9ee7795
2024-08-05 20:57:09 +08:00
3020 changed files with 1725767 additions and 0 deletions

View File

@@ -0,0 +1,11 @@
# RT-Thread building script for component

from building import *

cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.S')
CPPPATH = [cwd]

group = DefineGroup('lwp-riscv', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)

Return('group')

View File

@@ -0,0 +1,357 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-11-18 Jesven first version
* 2021-02-03 lizhirui port to riscv64
* 2021-02-06 lizhirui add thread filter
* 2021-02-19 lizhirui port to new version of rt-smart
* 2021-03-02 lizhirui add an auxiliary function for interrupt
* 2021-03-04 lizhirui delete thread filter
* 2021-03-04 lizhirui modify for new version of rt-smart
* 2021-11-22 JasonHu add lwp_set_thread_context
* 2021-11-30 JasonHu add clone/fork support
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-10-16 Shell Support a new backtrace framework
*/
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#ifdef ARCH_MM_MMU
#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <lwp_internal.h>
#include <lwp_arch.h>
#include <lwp_user_mm.h>
#include <page.h>
#include <cpuport.h>
#include <encoding.h>
#include <stack.h>
#include <cache.h>
extern rt_ubase_t MMUTable[];
void *lwp_copy_return_code_to_user_stack()
{
/* assembly labels delimiting the user-mode exit stub (see lwp_thread_return below) */
void lwp_thread_return();
void lwp_thread_return_end();
rt_thread_t tid = rt_thread_self();
if (tid->user_stack != RT_NULL)
{
rt_size_t size = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;
rt_size_t userstack = (rt_size_t)tid->user_stack + tid->user_stack_size - size;
lwp_memcpy((void *)userstack, lwp_thread_return, size);
return (void *)userstack;
}
return RT_NULL;
}
rt_ubase_t lwp_fix_sp(rt_ubase_t cursp)
{
void lwp_thread_return();
void lwp_thread_return_end();
if (cursp == 0)
{
return 0;
}
return cursp - ((rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return);
}
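/*
 * How the two helpers above pair up (see arch_crt_start_umode in the
 * assembly file below); a minimal C rendering, illustrative only:
 *
 *     void *stub = lwp_copy_return_code_to_user_stack();
 *     rt_ubase_t sp = lwp_fix_sp(user_stack_top);
 *
 * Both the initial user sp and ra end up pointing at the copied stub, so a
 * return from the user entry falls through into lwp_thread_return and
 * issues the exit syscall.
 */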
/* recover the owning thread from the address of its `sp` member (container_of pattern) */
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
{
return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
}
void *get_thread_kernel_stack_top(rt_thread_t thread)
{
return (void *)(((rt_size_t)thread->stack_addr) + ((rt_size_t)thread->stack_size));
}
void *arch_get_user_sp(void)
{
/* user sp saved in interrupt context */
rt_thread_t self = rt_thread_self();
rt_uint8_t *stack_top = (rt_uint8_t *)self->stack_addr + self->stack_size;
struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)(stack_top - sizeof(struct rt_hw_stack_frame));
return (void *)frame->user_sp_exc_stack;
}
int arch_user_space_init(struct rt_lwp *lwp)
{
rt_ubase_t *mmu_table;
mmu_table = rt_hw_mmu_pgtbl_create();
if (!mmu_table)
{
return -RT_ENOMEM;
}
lwp->end_heap = USER_HEAP_VADDR;
lwp->aspace = rt_aspace_create(
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
if (!lwp->aspace)
{
/* avoid leaking the page table when aspace creation fails */
rt_hw_mmu_pgtbl_delete(mmu_table);
return -RT_ERROR;
}
return 0;
}
void *arch_kernel_mmu_table_get(void)
{
return (void *)MMUTable;
}
void arch_user_space_free(struct rt_lwp *lwp)
{
if (lwp)
{
RT_ASSERT(lwp->aspace);
void *pgtbl = lwp->aspace->page_table;
rt_aspace_delete(lwp->aspace);
/* must be freed after aspace delete, pgtbl is required for unmap */
rt_hw_mmu_pgtbl_delete(pgtbl);
lwp->aspace = RT_NULL;
}
else
{
LOG_W("%s: NULL lwp as parameter", __func__);
RT_ASSERT(0);
}
}
long _sys_clone(void *arg[]);
long sys_clone(void *arg[])
{
return _sys_clone(arg);
}
long _sys_fork(void);
long sys_fork(void)
{
return _sys_fork();
}
long _sys_vfork(void);
long sys_vfork(void)
{
/* vfork falls back to the ordinary fork implementation */
return _sys_fork();
}
/**
* set exec context for fork/clone.
*/
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
void *user_stack, void **thread_sp)
{
RT_ASSERT(exit != RT_NULL);
RT_ASSERT(user_stack != RT_NULL);
RT_ASSERT(new_thread_stack != RT_NULL);
RT_ASSERT(thread_sp != RT_NULL);
struct rt_hw_stack_frame *syscall_frame;
struct rt_hw_stack_frame *thread_frame;
rt_uint8_t *stk;
rt_uint8_t *syscall_stk;
stk = (rt_uint8_t *)new_thread_stack;
/* reserve syscall context; all the registers are copied from the parent */
stk -= CTX_REG_NR * REGBYTES;
syscall_stk = stk;
syscall_frame = (struct rt_hw_stack_frame *)stk;
/* modify user sp */
syscall_frame->user_sp_exc_stack = (rt_ubase_t)user_stack;
/* skip ecall */
syscall_frame->epc += 4;
/* child return value is 0 */
syscall_frame->a0 = 0;
syscall_frame->a1 = 0;
/* reset thread area */
rt_thread_t thread = rt_container_of((unsigned long)thread_sp, struct rt_thread, sp);
syscall_frame->tp = (rt_ubase_t)thread->thread_idr;
#ifdef ARCH_USING_NEW_CTX_SWITCH
extern void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus);
rt_ubase_t sstatus = read_csr(sstatus) | SSTATUS_SPP;
sstatus &= ~SSTATUS_SIE;
/* compatible to RESTORE_CONTEXT */
stk = (void *)_rt_hw_stack_init((rt_ubase_t *)stk, (rt_ubase_t)exit, sstatus);
#else
/* build temp thread context */
stk -= sizeof(struct rt_hw_stack_frame);
thread_frame = (struct rt_hw_stack_frame *)stk;
int i;
for (i = 0; i < sizeof(struct rt_hw_stack_frame) / sizeof(rt_ubase_t); i++)
{
((rt_ubase_t *)thread_frame)[i] = 0xdeadbeef; /* poison pattern for debugging */
}
/* set pc for thread */
thread_frame->epc = (rt_ubase_t)exit;
/* set old exception mode as supervisor, because in kernel */
thread_frame->sstatus = read_csr(sstatus) | SSTATUS_SPP;
thread_frame->sstatus &= ~SSTATUS_SIE; /* must disable interrupt */
/* set stack as syscall stack */
thread_frame->user_sp_exc_stack = (rt_ubase_t)syscall_stk;
#endif /* ARCH_USING_NEW_CTX_SWITCH */
/* save new stack top */
*thread_sp = (void *)stk;
/**
* The stack for child thread:
*
* +------------------------+ --> kernel stack top
* | syscall stack |
* | |
* | @sp | --> `user_stack`
* | @epc | --> user ecall addr + 4 (skip ecall)
* | @a0&a1 | --> 0 (for child return 0)
* | |
* +------------------------+ --> temp thread stack top
* | temp thread stack | ^
* | | |
* | @sp | ---------/
* | @epc | --> `exit` (arch_clone_exit/arch_fork_exit)
* | |
* +------------------------+ --> thread sp
*/
return 0;
}
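/*
 * A minimal usage sketch (hypothetical caller; the real call site lives in
 * the generic clone/fork code). It assumes the parent's syscall frame has
 * already been copied to the top of the child's kernel stack:
 */
#if 0 /* illustration only */
static void example_fork_finish(rt_thread_t child, void *child_user_sp)
{
    extern void arch_fork_exit(void);
    void *kstack_top = get_thread_kernel_stack_top(child);

    /* thread_sp must be &child->sp: the function recovers the TCB from it */
    arch_set_thread_context(arch_fork_exit, kstack_top, child_user_sp,
                            (void **)&child->sp);
}
#endif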
#define ALIGN_BYTES (16)
struct signal_ucontext
{
rt_int64_t sigreturn;
lwp_sigset_t save_sigmask;
siginfo_t si;
rt_align(16)
struct rt_hw_stack_frame frame;
};
void *arch_signal_ucontext_restore(rt_base_t user_sp)
{
struct signal_ucontext *new_sp;
new_sp = (void *)user_sp;
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
}
else
{
LOG_I("User frame corrupted during signal handling\nexiting...");
sys_exit_group(EXIT_FAILURE);
}
return (void *)&new_sp->frame;
}
void *arch_signal_ucontext_save(int signo, siginfo_t *psiginfo,
struct rt_hw_stack_frame *exp_frame, rt_base_t user_sp,
lwp_sigset_t *save_sig_mask)
{
struct signal_ucontext *new_sp;
new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
/* push psiginfo */
if (psiginfo)
{
lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
}
lwp_memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));
/* copy the save_sig_mask */
lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));
/* copy the 8-byte lwp_sigreturn stub */
const size_t lwp_sigreturn_bytes = 8;
extern void lwp_sigreturn(void);
/* ensure that the sigreturn stub starts at the outermost boundary of the frame */
lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
/**
* synchronize dcache & icache if target is
* a Harvard Architecture machine, otherwise
* do nothing
*/
rt_hw_sync_cache_local(&new_sp->sigreturn, lwp_sigreturn_bytes);
}
else
{
LOG_I("%s: User stack overflow", __func__);
sys_exit_group(EXIT_FAILURE);
}
return new_sp;
}
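/**
 * The signal frame built above, on the user stack (addresses grow upward):
 *
 *   +------------------------+ --> user_sp on entry
 *   | saved exception frame  |     (16-byte aligned rt_hw_stack_frame)
 *   | siginfo_t si           |
 *   | save_sigmask           |
 *   | sigreturn stub (8 B)   |
 *   +------------------------+ --> returned new_sp; arch_thread_signal_enter
 *                                  installs it as the handler's ra, so the
 *                                  handler returns into the stub's ecall
 */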
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
/* not supported on this architecture */
return;
}
/**
* void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
*/
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
{
arch_start_umode(args, user_entry, (void *)USER_STACK_VEND, kernel_stack);
}
#endif /* ARCH_MM_MMU */
int arch_backtrace_uthread(rt_thread_t thread)
{
struct rt_hw_backtrace_frame frame;
struct rt_hw_stack_frame *stack;
if (thread && thread->lwp)
{
stack = thread->user_ctx.ctx;
if ((rt_ubase_t)stack > (rt_ubase_t)thread->stack_addr
&& (rt_ubase_t)stack < (rt_ubase_t)thread->stack_addr + thread->stack_size)
{
frame.pc = stack->epc;
frame.fp = stack->s0_fp;
lwp_backtrace_frame(thread, &frame);
return 0;
}
else
{
return -1;
}
}
return -1;
}

View File

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <rthw.h>
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef ARCH_MM_MMU
#ifdef ARCH_MM_MMU_32BIT_LIMIT
#define USER_HEAP_VADDR 0xF0000000UL
#define USER_HEAP_VEND 0xFE000000UL
#define USER_STACK_VSTART 0xE0000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define USER_VADDR_START 0xC0000000UL
#define USER_VADDR_TOP 0xFF000000UL
#define USER_LOAD_VADDR 0xD0000000UL
#define LDSO_LOAD_VADDR USER_LOAD_VADDR
#elif defined(ARCH_REMAP_KERNEL)
#define USER_VADDR_START 0x00001000UL
#define USER_VADDR_TOP 0x003ffffff000UL
#define USER_STACK_VSTART 0x000270000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define USER_HEAP_VADDR 0x000300000000UL
#define USER_HEAP_VEND USER_VADDR_TOP
#define USER_LOAD_VADDR 0x200000000UL
#define LDSO_LOAD_VADDR 0x200000000UL
#else
#define USER_HEAP_VADDR 0x300000000UL
#define USER_HEAP_VEND USER_STACK_VSTART
#define USER_STACK_VSTART 0x370000000UL
#define USER_STACK_VEND 0x400000000UL
#define USER_VADDR_START 0x200000000UL
#define USER_VADDR_TOP 0xfffffffffffff000UL
#define USER_LOAD_VADDR 0x200000000UL
#define LDSO_LOAD_VADDR 0x200000000UL
#endif
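/*
 * Default (non-remap, 64-bit) layout from the branch above, low to high:
 *   0x200000000          USER_VADDR_START, also the load base for apps/ldso
 *   0x300000000          user heap, growing up toward the stack area
 *   0x370000000          user stacks, up to 0x400000000
 *   0xfffffffffffff000   USER_VADDR_TOP
 */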
/* these attributes are CPU-specific and should be defined in riscv_mmu.h */
#ifndef MMU_MAP_U_RWCB
#define MMU_MAP_U_RWCB 0
#endif
#ifndef MMU_MAP_U_RW
#define MMU_MAP_U_RW 0
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* find first zero bit (0-based), e.g. rt_hw_ffz(0xb) == 2 since 0xb == 0b1011 */
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
return __builtin_ffsl(~x) - 1;
}
rt_inline void icache_invalid_all(void)
{
rt_hw_cpu_icache_invalidate_all();
}
struct rt_hw_stack_frame;
void *arch_signal_ucontext_restore(rt_base_t user_sp);
void *arch_signal_ucontext_save(int signo, siginfo_t *psiginfo,
struct rt_hw_stack_frame *exp_frame, rt_base_t user_sp,
lwp_sigset_t *save_sig_mask);
#ifdef __cplusplus
}
#endif
#endif /* ARCH_MM_MMU */
#endif /* LWP_ARCH_H__ */

View File

@@ -0,0 +1,303 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
* 2021-02-03 lizhirui port to riscv64
* 2021-02-19 lizhirui port to new version of rt-smart
* 2022-11-08 Wangxiaoyao Cleanup codes;
* Support new context switch
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
*/
#include "rtconfig.h"
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif /* __ASSEMBLY__ */
#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
#include "asm-generic.h"
.section .text.lwp
/*
* void arch_start_umode(args, text, ustack, kstack);
*/
.global arch_start_umode
.type arch_start_umode, %function
arch_start_umode:
// load kernel stack for the user process
csrw sscratch, a3
li t0, SSTATUS_SPP | SSTATUS_SIE // set user mode, disable interrupt
csrc sstatus, t0
li t0, SSTATUS_SPIE // enable interrupt when returning to user mode
csrs sstatus, t0
csrw sepc, a1
mv sp, a2
sret // enter user mode
/*
* void arch_crt_start_umode(args, text, ustack, kstack);
*/
.global arch_crt_start_umode
.type arch_crt_start_umode, %function
arch_crt_start_umode:
li t0, SSTATUS_SPP | SSTATUS_SIE // set user mode, disable interrupt
csrc sstatus, t0
li t0, SSTATUS_SPIE // enable interrupt when returning to user mode
csrs sstatus, t0
csrw sepc, a1
mv s0, a0
mv s1, a1
mv s2, a2
mv s3, a3
mv a0, s2
call lwp_copy_return_code_to_user_stack
mv a0, s2
call lwp_fix_sp
mv sp, a0 // user sp
mv ra, a0 // return address (the copied exit stub)
mv a0, s0 // args
csrw sscratch, s3
sret // enter user mode
/**
* Unify exit point from kernel mode to enter user space
* we handle following things here:
* 1. restoring user mode debug state (not support yet)
* 2. handling thread's exit request
* 3. handling POSIX signal
* 4. restoring user context
* 5. jump to user mode
*/
.global arch_ret_to_user
arch_ret_to_user:
// TODO: kernel gdb server is not supported on risc-v yet,
// so no debug state is checked or handled here
call lwp_check_exit_request
beqz a0, 1f
mv a0, x0
call sys_exit
1:
mv a0, sp
call lwp_thread_signal_catch
ret_to_user_exit:
RESTORE_ALL
// `RESTORE_ALL` also resets sp to the user sp and sets up sscratch
sret
/**
 * Restore user context from the exception frame stored on the user stack
 * and handle pending signals.
 */
arch_signal_quit:
LOAD a0, FRAME_OFF_SP(sp)
call arch_signal_ucontext_restore
/* pop the syscall frame off the kernel stack */
addi sp, sp, CTX_REG_NR * REGBYTES
STORE sp, FRAME_OFF_SP(a0)
/* a0 points to the saved user frame; use it as the working stack */
mv sp, a0
/* sscratch <- the user sp as it was before entering the trap */
addi a0, sp, CTX_REG_NR * REGBYTES
csrw sscratch, a0
RESTORE_ALL
SAVE_ALL
j arch_ret_to_user
/**
* rt_noreturn
* void arch_thread_signal_enter(
* int signo, -> a0
* siginfo_t *psiginfo, -> a1
* void *exp_frame, -> a2
* void *entry_uaddr, -> a3
* lwp_sigset_t *save_sig_mask, -> a4
* )
*/
.global arch_thread_signal_enter
arch_thread_signal_enter:
mv s3, a2
mv s2, a0
mv s1, a3
/* a3 <- user sp taken from the exception frame */
LOAD t0, FRAME_OFF_SP(a2)
mv a3, t0
call arch_signal_ucontext_save
/* restore kernel sp (pop the exception frame) */
addi sp, s3, CTX_REG_NR * REGBYTES
/**
 * set register ra to the copied sigreturn stub so the handler returns
 * into lwp_sigreturn; set sp to the user sp and save the kernel sp
 * in sscratch
 */
mv ra, a0
csrw sscratch, sp
mv sp, a0
/**
* s1 is signal_handler,
* s1 = !s1 ? lwp_sigreturn : s1;
*/
bnez s1, 1f
mv s1, ra
1:
/* enter user mode and enable interrupt when returning to user mode */
li t0, SSTATUS_SPP
csrc sstatus, t0
li t0, SSTATUS_SPIE
csrs sstatus, t0
/* sepc <- signal_handler */
csrw sepc, s1
/* a0 <- signal id */
mv a0, s2
/* a1 <- siginfo: 16 bytes into the ucontext, past sigreturn and save_sigmask */
add a1, sp, 16
/* dummy a2 */
mv a2, a1
/* restore user GP */
LOAD gp, FRAME_OFF_GP(s3)
/**
* handler(signo, psi, ucontext);
*/
sret
.align 3
lwp_debugreturn:
li a7, 0xff
ecall
.align 3
.global lwp_sigreturn
lwp_sigreturn:
li a7, 0xfe // magic syscall ID recognized by syscall_entry as a signal return
ecall
.align 3
lwp_sigreturn_end:
.align 3
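// the code between lwp_thread_return and lwp_thread_return_end below is the
// stub that lwp_copy_return_code_to_user_stack() copies to the top of the
// user stack: it issues the exit syscall (a7 = 1) with status 0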
.global lwp_thread_return
lwp_thread_return:
li a0, 0
li a7, 1
ecall
.align 3
.global lwp_thread_return_end
lwp_thread_return_end:
.global arch_get_tidr
arch_get_tidr:
mv a0, tp
ret
// arch_set_thread_area falls through to arch_set_tidr: both set tp
.global arch_set_thread_area
arch_set_thread_area:
.global arch_set_tidr
arch_set_tidr:
mv tp, a0
ret
.global arch_clone_exit
.global arch_fork_exit
arch_fork_exit:
arch_clone_exit:
j arch_syscall_exit
START_POINT(syscall_entry)
#ifndef ARCH_USING_NEW_CTX_SWITCH
// swap to the thread's kernel stack
csrr t0, sstatus
andi t0, t0, 0x100 // SSTATUS_SPP: set if we trapped from supervisor mode
beqz t0, __restore_sp_from_tcb
__restore_sp_from_sscratch: // from kernel
csrr t0, sscratch
j __move_stack_context
__restore_sp_from_tcb: // from user
jal rt_thread_self
jal get_thread_kernel_stack_top
mv t0, a0
__move_stack_context:
mv t1, sp // src
mv sp, t0 // switch to the kernel stack
addi sp, sp, -CTX_REG_NR * REGBYTES
// copy the saved context to the kernel stack
li s0, CTX_REG_NR // count
mv t2, sp // dst
copy_context_loop:
LOAD t0, 0(t1)
STORE t0, 0(t2)
addi s0, s0, -1
addi t1, t1, 8
addi t2, t2, 8
bnez s0, copy_context_loop
#endif /* ARCH_USING_NEW_CTX_SWITCH */
/* fetch SYSCALL ID */
LOAD a7, 17 * REGBYTES(sp)
addi a7, a7, -0xfe // 0xfe is the magic ID issued by lwp_sigreturn
beqz a7, arch_signal_quit
#ifdef ARCH_MM_MMU
/* save the user setting when entering the syscall */
call rt_thread_self
call lwp_user_setting_save
#endif
mv a0, sp
OPEN_INTERRUPT
call syscall_handler
j arch_syscall_exit
START_POINT_END(syscall_entry)
.global arch_syscall_exit
arch_syscall_exit:
CLOSE_INTERRUPT
#if defined(ARCH_MM_MMU)
LOAD s0, FRAME_OFF_SSTATUS(sp)
andi s0, s0, 0x100 // SSTATUS_SPP: set if the syscall came from kernel mode
bnez s0, dont_ret_to_user
j arch_ret_to_user
#endif
dont_ret_to_user:
#ifdef ARCH_MM_MMU
/* restore the user setting when exiting the syscall */
call rt_thread_self
call lwp_user_setting_restore
/* `tp` was just restored; update the saved context to match */
STORE tp, 4 * REGBYTES(sp)
#endif
// restore context
RESTORE_ALL
csrw sscratch, zero
sret

View File

@@ -0,0 +1,109 @@
#include "mm_aspace.h"
#include <rtthread.h>
#include <stdint.h>
#include <string.h>
#include <elf.h>
#ifdef ARCH_MM_MMU
#include <mmu.h>
#include <page.h>
#endif
typedef struct
{
Elf64_Word st_name;
Elf64_Addr st_value;
Elf64_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf64_Half st_shndx;
} Elf64_sym;
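/*
 * Each .rel.dyn entry consumed below is a pair of 32-bit words matching
 * Elf32_Rel: r_offset, then r_info (type in the low byte, symbol index in
 * the upper 24 bits). A struct rendering, for illustration only:
 */
typedef struct
{
    uint32_t r_offset; /* patch location, relative to text_start */
    uint32_t r_info;   /* type = r_info & 0xff, sym = r_info >> 8 */
} rel_entry_example_t;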
#ifdef ARCH_MM_MMU
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
{
size_t rel_off;
void* addr;
if (rel_dyn_size && !dynsym)
{
return;
}
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
{
uint32_t v1, v2; /* r_offset, r_info */
/* the target aspace is not the current one, so translate each VA before access */
addr = rt_hw_mmu_v2p(aspace, (void *)(((rt_size_t)rel_dyn_start) + rel_off));
memcpy(&v1, addr, 4);
addr = rt_hw_mmu_v2p(aspace, (void *)(((rt_size_t)rel_dyn_start) + rel_off + 4));
memcpy(&v2, addr, 4);
addr = rt_hw_mmu_v2p(aspace, (void *)((rt_size_t)text_start + v1));
if ((v2 & 0xff) == R_ARM_RELATIVE)
{
*(rt_size_t*)addr += (rt_size_t)text_start;
}
else if ((v2 & 0xff) == R_ARM_ABS32)
{
uint32_t t;
t = (v2 >> 8); /* symbol index lives in the upper 24 bits */
if (t) /* 0 is UDF */
{
*(rt_size_t*)addr = (((rt_size_t)text_start) + dynsym[t].st_value);
}
}
}
/* modify got */
if (got_size)
{
uint32_t *got_item = (uint32_t*)got_start;
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
{
addr = rt_hw_mmu_v2p(aspace, got_item);
*(rt_size_t *)addr += (rt_size_t)text_start;
}
}
}
#else
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
{
size_t rel_off;
if (rel_dyn_size && !dynsym)
{
return;
}
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
{
uint32_t v1, v2;
memcpy(&v1, ((rt_uint8_t *)rel_dyn_start) + rel_off, 4);
memcpy(&v2, ((rt_uint8_t *)rel_dyn_start) + rel_off + 4, 4);
if ((v2 & 0xff) == R_ARM_RELATIVE)
{
*(uint32_t*)(((rt_size_t)text_start) + v1) += (uint32_t)text_start;
}
else if ((v2 & 0xff) == R_ARM_ABS32)
{
uint32_t t;
t = (v2 >> 8); /* symbol index lives in the upper 24 bits */
if (t) /* 0 is UDF */
{
*(uint32_t*)(((rt_size_t)text_start) + v1) = (uint32_t)(((rt_size_t)text_start) + dynsym[t].st_value);
}
}
}
/* modify got */
if (got_size)
{
uint32_t *got_item = (uint32_t*)got_start;
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
{
*got_item += (uint32_t)text_start;
}
}
}
#endif