
criu: add riscv64 support to parasite and restorer

Co-authored-by: Yixue Zhao <felicitia2010@gmail.com>
Co-authored-by: stove <stove@rivosinc.com>
Signed-off-by: Haorong Lu <ancientmodern4@gmail.com>
Authored by Haorong Lu on 2023-08-01 12:06:00 -07:00, committed by Andrei Vagin
parent 1a42f63d30
commit 35b30774fc
19 changed files with 751 additions and 0 deletions

criu/arch/riscv64/Makefile (new file)

@@ -0,0 +1,8 @@
builtin-name := crtools.built-in.o
ldflags-y += -r
obj-y += cpu.o
obj-y += crtools.o
obj-y += sigframe.o
obj-y += vdso-lookup.o

criu/arch/riscv64/cpu.c (new file)

@@ -0,0 +1,40 @@
#undef LOG_PREFIX
#define LOG_PREFIX "cpu: "

#include <errno.h>

#include "cpu.h"

int cpu_init(void)
{
	return 0;
}

int cpu_dump_cpuinfo(void)
{
	return 0;
}

int cpu_validate_cpuinfo(void)
{
	return 0;
}

int cpu_dump_cpuinfo_single(void)
{
	return -ENOTSUP;
}

int cpu_validate_image_cpuinfo_single(void)
{
	return -ENOTSUP;
}

int cpuinfo_dump(void)
{
	return -ENOTSUP;
}

int cpuinfo_check(void)
{
	return -ENOTSUP;
}

criu/arch/riscv64/crtools.c (new file)

@@ -0,0 +1,171 @@
#include <string.h>
#include <unistd.h>
#include <linux/elf.h>
#include "types.h"
#include <compel/asm/processor-flags.h>
#include <compel/asm/infect-types.h>
#include "asm/restorer.h"
#include "common/compiler.h"
#include <compel/ptrace.h>
#include "asm/dump.h"
#include "protobuf.h"
#include "images/core.pb-c.h"
#include "images/creds.pb-c.h"
#include "parasite-syscall.h"
#include "log.h"
#include "util.h"
#include "cpu.h"
#include "restorer.h"
#include "compel/infect.h"
#define assign_reg(dst, src, e) dst->e = (__typeof__(dst->e))(src)->e
int save_task_regs(void *x, user_regs_struct_t *regs, user_fpregs_struct_t *fpsimd)
{
	int i;
	CoreEntry *core = x;

	// Save riscv64 gprs
	assign_reg(core->ti_riscv64->gpregs, regs, pc);
	assign_reg(core->ti_riscv64->gpregs, regs, ra);
	assign_reg(core->ti_riscv64->gpregs, regs, sp);
	assign_reg(core->ti_riscv64->gpregs, regs, gp);
	assign_reg(core->ti_riscv64->gpregs, regs, tp);
	assign_reg(core->ti_riscv64->gpregs, regs, t0);
	assign_reg(core->ti_riscv64->gpregs, regs, t1);
	assign_reg(core->ti_riscv64->gpregs, regs, t2);
	assign_reg(core->ti_riscv64->gpregs, regs, s0);
	assign_reg(core->ti_riscv64->gpregs, regs, s1);
	assign_reg(core->ti_riscv64->gpregs, regs, a0);
	assign_reg(core->ti_riscv64->gpregs, regs, a1);
	assign_reg(core->ti_riscv64->gpregs, regs, a2);
	assign_reg(core->ti_riscv64->gpregs, regs, a3);
	assign_reg(core->ti_riscv64->gpregs, regs, a4);
	assign_reg(core->ti_riscv64->gpregs, regs, a5);
	assign_reg(core->ti_riscv64->gpregs, regs, a6);
	assign_reg(core->ti_riscv64->gpregs, regs, a7);
	assign_reg(core->ti_riscv64->gpregs, regs, s2);
	assign_reg(core->ti_riscv64->gpregs, regs, s3);
	assign_reg(core->ti_riscv64->gpregs, regs, s4);
	assign_reg(core->ti_riscv64->gpregs, regs, s5);
	assign_reg(core->ti_riscv64->gpregs, regs, s6);
	assign_reg(core->ti_riscv64->gpregs, regs, s7);
	assign_reg(core->ti_riscv64->gpregs, regs, s8);
	assign_reg(core->ti_riscv64->gpregs, regs, s9);
	assign_reg(core->ti_riscv64->gpregs, regs, s10);
	assign_reg(core->ti_riscv64->gpregs, regs, s11);
	assign_reg(core->ti_riscv64->gpregs, regs, t3);
	assign_reg(core->ti_riscv64->gpregs, regs, t4);
	assign_reg(core->ti_riscv64->gpregs, regs, t5);
	assign_reg(core->ti_riscv64->gpregs, regs, t6);

	// Save riscv64 fprs
	for (i = 0; i < 32; ++i)
		assign_reg(core->ti_riscv64->fpsimd, fpsimd, f[i]);
	assign_reg(core->ti_riscv64->fpsimd, fpsimd, fcsr);

	return 0;
}
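
For illustration, one expansion of the assign_reg() macro defined above, taking the pc line as the example; the cast goes through the destination field's own type, so protobuf-sized fields absorb the raw register value:

/* assign_reg(core->ti_riscv64->gpregs, regs, pc) expands to: */
core->ti_riscv64->gpregs->pc = (__typeof__(core->ti_riscv64->gpregs->pc))(regs)->pc;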
int arch_alloc_thread_info(CoreEntry *core)
{
	ThreadInfoRiscv64 *ti_riscv64;
	UserRiscv64RegsEntry *gpregs;
	UserRiscv64DExtEntry *fpsimd;

	ti_riscv64 = xmalloc(sizeof(*ti_riscv64));
	if (!ti_riscv64)
		goto err;
	thread_info_riscv64__init(ti_riscv64);
	core->ti_riscv64 = ti_riscv64;

	gpregs = xmalloc(sizeof(*gpregs));
	if (!gpregs)
		goto err;
	user_riscv64_regs_entry__init(gpregs);
	ti_riscv64->gpregs = gpregs;

	fpsimd = xmalloc(sizeof(*fpsimd));
	if (!fpsimd)
		goto err;
	user_riscv64_d_ext_entry__init(fpsimd);
	ti_riscv64->fpsimd = fpsimd;
	fpsimd->f = xmalloc(32 * sizeof(fpsimd->f[0]));
	fpsimd->n_f = 32;
	if (!fpsimd->f)
		goto err;

	return 0;
err:
	return -1;
}
void arch_free_thread_info(CoreEntry *core)
{
	if (core->ti_riscv64) {
		if (core->ti_riscv64->fpsimd) {
			xfree(core->ti_riscv64->fpsimd->f);
			xfree(core->ti_riscv64->fpsimd);
		}
		xfree(core->ti_riscv64->gpregs);
		xfree(core->ti_riscv64);
		core->ti_riscv64 = NULL;
	}
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core)
{
	int i;
	UserRiscv64DExtEntry *fpsimd = core->ti_riscv64->fpsimd;

	if (fpsimd->n_f != 32)
		return 1;

	for (i = 0; i < 32; ++i)
		sigframe->uc.uc_mcontext.__fpregs.__d.__f[i] = fpsimd->f[i];
	sigframe->uc.uc_mcontext.__fpregs.__d.__fcsr = fpsimd->fcsr;

	return 0;
}
int restore_gpregs(struct rt_sigframe *f, UserRiscv64RegsEntry *r)
{
	f->uc.uc_mcontext.__gregs[0] = r->pc;
	f->uc.uc_mcontext.__gregs[1] = r->ra;
	f->uc.uc_mcontext.__gregs[2] = r->sp;
	f->uc.uc_mcontext.__gregs[3] = r->gp;
	f->uc.uc_mcontext.__gregs[4] = r->tp;
	f->uc.uc_mcontext.__gregs[5] = r->t0;
	f->uc.uc_mcontext.__gregs[6] = r->t1;
	f->uc.uc_mcontext.__gregs[7] = r->t2;
	f->uc.uc_mcontext.__gregs[8] = r->s0;
	f->uc.uc_mcontext.__gregs[9] = r->s1;
	f->uc.uc_mcontext.__gregs[10] = r->a0;
	f->uc.uc_mcontext.__gregs[11] = r->a1;
	f->uc.uc_mcontext.__gregs[12] = r->a2;
	f->uc.uc_mcontext.__gregs[13] = r->a3;
	f->uc.uc_mcontext.__gregs[14] = r->a4;
	f->uc.uc_mcontext.__gregs[15] = r->a5;
	f->uc.uc_mcontext.__gregs[16] = r->a6;
	f->uc.uc_mcontext.__gregs[17] = r->a7;
	f->uc.uc_mcontext.__gregs[18] = r->s2;
	f->uc.uc_mcontext.__gregs[19] = r->s3;
	f->uc.uc_mcontext.__gregs[20] = r->s4;
	f->uc.uc_mcontext.__gregs[21] = r->s5;
	f->uc.uc_mcontext.__gregs[22] = r->s6;
	f->uc.uc_mcontext.__gregs[23] = r->s7;
	f->uc.uc_mcontext.__gregs[24] = r->s8;
	f->uc.uc_mcontext.__gregs[25] = r->s9;
	f->uc.uc_mcontext.__gregs[26] = r->s10;
	f->uc.uc_mcontext.__gregs[27] = r->s11;
	f->uc.uc_mcontext.__gregs[28] = r->t3;
	f->uc.uc_mcontext.__gregs[29] = r->t4;
	f->uc.uc_mcontext.__gregs[30] = r->t5;
	f->uc.uc_mcontext.__gregs[31] = r->t6;
	return 0;
}
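
The __gregs indices above follow the riscv Linux/glibc sigcontext layout, where slot 0 holds the pc rather than the hardwired zero register. A quick compile-time sanity sketch, assuming glibc's riscv64 <sys/ucontext.h> (the REG_* names need _GNU_SOURCE):

#define _GNU_SOURCE
#include <sys/ucontext.h>

_Static_assert(REG_PC == 0, "__gregs[0] is the program counter");
_Static_assert(REG_SP == 2, "__gregs[2] is the stack pointer");
_Static_assert(REG_TP == 4, "__gregs[4] is the thread pointer");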

criu/arch/riscv64/include/asm/dump.h (new file)

@@ -0,0 +1,15 @@
#ifndef __CR_ASM_DUMP_H__
#define __CR_ASM_DUMP_H__

extern int save_task_regs(void *, user_regs_struct_t *, user_fpregs_struct_t *);
extern int arch_alloc_thread_info(CoreEntry *core);
extern void arch_free_thread_info(CoreEntry *core);

static inline void core_put_tls(CoreEntry *core, tls_t tls)
{
	core->ti_riscv64->tls = tls;
}

#define get_task_futex_robust_list_compat(pid, info) -1

#endif

criu/arch/riscv64/include/asm/int.h (new file)

@@ -0,0 +1,6 @@
#ifndef __CR_ASM_INT_H__
#define __CR_ASM_INT_H__
#include "asm-generic/int.h"
#endif /* __CR_ASM_INT_H__ */

criu/arch/riscv64/include/asm/kerndat.h (new file)

@@ -0,0 +1,7 @@
#ifndef __CR_ASM_KERNDAT_H__
#define __CR_ASM_KERNDAT_H__
#define kdat_compatible_cr() 0
#define kdat_can_map_vdso() 0
#endif /* __CR_ASM_KERNDAT_H__ */

criu/arch/riscv64/include/asm/parasite-syscall.h (new file)

@@ -0,0 +1,6 @@
#ifndef __CR_ASM_PARASITE_SYSCALL_H__
#define __CR_ASM_PARASITE_SYSCALL_H__
struct parasite_ctl;
#endif

criu/arch/riscv64/include/asm/parasite.h (new file)

@@ -0,0 +1,16 @@
#ifndef __ASM_PARASITE_H__
#define __ASM_PARASITE_H__
/*
* This function is used to retrieve the value of the thread pointer (tp)
* in RISC-V architecture, which is typically used for thread-local storage (TLS).
* The value is then stored in the provided tls_t pointer.
*/
static inline void arch_get_tls(tls_t *ptls)
{
	tls_t tls;

	asm("mv %0, tp" : "=r"(tls));

	*ptls = tls;
}
#endif
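
As a sanity check outside of the parasite (a minimal sketch, assuming a riscv64 host built with GCC or Clang), the value read this way should match the compiler's thread-pointer builtin:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t tls;

	/* Same instruction the parasite uses to read the TLS pointer. */
	asm("mv %0, tp" : "=r"(tls));

	/* GCC/Clang expose the thread pointer directly on riscv64. */
	assert((void *)tls == __builtin_thread_pointer());
	return 0;
}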

criu/arch/riscv64/include/asm/restore.h (new file)

@@ -0,0 +1,29 @@
#ifndef __CR_ASM_RESTORE_H__
#define __CR_ASM_RESTORE_H__

#include "asm/restorer.h"
#include "images/core.pb-c.h"

/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
			      task_args) \
	asm volatile( \
		"and sp, %0, ~15 \n" \
		"mv a0, %2 \n" \
		"jr %1 \n" \
		: \
		: "r"(new_sp), \
		  "r"(restore_task_exec_start), \
		  "r"(task_args) \
		: "a0", "memory")
/* clang-format on */

static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
{
	*ptls = pcore->ti_riscv64->tls;
}

int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);

#endif

criu/arch/riscv64/include/asm/restorer.h (new file)

@@ -0,0 +1,150 @@
#ifndef __CR_ASM_RESTORER_H__
#define __CR_ASM_RESTORER_H__

#include <sys/ucontext.h>

#include "asm/types.h"
#include "images/core.pb-c.h"
#include <compel/asm/sigframe.h>

/*
 * Kernel argument order for clone:
 *	unsigned long clone_flags,
 *	unsigned long newsp,
 *	int __user *parent_tidptr,
 *	unsigned long tls,
 *	int __user *child_tidptr
 */
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
			     thread_args, clone_restore_fn) \
	asm volatile( \
		"clone_emul: \n" \
		"ld a1, %2 \n" \
		"andi a1, a1, ~15 \n" \
		"addi a1, a1, -16 \n" \
		"sd %5, 0(a1) \n" \
		"sd %6, 8(a1) \n" \
		"mv a0, %1 \n" \
		"mv a2, %3 \n" \
		"mv a3, %4 \n" \
		"li a7, "__stringify(__NR_clone)" \n" \
		"ecall \n" \
		\
		"beqz a0, thread_run \n" \
		\
		"mv %0, a0 \n" \
		"j clone_end \n" \
		\
		"thread_run: \n" \
		"ld a1, 0(sp) \n" \
		"ld a0, 8(sp) \n" \
		"jr a1 \n" \
		\
		"clone_end: \n" \
		: "=r"(ret) \
		: "r"(clone_flags), \
		  "m"(new_sp), \
		  "r"(&parent_tid), \
		  "r"(&thread_args[i].pid), \
		  "r"(clone_restore_fn), \
		  "r"(&thread_args[i]) \
		: "a0", "a1", "a2", "a3", "a7", "memory")
/*
 * Based on sysdeps/unix/sysv/linux/riscv/clone.S
 *
 * int clone(int (*fn)(void *arg),	a0
 *	     void *child_stack,		a1
 *	     int flags,			a2
 *	     void *arg,			a3
 *	     pid_t *ptid,		a4
 *	     struct user_desc *tls,	a5
 *	     pid_t *ctid);		a6
 *
 * int clone3(struct clone_args *args,	a0
 *	      size_t size);		a1
 *
 * Always consult the CLONE3 wrappers for other architectures
 * for additional details.
 */
#define RUN_CLONE3_RESTORE_FN(ret, clone_args, size, args, \
			      clone_restore_fn) \
	asm volatile( \
		/* In contrast to the clone() wrapper above this does not put
		 * the thread function and its arguments on the child stack,
		 * but uses registers to pass these parameters to the child process.
		 * Based on the glibc clone() wrapper at
		 * sysdeps/unix/sysv/linux/riscv/clone.S.
		 */ \
		"clone3_emul: \n" \
		/*
		 * Stash the thread function and its argument in registers
		 * that survive the syscall: t0 holds the thread function
		 * and t1 the thread arguments.
		 */ \
		"mv t0, %3 /* clone_restore_fn */ \n" \
		"mv t1, %4 /* args */ \n" \
		"mv a0, %1 /* &clone_args */ \n" \
		"mv a1, %2 /* size */ \n" \
		/* Load syscall number */ \
		"li a7, "__stringify(__NR_clone3)" \n" \
		/* Do the syscall */ \
		"ecall \n" \
		\
		"beqz a0, clone3_thread_run \n" \
		\
		"mv %0, a0 \n" \
		"j clone3_end \n" \
		\
		"clone3_thread_run: \n" \
		/* Move args to a0 */ \
		"mv a0, t1 \n" \
		/* Jump to clone_restore_fn */ \
		"jr t0 \n" \
		\
		"clone3_end: \n" \
		: "=r"(ret) \
		: "r"(&clone_args), \
		  "r"(size), \
		  "r"(clone_restore_fn), \
		  "r"(args) \
		: "a0", "a1", "a7", "t0", "t1", "memory")
#define ARCH_FAIL_CORE_RESTORE \
	asm volatile( \
		"mv sp, %0 \n" \
		"li a0, 0 \n" \
		"jr x0 \n" \
		: \
		: "r"(ret) \
		: "sp", "a0", "memory")
/* clang-format on */

#define arch_map_vdso(map, compat) -1

int restore_gpregs(struct rt_sigframe *f, UserRiscv64RegsEntry *r);
int restore_nonsigframe_gpregs(UserRiscv64RegsEntry *r);

static inline void restore_tls(tls_t *ptls)
{
	asm("mv tp, %0" : : "r"(*ptls));
}

static inline void *alloc_compat_syscall_stack(void)
{
	return NULL;
}

static inline void free_compat_syscall_stack(void *stack32)
{
}

static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
	return -1;
}

static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
	return -1;
}

#endif

criu/arch/riscv64/include/asm/thread_pointer.h (new file)

@@ -0,0 +1,27 @@
/* __thread_pointer definition.  Generic version.
   Copyright (C) 2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _SYS_THREAD_POINTER_H
#define _SYS_THREAD_POINTER_H

static inline void *__criu_thread_pointer(void)
{
	return __builtin_thread_pointer();
}

#endif /* _SYS_THREAD_POINTER_H */

criu/arch/riscv64/include/asm/types.h (new file)

@@ -0,0 +1,40 @@
#ifndef __CR_ASM_TYPES_H__
#define __CR_ASM_TYPES_H__

#include <stdbool.h>
#include <signal.h>
#include <asm/ptrace.h>

#include "images/core.pb-c.h"

#include "page.h"
#include "bitops.h"
#include "asm/int.h"

#include <compel/plugins/std/asm/syscall-types.h>

#define core_is_compat(core) false

typedef UserRiscv64RegsEntry UserRegsEntry;

#define CORE_ENTRY__MARCH CORE_ENTRY__MARCH__RISCV64

#define CORE_THREAD_ARCH_INFO(core) core->ti_riscv64

#define TI_SP(core) ((core)->ti_riscv64->gpregs->sp)
#define TI_IP(core) ((core)->ti_riscv64->gpregs->pc)

static inline void *decode_pointer(uint64_t v)
{
	return (void *)v;
}

static inline uint64_t encode_pointer(void *p)
{
	return (uint64_t)p;
}

#define AT_VECTOR_SIZE 64

typedef uint64_t auxv_t;
typedef uint64_t tls_t;

#endif /* __CR_ASM_TYPES_H__ */

criu/arch/riscv64/include/asm/vdso.h (new file)

@@ -0,0 +1,28 @@
#ifndef __CR_ASM_VDSO_H__
#define __CR_ASM_VDSO_H__

#include "asm/int.h"
#include "common/compiler.h"
#include "asm-generic/vdso.h"

/*
 * This is the minimal set of symbols
 * we need to support at the moment.
 */
#define VDSO_SYMBOL_MAX	 6
#define VDSO_SYMBOL_GTOD 2

#define ARCH_VDSO_SYMBOLS_LIST \
	const char *rv64_vdso_symbol1 = "__vdso_clock_getres"; \
	const char *rv64_vdso_symbol2 = "__vdso_clock_gettime"; \
	const char *rv64_vdso_symbol3 = "__vdso_gettimeofday"; \
	const char *rv64_vdso_symbol4 = "__vdso_getcpu"; \
	const char *rv64_vdso_symbol5 = "__vdso_flush_icache"; \
	const char *rv64_vdso_symbol6 = "__vdso_rt_sigreturn";

#define ARCH_VDSO_SYMBOLS \
	rv64_vdso_symbol1, rv64_vdso_symbol2, rv64_vdso_symbol3, rv64_vdso_symbol4, rv64_vdso_symbol5, rv64_vdso_symbol6

extern void write_intraprocedure_branch(unsigned long to, unsigned long from);

#endif /* __CR_ASM_VDSO_H__ */
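
The two macros are consumed by criu's arch-independent vDSO code; roughly, the expansion looks like this (a sketch of the usage pattern, not code from this commit):

ARCH_VDSO_SYMBOLS_LIST

static const char *vdso_symbols[VDSO_SYMBOL_MAX] = {
	ARCH_VDSO_SYMBOLS
};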

criu/arch/riscv64/restorer.c (new file)

@@ -0,0 +1,14 @@
#include <unistd.h>
#include "restorer.h"
#include "asm/restorer.h"
#include <compel/plugins/std/syscall.h>
#include "log.h"
#include <compel/asm/fpu.h>
#include "cpu.h"
int restore_nonsigframe_gpregs(UserRiscv64RegsEntry *r)
{
	return 0;
}

criu/arch/riscv64/sigframe.c (new file)

@@ -0,0 +1,8 @@
#include "asm/types.h"
#include <compel/asm/infect-types.h>
#include "asm/sigframe.h"
int sigreturn_prep_fpu_frame(struct rt_sigframe *sigframe, struct rt_sigframe *rsigframe)
{
	return 0;
}

criu/arch/riscv64/vdso-lookup.S (new file)

@@ -0,0 +1,15 @@
#include "common/asm/linkage.h"
.section .text
/* Expects t0 to hold the index into the lookup table. */
GLOBAL(riscv_vdso_lookup)
/* Get the beginning of the lookup table */
la t1, riscv_vdso_lookup_end
/* Scale the index */
slli t0, t0, 3
add t1, t0, t1
ld t2, 0(t1)
jr t2
GLOBAL(riscv_vdso_lookup_end)

criu/arch/riscv64/vdso-pie.c (new file)

@@ -0,0 +1,159 @@
#include <unistd.h>
#include "asm/types.h"
#include <compel/asm/instruction_formats.h>
#include <compel/plugins/std/string.h>
#include <compel/plugins/std/syscall.h>
#include <compel/plugins/std/syscall-codes.h>
#include "atomic.h"
#include "parasite-vdso.h"
#include "log.h"
#include "common/bug.h"
#ifdef LOG_PREFIX
#undef LOG_PREFIX
#endif
#define LOG_PREFIX "vdso: "
/* These symbols are defined in vdso-lookup.S */
extern char *riscv_vdso_lookup, *riscv_vdso_lookup_end;
/*
* li t0, INDEX
* jal x0, riscv_vdso_lookup
*/
#define TRAMP_CALL_SIZE (2 * sizeof(uint32_t))
static inline void invalidate_caches(void)
{
	// We're supposed to use the VDSO as the officially sanctioned ABI. But oh well.
	int ret;

	__smp_mb();
	asm volatile("li a0, 0\n"
		     "li a1, 0\n"
		     "li a2, 1\n" /* SYS_RISCV_FLUSH_ICACHE_ALL */
		     "li a7, 259\n" /* __NR_arch_specific_syscall */
		     "ecall\n"
		     : "=r"(ret)
		     :
		     : "a7");
}
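
Outside of a parasite, where glibc is available, the same operation is reachable without a raw ecall; a hedged sketch, assuming glibc's riscv64 <sys/cachectl.h> (__riscv_flush_icache() wraps syscall 259, __NR_riscv_flush_icache):

#include <sys/cachectl.h>

/* Flush the instruction cache for [start, end); flags = 0 requests a
 * flush that becomes visible on all harts. */
static void flush_icache_range(void *start, void *end)
{
	__riscv_flush_icache(start, end, 0);
}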
static inline size_t vdso_trampoline_size(void)
{
	return (size_t)&riscv_vdso_lookup_end - (size_t)&riscv_vdso_lookup;
}
static uint64_t put_trampoline(uint64_t at, struct vdso_symtable *sym)
{
	int i, j;
	uint64_t total_size, trampoline_size;
	uint64_t trampoline = 0;

	/* First of all we have to find a place where to put the trampoline
	 * code.
	 */
	trampoline_size = vdso_trampoline_size();
	total_size = trampoline_size + VDSO_SYMBOL_MAX * sizeof(uint64_t);

	for (i = 0; i < ARRAY_SIZE(sym->symbols); i++) {
		if (vdso_symbol_empty(&sym->symbols[i]))
			continue;

		pr_debug("Checking '%s' at %lx\n", sym->symbols[i].name, sym->symbols[i].offset);

		/* find the nearest following symbol we are interested in */
		for (j = 0; j < ARRAY_SIZE(sym->symbols); j++) {
			if (i == j || vdso_symbol_empty(&sym->symbols[j]))
				continue;

			if (sym->symbols[j].offset <= sym->symbols[i].offset)
				/* this symbol is above the current one */
				continue;

			if ((sym->symbols[i].offset + TRAMP_CALL_SIZE) > sym->symbols[j].offset) {
				/* we have a major issue here since we cannot
				 * even put the trampoline call for this symbol
				 */
				pr_err("Can't handle small vDSO symbol %s\n", sym->symbols[i].name);
				return 0;
			}

			if (trampoline)
				/* no need to put it twice */
				continue;

			if ((sym->symbols[j].offset - (sym->symbols[i].offset + TRAMP_CALL_SIZE)) <= total_size)
				/* not enough place */
				continue;

			/* We can put the trampoline there */
			trampoline = at + sym->symbols[i].offset;
			trampoline += TRAMP_CALL_SIZE;

			pr_debug("Putting vDSO trampoline in %s at %lx\n", sym->symbols[i].name, trampoline);
			memcpy((void *)trampoline, &riscv_vdso_lookup, trampoline_size);
			invalidate_caches();

			return trampoline;
		}
	}

	return 0;
}
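
To make the placement concrete, here is a worked example mirroring the arithmetic above (every address and size is made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t at = 0x7f0000000000UL; /* vDSO load address (hypothetical) */
	uint64_t sym_off = 0x800;	/* offset of a large-enough donor symbol */
	uint64_t tramp_call = 8;	/* TRAMP_CALL_SIZE: li + jal */
	uint64_t tramp_size = 24;	/* vdso_trampoline_size() (hypothetical) */

	uint64_t trampoline = at + sym_off + tramp_call;

	printf("patched call site: %#lx\n", at + sym_off);
	printf("trampoline code:   %#lx\n", trampoline);
	printf("lookup table:      %#lx\n", trampoline + tramp_size);
	return 0;
}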
static inline void put_trampoline_call(uint64_t from, uint64_t to, uint64_t trampoline, unsigned int idx)
{
	size_t trampoline_size = vdso_trampoline_size();
	uint64_t *lookup_table = NULL;
	/*
	 * li t0, INDEX (a pseudoinstruction for: addi t0, x0, INDEX)
	 * jal x0, riscv_vdso_lookup
	 */
	uint32_t trampoline_call[2] = {
		0x00000293,
		0x0000006f,
	};
	const size_t insts_len = ARRAY_SIZE(trampoline_call);
	uint32_t *call_addr = (uint32_t *)from;
	// Offset from the jal instruction to the lookup trampoline.
	ssize_t trampoline_offset = trampoline - (from + sizeof(uint32_t));

	trampoline_call[0] = trampoline_call[0] | (idx << 24);
	trampoline_call[1] = trampoline_call[1] | riscv_j_imm(trampoline_offset);

	for (unsigned int i = 0; i < insts_len; i++) {
		call_addr[i] = trampoline_call[i];
	}

	// Set the lookup table pointer for this vdso symbol.
	lookup_table = (uint64_t *)(trampoline + trampoline_size);
	lookup_table[idx] = to;
}
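
riscv_j_imm() comes from compel's instruction_formats.h, which is not part of this hunk. A plausible equivalent of the packing it performs — J-type immediates scatter imm[20|10:1|11|19:12] across bits 31:12 of the instruction word — might look like this (a sketch, not the actual compel code):

#include <stdint.h>

static inline uint32_t j_imm_sketch(int32_t offset)
{
	uint32_t imm = (uint32_t)offset;

	return ((imm & 0x100000) << 11) | /* imm[20]    -> bit 31     */
	       ((imm & 0x0007fe) << 20) | /* imm[10:1]  -> bits 30:21 */
	       ((imm & 0x000800) << 9)  | /* imm[11]    -> bit 20     */
	       (imm & 0x0ff000);	  /* imm[19:12] -> bits 19:12 */
}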
int vdso_redirect_calls(uint64_t base_to, uint64_t base_from, struct vdso_symtable *to, struct vdso_symtable *from,
			bool __always_unused compat_vdso)
{
	unsigned int i, valid_idx = 0;
	uint64_t trampoline = (uint64_t)put_trampoline(base_from, from);

	if (!trampoline)
		return 1;

	for (i = 0; i < ARRAY_SIZE(to->symbols); i++) {
		if (vdso_symbol_empty(&from->symbols[i]))
			continue;

		pr_debug("br: %lx/%lx -> %lx/%lx (index %d) '%s'\n", base_from, from->symbols[i].offset, base_to,
			 to->symbols[i].offset, i, from->symbols[i].name);

		put_trampoline_call(base_from + from->symbols[i].offset, base_to + to->symbols[i].offset, trampoline,
				    valid_idx);
		valid_idx++;
	}

	invalidate_caches();

	return 0;
}

criu/pie/Makefile

@@ -23,6 +23,10 @@ ifeq ($(ARCH),x86)
ccflags-y += -mshstk
endif
ifeq ($(ARCH),riscv64)
ccflags-y += -fno-stack-protector
endif
LDS := compel/arch/$(ARCH)/scripts/compel-pack.lds.S
restorer-obj-y += parasite-vdso.o ./$(ARCH_DIR)/vdso-pie.o
@@ -43,6 +47,10 @@ ifeq ($(ARCH),ppc64)
restorer-obj-y += ./$(ARCH_DIR)/vdso-trampoline.o
endif
ifeq ($(ARCH),riscv64)
restorer-obj-y += ./$(ARCH_DIR)/vdso-lookup.o
endif
define gen-pie-rules
$(1)-obj-y += $(1).o
$(1)-obj-e += pie.lib.a


@@ -27,3 +27,7 @@ CFLAGS += $(CFLAGS_PIE)
ifeq ($(ARCH),mips)
CFLAGS += -fno-stack-protector -DCR_NOGLIBC -mno-abicalls -fno-pic
endif
ifeq ($(ARCH),riscv64)
ccflags-y += -fno-stack-protector
endif