Mirror of https://github.com/checkpoint-restore/criu

Run 'make indent' on header files

Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Adrian Reber <areber@redhat.com>
Adrian Reber 2021-07-19 07:39:51 +00:00 committed by Andrei Vagin
parent 93dd984ca0
commit 70833bcf29
252 changed files with 4746 additions and 5011 deletions

View File

@ -1,3 +1,3 @@
#ifndef __NR_openat
# define __NR_openat 56
#define __NR_openat 56
#endif

View File

@ -1,4 +1,8 @@
#ifndef __COMPEL_SYSCALL_H__
#define __COMPEL_SYSCALL_H__
#define __NR(syscall, compat) ({ (void)compat; __NR_##syscall; })
#define __NR(syscall, compat) \
({ \
(void)compat; \
__NR_##syscall; \
})
#endif
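
Note: the __NR() helper being re-wrapped above is a GCC/Clang statement expression; (void)compat discards the unused argument and the last expression, __NR_##syscall, becomes the value of the whole macro, so 'make indent' only changes its layout, not its behaviour. A minimal stand-alone sketch of the same pattern (the names and the number here are illustrative, not CRIU's):

#include <stdio.h>

/* Same shape as the __NR() macro in the diff: a GNU statement
 * expression whose last expression is the macro's value.
 * The syscall number below is made up for illustration. */
#define FAKE_NR_openat 56
#define FAKE_NR(syscall, compat)   \
	({                         \
		(void)compat;      \
		FAKE_NR_##syscall; \
	})

int main(void)
{
	/* Expands to 56; the second argument is evaluated and discarded. */
	printf("openat -> %d\n", FAKE_NR(openat, 0));
	return 0;
}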

View File

@ -1,6 +1,7 @@
#ifndef UAPI_COMPEL_ASM_CPU_H__
#define UAPI_COMPEL_ASM_CPU_H__
typedef struct { } compel_cpuinfo_t;
typedef struct {
} compel_cpuinfo_t;
#endif /* UAPI_COMPEL_ASM_CPU_H__ */

View File

@ -32,6 +32,10 @@ typedef struct user_fpsimd_state user_fpregs_struct_t;
#define ARCH_SI_TRAP TRAP_BRKPT
#define __NR(syscall, compat) ({ (void)compat; __NR_##syscall; })
#define __NR(syscall, compat) \
({ \
(void)compat; \
__NR_##syscall; \
})
#endif /* UAPI_COMPEL_ASM_TYPES_H__ */

View File

@ -32,6 +32,7 @@ struct rt_sigframe {
uint64_t lr;
};
/* clang-format off */
#define ARCH_RT_SIGRETURN(new_sp, rt_sigframe) \
asm volatile( \
"mov sp, %0 \n" \
@ -40,6 +41,7 @@ struct rt_sigframe {
: \
: "r"(new_sp) \
: "x8", "memory")
/* clang-format on */
/* cr_sigcontext is copied from arch/arm64/include/uapi/asm/sigcontext.h */
struct cr_sigcontext {
@ -57,13 +59,11 @@ struct cr_sigcontext {
#define RT_SIGFRAME_REGIP(rt_sigframe) ((long unsigned int)(rt_sigframe)->uc.uc_mcontext.pc)
#define RT_SIGFRAME_HAS_FPU(rt_sigframe) (1)
#define RT_SIGFRAME_SIGCONTEXT(rt_sigframe) ((struct cr_sigcontext *)&(rt_sigframe)->uc.uc_mcontext)
#define RT_SIGFRAME_AUX_CONTEXT(rt_sigframe) ((struct aux_context*)&(RT_SIGFRAME_SIGCONTEXT(rt_sigframe)->__reserved))
#define RT_SIGFRAME_AUX_CONTEXT(rt_sigframe) ((struct aux_context *)&(RT_SIGFRAME_SIGCONTEXT(rt_sigframe)->__reserved))
#define RT_SIGFRAME_FPU(rt_sigframe) (&RT_SIGFRAME_AUX_CONTEXT(rt_sigframe)->fpsimd)
#define RT_SIGFRAME_OFFSET(rt_sigframe) 0
#define rt_sigframe_erase_sigset(sigframe) \
memset(&sigframe->uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) \
memcpy(&sigframe->uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#define rt_sigframe_erase_sigset(sigframe) memset(&sigframe->uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) memcpy(&sigframe->uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#endif /* UAPI_COMPEL_ASM_SIGFRAME_H__ */
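
Note: several of these sigframe headers gain /* clang-format off */ ... /* clang-format on */ guards around the hand-aligned ARCH_RT_SIGRETURN assembly macros so the formatter leaves their column layout alone. A hedged, self-contained illustration of the same guard technique (the macro below is invented for the example, not CRIU's):

/* clang-format off */
/* Everything between the guards keeps its manual alignment. */
#define DO_NOTHING_ASM(val)        \
	asm volatile(              \
		"nop \n"           \
		:                  \
		: "r"((long)(val)) \
		: "memory")
/* clang-format on */

int main(void)
{
	DO_NOTHING_ASM(42); /* emits a single nop; the input operand is unused */
	return 0;
}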

View File

@ -1,27 +1,27 @@
#ifndef __NR_mmap2
# define __NR_mmap2 192
#define __NR_mmap2 192
#endif
#ifndef __ARM_NR_BASE
# define __ARM_NR_BASE 0x0f0000
#define __ARM_NR_BASE 0x0f0000
#endif
#ifndef __ARM_NR_breakpoint
# define __ARM_NR_breakpoint (__ARM_NR_BASE+1)
#define __ARM_NR_breakpoint (__ARM_NR_BASE + 1)
#endif
#ifndef __ARM_NR_cacheflush
# define __ARM_NR_cacheflush (__ARM_NR_BASE+2)
#define __ARM_NR_cacheflush (__ARM_NR_BASE + 2)
#endif
#ifndef __ARM_NR_usr26
# define __ARM_NR_usr26 (__ARM_NR_BASE+3)
#define __ARM_NR_usr26 (__ARM_NR_BASE + 3)
#endif
#ifndef __ARM_NR_usr32
# define __ARM_NR_usr32 (__ARM_NR_BASE+4)
#define __ARM_NR_usr32 (__ARM_NR_BASE + 4)
#endif
#ifndef __ARM_NR_set_tls
# define __ARM_NR_set_tls (__ARM_NR_BASE+5)
#define __ARM_NR_set_tls (__ARM_NR_BASE + 5)
#endif

View File

@ -1,4 +1,8 @@
#ifndef __COMPEL_SYSCALL_H__
#define __COMPEL_SYSCALL_H__
#define __NR(syscall, compat) ({ (void)compat; __NR_##syscall; })
#define __NR(syscall, compat) \
({ \
(void)compat; \
__NR_##syscall; \
})
#endif

View File

@ -1,6 +1,7 @@
#ifndef UAPI_COMPEL_ASM_CPU_H__
#define UAPI_COMPEL_ASM_CPU_H__
typedef struct { } compel_cpuinfo_t;
typedef struct {
} compel_cpuinfo_t;
#endif /* UAPI_COMPEL_ASM_CPU_H__ */

View File

@ -43,7 +43,6 @@ typedef struct user_vfp user_fpregs_struct_t;
#define ARM_r0 uregs[0]
#define ARM_ORIG_r0 uregs[17]
/* Copied from arch/arm/include/asm/user.h */
struct user_vfp {
@ -66,6 +65,10 @@ struct user_vfp_exc {
#define ARCH_SI_TRAP TRAP_BRKPT
#define __NR(syscall, compat) ({ (void)compat; __NR_##syscall; })
#define __NR(syscall, compat) \
({ \
(void)compat; \
__NR_##syscall; \
})
#endif /* UAPI_COMPEL_ASM_TYPES_H__ */

View File

@ -65,7 +65,7 @@ struct rt_sigframe {
struct sigframe sig;
};
/* clang-format off */
#define ARCH_RT_SIGRETURN(new_sp, rt_sigframe) \
asm volatile( \
"mov sp, %0 \n" \
@ -74,6 +74,7 @@ struct rt_sigframe {
: \
: "r"(new_sp) \
: "memory")
/* clang-format on */
#define RT_SIGFRAME_UC(rt_sigframe) (&rt_sigframe->sig.uc)
#define RT_SIGFRAME_REGIP(rt_sigframe) (rt_sigframe)->sig.uc.uc_mcontext.arm_ip
@ -82,9 +83,7 @@ struct rt_sigframe {
#define RT_SIGFRAME_FPU(rt_sigframe) (&RT_SIGFRAME_AUX_SIGFRAME(rt_sigframe)->vfp)
#define RT_SIGFRAME_OFFSET(rt_sigframe) 0
#define rt_sigframe_erase_sigset(sigframe) \
memset(&sigframe->sig.uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) \
memcpy(&sigframe->sig.uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#define rt_sigframe_erase_sigset(sigframe) memset(&sigframe->sig.uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) memcpy(&sigframe->sig.uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#endif /* UAPI_COMPEL_ASM_SIGFRAME_H__ */

View File

@ -9,8 +9,7 @@
#include <errno.h>
#define sys_recv(sockfd, ubuf, size, flags) \
sys_recvfrom(sockfd, ubuf, size, flags, NULL, NULL)
#define sys_recv(sockfd, ubuf, size, flags) sys_recvfrom(sockfd, ubuf, size, flags, NULL, NULL)
typedef struct prologue_init_args {
struct sockaddr_un ctl_sock_addr;

View File

@ -29,37 +29,25 @@ struct La_mips_64_regs;
struct La_mips_64_retval;
#define ARCH_PLTENTER_MEMBERS \
Elf32_Addr (*mips_o32_gnu_pltenter) (Elf32_Sym *, unsigned int, \
uintptr_t *, uintptr_t *, \
struct La_mips_32_regs *, \
unsigned int *, const char *name, \
Elf32_Addr (*mips_o32_gnu_pltenter)(Elf32_Sym *, unsigned int, uintptr_t *, uintptr_t *, \
struct La_mips_32_regs *, unsigned int *, const char *name, \
long int *framesizep); \
Elf32_Addr (*mips_n32_gnu_pltenter) (Elf32_Sym *, unsigned int, \
uintptr_t *, uintptr_t *, \
struct La_mips_64_regs *, \
unsigned int *, const char *name, \
Elf32_Addr (*mips_n32_gnu_pltenter)(Elf32_Sym *, unsigned int, uintptr_t *, uintptr_t *, \
struct La_mips_64_regs *, unsigned int *, const char *name, \
long int *framesizep); \
Elf64_Addr (*mips_n64_gnu_pltenter) (Elf64_Sym *, unsigned int, \
uintptr_t *, uintptr_t *, \
struct La_mips_64_regs *, \
unsigned int *, const char *name, \
Elf64_Addr (*mips_n64_gnu_pltenter)(Elf64_Sym *, unsigned int, uintptr_t *, uintptr_t *, \
struct La_mips_64_regs *, unsigned int *, const char *name, \
long int *framesizep);
#define ARCH_PLTEXIT_MEMBERS \
unsigned int (*mips_o32_gnu_pltexit) (Elf32_Sym *, unsigned int, \
uintptr_t *, uintptr_t *, \
const struct La_mips_32_regs *, \
struct La_mips_32_retval *, \
unsigned int (*mips_o32_gnu_pltexit)(Elf32_Sym *, unsigned int, uintptr_t *, uintptr_t *, \
const struct La_mips_32_regs *, struct La_mips_32_retval *, \
const char *); \
unsigned int (*mips_n32_gnu_pltexit) (Elf32_Sym *, unsigned int, \
uintptr_t *, uintptr_t *, \
const struct La_mips_64_regs *, \
struct La_mips_64_retval *, \
unsigned int (*mips_n32_gnu_pltexit)(Elf32_Sym *, unsigned int, uintptr_t *, uintptr_t *, \
const struct La_mips_64_regs *, struct La_mips_64_retval *, \
const char *); \
unsigned int (*mips_n64_gnu_pltexit) (Elf64_Sym *, unsigned int, \
uintptr_t *, uintptr_t *, \
const struct La_mips_64_regs *, \
struct La_mips_64_retval *, \
unsigned int (*mips_n64_gnu_pltexit)(Elf64_Sym *, unsigned int, uintptr_t *, uintptr_t *, \
const struct La_mips_64_regs *, struct La_mips_64_retval *, \
const char *);
/* The MIPS ABI specifies that the dynamic section has to be read-only. */
@ -80,8 +68,7 @@ struct La_mips_64_retval;
/* An entry in a 64 bit SHT_REL section. */
typedef struct
{
typedef struct {
Elf32_Word r_sym; /* Symbol index */
unsigned char r_ssym; /* Special symbol for 2nd relocation */
unsigned char r_type3; /* 3rd relocation type */
@ -89,45 +76,35 @@ typedef struct
unsigned char r_type1; /* 1st relocation type */
} _Elf64_Mips_R_Info;
typedef union
{
typedef union {
Elf64_Xword r_info_number;
_Elf64_Mips_R_Info r_info_fields;
} _Elf64_Mips_R_Info_union;
typedef struct
{
typedef struct {
Elf64_Addr r_offset; /* Address */
_Elf64_Mips_R_Info_union r_info; /* Relocation type and symbol index */
} Elf64_Mips_Rel;
typedef struct
{
typedef struct {
Elf64_Addr r_offset; /* Address */
_Elf64_Mips_R_Info_union r_info; /* Relocation type and symbol index */
Elf64_Sxword r_addend; /* Addend */
} Elf64_Mips_Rela;
#define ELF64_MIPS_R_SYM(i) \
((__extension__ (_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_sym)
#define ELF64_MIPS_R_SYM(i) ((__extension__(_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_sym)
#define ELF64_MIPS_R_TYPE(i) \
(((_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_type1 \
| ((Elf32_Word)(__extension__ (_Elf64_Mips_R_Info_union)(i) \
).r_info_fields.r_type2 << 8) \
| ((Elf32_Word)(__extension__ (_Elf64_Mips_R_Info_union)(i) \
).r_info_fields.r_type3 << 16) \
| ((Elf32_Word)(__extension__ (_Elf64_Mips_R_Info_union)(i) \
).r_info_fields.r_ssym << 24))
(((_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_type1 | \
((Elf32_Word)(__extension__(_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_type2 << 8) | \
((Elf32_Word)(__extension__(_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_type3 << 16) | \
((Elf32_Word)(__extension__(_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_ssym << 24))
#define ELF64_MIPS_R_INFO(sym, type) \
(__extension__ (_Elf64_Mips_R_Info_union) \
(__extension__ (_Elf64_Mips_R_Info) \
{ (sym), ELF64_MIPS_R_SSYM (type), \
ELF64_MIPS_R_TYPE3 (type), \
ELF64_MIPS_R_TYPE2 (type), \
ELF64_MIPS_R_TYPE1 (type) \
}).r_info_number)
(__extension__(_Elf64_Mips_R_Info_union)( \
__extension__(_Elf64_Mips_R_Info){ (sym), ELF64_MIPS_R_SSYM(type), ELF64_MIPS_R_TYPE3(type), \
ELF64_MIPS_R_TYPE2(type), ELF64_MIPS_R_TYPE1(type) }) \
.r_info_number)
/*
* These macros decompose the value returned by ELF64_MIPS_R_TYPE, and
@ -137,20 +114,17 @@ typedef struct
#define ELF64_MIPS_R_SSYM(i) (((i) >> 24) & 0xff)
#define ELF64_MIPS_R_TYPE3(i) (((i) >> 16) & 0xff)
#define ELF64_MIPS_R_TYPE2(i) (((i) >> 8) & 0xff)
#define ELF64_MIPS_R_TYPE1(i) ((i) & 0xff)
#define ELF64_MIPS_R_TYPE1(i) ((i)&0xff)
#define ELF64_MIPS_R_TYPEENC(type1, type2, type3, ssym) \
((type1) \
| ((Elf32_Word)(type2) << 8) \
| ((Elf32_Word)(type3) << 16) \
| ((Elf32_Word)(ssym) << 24))
((type1) | ((Elf32_Word)(type2) << 8) | ((Elf32_Word)(type3) << 16) | ((Elf32_Word)(ssym) << 24))
#undef ELF64_R_SYM
#define ELF64_R_SYM(i) ELF64_MIPS_R_SYM (i)
#define ELF64_R_SYM(i) ELF64_MIPS_R_SYM(i)
#undef ELF64_R_TYPE
/*fixme*/
#define ELF64_R_TYPE(i) (ELF64_MIPS_R_TYPE (i) & 0x00ff)
#define ELF64_R_TYPE(i) (ELF64_MIPS_R_TYPE(i) & 0x00ff)
#undef ELF64_R_INFO
#define ELF64_R_INFO(sym, type) ELF64_MIPS_R_INFO ((sym), (type))
#define ELF64_R_INFO(sym, type) ELF64_MIPS_R_INFO((sym), (type))
#endif

View File

@ -1,5 +1,6 @@
#ifndef __CR_ASM_CPU_H__
#define __CR_ASM_CPU_H__
typedef struct { } compel_cpuinfo_t;
typedef struct {
} compel_cpuinfo_t;
#endif /* __CR_ASM_CPU_H__ */

View File

@ -35,7 +35,6 @@ typedef struct {
__u32 fpu_fcr31;
__u32 fpu_id;
} user_fpregs_struct_t;
#define MIPS_a0 regs[4] //arguments a0-a3
@ -45,7 +44,6 @@ typedef struct {
#define MIPS_sp regs[29]
#define MIPS_ra regs[31]
#define NATIVE_MAGIC 0x0A
#define COMPAT_MAGIC 0x0C
static inline bool user_regs_native(user_regs_struct_t *pregs)

View File

@ -13,11 +13,10 @@
/* sigcontext defined in /usr/include/asm/sigcontext.h*/
#define rt_sigcontext sigcontext
#include <compel/sigframe-common.h>
/* refer to linux-3.10/include/uapi/asm-generic/ucontext.h */
struct k_ucontext{
struct k_ucontext {
unsigned long uc_flags;
struct k_ucontext *uc_link;
stack_t uc_stack;
@ -33,17 +32,15 @@ struct rt_sigframe {
struct k_ucontext rs_uc;
};
#define RT_SIGFRAME_UC(rt_sigframe) (&rt_sigframe->rs_uc)
#define RT_SIGFRAME_UC_SIGMASK(rt_sigframe) ((k_rtsigset_t *)(void *)&rt_sigframe->rs_uc.uc_sigmask)
#define RT_SIGFRAME_REGIP(rt_sigframe) ((long unsigned int)0x00)
#define RT_SIGFRAME_FPU(rt_sigframe)
#define RT_SIGFRAME_HAS_FPU(rt_sigframe) 1
#define RT_SIGFRAME_OFFSET(rt_sigframe) 0
/* clang-format off */
#define ARCH_RT_SIGRETURN(new_sp, rt_sigframe) \
asm volatile( \
"move $29, %0 \n" \
@ -52,12 +49,10 @@ struct rt_sigframe {
: \
: "r"(new_sp) \
: "$2","memory")
/* clang-format on */
int sigreturn_prep_fpu_frame(struct rt_sigframe *sigframe,
struct rt_sigframe *rsigframe);
int sigreturn_prep_fpu_frame(struct rt_sigframe *sigframe, struct rt_sigframe *rsigframe);
#define rt_sigframe_erase_sigset(sigframe) \
memset(&sigframe->rs_uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) \
memcpy(&sigframe->rs_uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#define rt_sigframe_erase_sigset(sigframe) memset(&sigframe->rs_uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) memcpy(&sigframe->rs_uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#endif /* UAPI_COMPEL_ASM_SIGFRAME_H__ */

View File

@ -9,8 +9,7 @@
#ifndef _UAPI_ASM_SIGINFO_H
#define _UAPI_ASM_SIGINFO_H
#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int))
#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2 * sizeof(int))
#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */
#define HAVE_ARCH_SIGINFO_T
@ -61,7 +60,7 @@ typedef struct siginfo {
struct {
__kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
char _pad[sizeof(__ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;

View File

@ -1,4 +1,8 @@
#ifndef __COMPEL_SYSCALL_H__
#define __COMPEL_SYSCALL_H__
#define __NR(syscall, compat) ({ (void)compat; __NR_##syscall; })
#define __NR(syscall, compat) \
({ \
(void)compat; \
__NR_##syscall; \
})
#endif

View File

@ -39,20 +39,20 @@ typedef struct {
#define USER_FPREGS_FL_TM 0x00010
#ifndef NT_PPC_TM_SPR
# define NT_PPC_TM_CGPR 0x108 /* TM checkpointed GPR Registers */
# define NT_PPC_TM_CFPR 0x109 /* TM checkpointed FPR Registers */
# define NT_PPC_TM_CVMX 0x10a /* TM checkpointed VMX Registers */
# define NT_PPC_TM_CVSX 0x10b /* TM checkpointed VSX Registers */
# define NT_PPC_TM_SPR 0x10c /* TM Special Purpose Registers */
#define NT_PPC_TM_CGPR 0x108 /* TM checkpointed GPR Registers */
#define NT_PPC_TM_CFPR 0x109 /* TM checkpointed FPR Registers */
#define NT_PPC_TM_CVMX 0x10a /* TM checkpointed VMX Registers */
#define NT_PPC_TM_CVSX 0x10b /* TM checkpointed VSX Registers */
#define NT_PPC_TM_SPR 0x10c /* TM Special Purpose Registers */
#endif
#define MSR_TMA (1UL<<34) /* bit 29 Trans Mem state: Transactional */
#define MSR_TMS (1UL<<33) /* bit 30 Trans Mem state: Suspended */
#define MSR_TM (1UL<<32) /* bit 31 Trans Mem Available */
#define MSR_VEC (1UL<<25)
#define MSR_VSX (1UL<<23)
#define MSR_TMA (1UL << 34) /* bit 29 Trans Mem state: Transactional */
#define MSR_TMS (1UL << 33) /* bit 30 Trans Mem state: Suspended */
#define MSR_TM (1UL << 32) /* bit 31 Trans Mem Available */
#define MSR_VEC (1UL << 25)
#define MSR_VSX (1UL << 23)
#define MSR_TM_ACTIVE(x) ((((x) & MSR_TM) && ((x)&(MSR_TMA|MSR_TMS))) != 0)
#define MSR_TM_ACTIVE(x) ((((x)&MSR_TM) && ((x) & (MSR_TMA | MSR_TMS))) != 0)
typedef struct {
uint64_t fpregs[NFPREG];
@ -81,7 +81,11 @@ typedef struct {
#define ARCH_SI_TRAP TRAP_BRKPT
#define __NR(syscall, compat) ({ (void)compat; __NR_##syscall; })
#define __NR(syscall, compat) \
({ \
(void)compat; \
__NR_##syscall; \
})
#define __compel_arch_fetch_thread_area(tid, th) 0
#define compel_arch_fetch_thread_area(tctl) 0

View File

@ -43,6 +43,7 @@ struct rt_sigframe {
char abigap[USER_REDZONE_SIZE];
} __attribute__((aligned(16)));
/* clang-format off */
#define ARCH_RT_SIGRETURN(new_sp, rt_sigframe) \
asm volatile( \
"mr 1, %0 \n" \
@ -51,11 +52,12 @@ struct rt_sigframe {
: \
: "r"(new_sp) \
: "memory")
/* clang-format on */
#if _CALL_ELF != 2
# error Only supporting ABIv2.
#error Only supporting ABIv2.
#else
# define FRAME_MIN_SIZE_PARM 96
#define FRAME_MIN_SIZE_PARM 96
#endif
#define RT_SIGFRAME_UC(rt_sigframe) (&(rt_sigframe)->uc)
@ -63,17 +65,15 @@ struct rt_sigframe {
#define RT_SIGFRAME_HAS_FPU(rt_sigframe) (1)
#define RT_SIGFRAME_FPU(rt_sigframe) (&(rt_sigframe)->uc.uc_mcontext)
#define rt_sigframe_erase_sigset(sigframe) \
memset(&sigframe->uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) \
memcpy(&sigframe->uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#define rt_sigframe_erase_sigset(sigframe) memset(&sigframe->uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) memcpy(&sigframe->uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#define MSR_TMA (1UL<<34) /* bit 29 Trans Mem state: Transactional */
#define MSR_TMS (1UL<<33) /* bit 30 Trans Mem state: Suspended */
#define MSR_TM (1UL<<32) /* bit 31 Trans Mem Available */
#define MSR_VEC (1UL<<25)
#define MSR_VSX (1UL<<23)
#define MSR_TMA (1UL << 34) /* bit 29 Trans Mem state: Transactional */
#define MSR_TMS (1UL << 33) /* bit 30 Trans Mem state: Suspended */
#define MSR_TM (1UL << 32) /* bit 31 Trans Mem Available */
#define MSR_VEC (1UL << 25)
#define MSR_VSX (1UL << 23)
#define MSR_TM_ACTIVE(x) ((((x) & MSR_TM) && ((x)&(MSR_TMA|MSR_TMS))) != 0)
#define MSR_TM_ACTIVE(x) ((((x)&MSR_TM) && ((x) & (MSR_TMA | MSR_TMS))) != 0)
#endif /* UAPI_COMPEL_ASM_SIGFRAME_H__ */

View File

@ -1,8 +1,7 @@
#ifndef __COMPEL_SYSCALL_H__
#define __COMPEL_SYSCALL_H__
unsigned long sys_mmap(void *addr, unsigned long len, unsigned long prot,
unsigned long flags, unsigned long fd,
unsigned long sys_mmap(void *addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long offset);
#endif

View File

@ -73,7 +73,11 @@ typedef struct {
#define user_regs_native(pregs) true
#define __NR(syscall, compat) ({ (void)compat; __NR_##syscall; })
#define __NR(syscall, compat) \
({ \
(void)compat; \
__NR_##syscall; \
})
struct mmap_arg_struct {
unsigned long addr;

View File

@ -59,6 +59,7 @@ struct rt_sigframe {
/*
* Do rt_sigreturn SVC
*/
/* clang-format off */
#define ARCH_RT_SIGRETURN(new_sp, rt_sigframe) \
asm volatile( \
"lgr %%r15,%0\n" \
@ -67,14 +68,13 @@ struct rt_sigframe {
: \
: "d" (new_sp) \
: "memory")
/* clang-format on */
#define RT_SIGFRAME_UC(rt_sigframe) (&rt_sigframe->uc)
#define RT_SIGFRAME_REGIP(rt_sigframe) (rt_sigframe)->uc.uc_mcontext.regs.psw.addr
#define RT_SIGFRAME_HAS_FPU(rt_sigframe) (1)
#define rt_sigframe_erase_sigset(sigframe) \
memset(&sigframe->uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) \
memcpy(&sigframe->uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#define rt_sigframe_erase_sigset(sigframe) memset(&sigframe->uc.uc_sigmask, 0, sizeof(k_rtsigset_t))
#define rt_sigframe_copy_sigset(sigframe, from) memcpy(&sigframe->uc.uc_sigmask, from, sizeof(k_rtsigset_t))
#endif /* UAPI_COMPEL_ASM_SIGFRAME_H__ */

View File

@ -9,8 +9,7 @@
#include <errno.h>
#define sys_recv(sockfd, ubuf, size, flags) \
sys_recvfrom(sockfd, ubuf, size, flags, NULL, NULL)
#define sys_recv(sockfd, ubuf, size, flags) sys_recvfrom(sockfd, ubuf, size, flags, NULL, NULL)
typedef struct prologue_init_args {
struct sockaddr_un ctl_sock_addr;

View File

@ -48,13 +48,13 @@ typedef struct {
unsigned int entry_number;
unsigned int base_addr;
unsigned int limit;
unsigned int seg_32bit:1;
unsigned int contents:2;
unsigned int read_exec_only:1;
unsigned int limit_in_pages:1;
unsigned int seg_not_present:1;
unsigned int useable:1;
unsigned int lm:1;
unsigned int seg_32bit : 1;
unsigned int contents : 2;
unsigned int read_exec_only : 1;
unsigned int limit_in_pages : 1;
unsigned int seg_not_present : 1;
unsigned int useable : 1;
unsigned int lm : 1;
} user_desc_t;
#endif /* COMPEL_ARCH_SYSCALL_TYPES_H__ */

View File

@ -1,31 +1,21 @@
#ifndef __COMPEL_ASM_CPU_H__
#define __COMPEL_ASM_CPU_H__
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "0" (*eax), "2" (*ecx)
: "memory");
asm volatile("cpuid" : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) : "0"(*eax), "2"(*ecx) : "memory");
}
static inline void cpuid(unsigned int op,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
*eax = op;
*ecx = 0;
native_cpuid(eax, ebx, ecx, edx);
}
static inline void cpuid_count(unsigned int op, int count,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
static inline void cpuid_count(unsigned int op, int count, unsigned int *eax, unsigned int *ebx, unsigned int *ecx,
unsigned int *edx)
{
*eax = op;
*ecx = count;

View File

@ -6,11 +6,11 @@
#define ELF_X86_64
#ifndef R_X86_64_GOTPCRELX
# define R_X86_64_GOTPCRELX 41
#define R_X86_64_GOTPCRELX 41
#endif
#ifndef R_X86_64_REX_GOTPCRELX
# define R_X86_64_REX_GOTPCRELX 42
#define R_X86_64_REX_GOTPCRELX 42
#endif
#define __handle_elf handle_elf_x86_64

View File

@ -43,275 +43,275 @@ enum cpuid_leafs {
#define NCAPINTS_BITS (NCAPINTS * 32)
/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH (0*32+19) /* CLFLUSH instruction */
#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM (0*32+25) /* "sse" */
#define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */
#define X86_FEATURE_FPU (0 * 32 + 0) /* Onboard FPU */
#define X86_FEATURE_VME (0 * 32 + 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE (0 * 32 + 2) /* Debugging Extensions */
#define X86_FEATURE_PSE (0 * 32 + 3) /* Page Size Extensions */
#define X86_FEATURE_TSC (0 * 32 + 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR (0 * 32 + 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE (0 * 32 + 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE (0 * 32 + 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 (0 * 32 + 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC (0 * 32 + 9) /* Onboard APIC */
#define X86_FEATURE_SEP (0 * 32 + 11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR (0 * 32 + 12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE (0 * 32 + 13) /* Page Global Enable */
#define X86_FEATURE_MCA (0 * 32 + 14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV (0 * 32 + 15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT (0 * 32 + 16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 (0 * 32 + 17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0 * 32 + 18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH (0 * 32 + 19) /* CLFLUSH instruction */
#define X86_FEATURE_DS (0 * 32 + 21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI (0 * 32 + 22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0 * 32 + 23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR (0 * 32 + 24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM (0 * 32 + 25) /* "sse" */
#define X86_FEATURE_XMM2 (0 * 32 + 26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP (0 * 32 + 27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT (0 * 32 + 28) /* Hyper-Threading */
#define X86_FEATURE_ACC (0 * 32 + 29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 (0 * 32 + 30) /* IA-64 processor */
#define X86_FEATURE_PBE (0 * 32 + 31) /* Pending Break Enable */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP (1*32+19) /* MP Capable */
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64, 64-bit support) */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow extensions */
#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow */
#define X86_FEATURE_SYSCALL (1 * 32 + 11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP (1 * 32 + 19) /* MP Capable */
#define X86_FEATURE_NX (1 * 32 + 20) /* Execute Disable */
#define X86_FEATURE_MMXEXT (1 * 32 + 22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT (1 * 32 + 25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES (1 * 32 + 26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP (1 * 32 + 27) /* RDTSCP */
#define X86_FEATURE_LM (1 * 32 + 29) /* Long Mode (x86-64, 64-bit support) */
#define X86_FEATURE_3DNOWEXT (1 * 32 + 30) /* AMD 3DNow extensions */
#define X86_FEATURE_3DNOW (1 * 32 + 31) /* 3DNow */
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
#define X86_FEATURE_RECOVERY (2 * 32 + 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN (2 * 32 + 1) /* Longrun power control */
#define X86_FEATURE_LRTI (2 * 32 + 3) /* LongRun table interface */
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
#define X86_FEATURE_CXMMX (3 * 32 + 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR (3 * 32 + 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3 * 32 + 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3 * 32 + 3) /* Centaur MCRs (= MTRRs) */
/* CPU types for specific tunings: */
#define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP (3*32+ 9) /* SMP kernel running on UP */
#define X86_FEATURE_ART (3*32+10) /* Always running timer (ART) */
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD (3*32+16) /* REP microcode works well */
#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" MFENCE synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER (3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS (3*32+21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* CPU topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */
#define X86_FEATURE_CPUID (3*32+25) /* CPU has CPUID instruction itself */
#define X86_FEATURE_EXTD_APICID (3*32+26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM (3*32+27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF (3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
#define X86_FEATURE_NONSTOP_TSC_S3 (3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ (3*32+31) /* TSC has known frequency */
#define X86_FEATURE_K8 (3 * 32 + 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7 (3 * 32 + 5) /* "" Athlon */
#define X86_FEATURE_P3 (3 * 32 + 6) /* "" P3 */
#define X86_FEATURE_P4 (3 * 32 + 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC (3 * 32 + 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP (3 * 32 + 9) /* SMP kernel running on UP */
#define X86_FEATURE_ART (3 * 32 + 10) /* Always running timer (ART) */
#define X86_FEATURE_ARCH_PERFMON (3 * 32 + 11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS (3 * 32 + 12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3 * 32 + 13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 (3 * 32 + 14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 (3 * 32 + 15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD (3 * 32 + 16) /* REP microcode works well */
#define X86_FEATURE_MFENCE_RDTSC (3 * 32 + 17) /* "" MFENCE synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC (3 * 32 + 18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER (3 * 32 + 19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL (3 * 32 + 20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS (3 * 32 + 21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY (3 * 32 + 22) /* CPU topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE (3 * 32 + 23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC (3 * 32 + 24) /* TSC does not stop in C states */
#define X86_FEATURE_CPUID (3 * 32 + 25) /* CPU has CPUID instruction itself */
#define X86_FEATURE_EXTD_APICID (3 * 32 + 26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM (3 * 32 + 27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF (3 * 32 + 28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
#define X86_FEATURE_NONSTOP_TSC_S3 (3 * 32 + 30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ (3 * 32 + 31) /* TSC has known frequency */
/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" MONITOR/MWAIT support */
#define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
#define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX (4*32+ 6) /* Safer Mode eXtensions */
#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID (4*32+10) /* Context ID */
#define X86_FEATURE_SDBG (4*32+11) /* Silicon Debug */
#define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B instruction */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM (4*32+15) /* Perf/Debug Capabilities MSR */
#define X86_FEATURE_PCID (4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC (4*32+21) /* X2APIC */
#define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER (4*32+24) /* TSC deadline timer */
#define X86_FEATURE_AES (4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE instruction enabled in the OS */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C (4*32+29) /* 16-bit FP conversions */
#define X86_FEATURE_RDRAND (4*32+30) /* RDRAND instruction */
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */
#define X86_FEATURE_XMM3 (4 * 32 + 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ (4 * 32 + 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64 (4 * 32 + 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT (4 * 32 + 3) /* "monitor" MONITOR/MWAIT support */
#define X86_FEATURE_DSCPL (4 * 32 + 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
#define X86_FEATURE_VMX (4 * 32 + 5) /* Hardware virtualization */
#define X86_FEATURE_SMX (4 * 32 + 6) /* Safer Mode eXtensions */
#define X86_FEATURE_EST (4 * 32 + 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 (4 * 32 + 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 (4 * 32 + 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID (4 * 32 + 10) /* Context ID */
#define X86_FEATURE_SDBG (4 * 32 + 11) /* Silicon Debug */
#define X86_FEATURE_FMA (4 * 32 + 12) /* Fused multiply-add */
#define X86_FEATURE_CX16 (4 * 32 + 13) /* CMPXCHG16B instruction */
#define X86_FEATURE_XTPR (4 * 32 + 14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM (4 * 32 + 15) /* Perf/Debug Capabilities MSR */
#define X86_FEATURE_PCID (4 * 32 + 17) /* Process Context Identifiers */
#define X86_FEATURE_DCA (4 * 32 + 18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 (4 * 32 + 19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 (4 * 32 + 20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC (4 * 32 + 21) /* X2APIC */
#define X86_FEATURE_MOVBE (4 * 32 + 22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT (4 * 32 + 23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER (4 * 32 + 24) /* TSC deadline timer */
#define X86_FEATURE_AES (4 * 32 + 25) /* AES instructions */
#define X86_FEATURE_XSAVE (4 * 32 + 26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
#define X86_FEATURE_OSXSAVE (4 * 32 + 27) /* "" XSAVE instruction enabled in the OS */
#define X86_FEATURE_AVX (4 * 32 + 28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C (4 * 32 + 29) /* 16-bit FP conversions */
#define X86_FEATURE_RDRAND (4 * 32 + 30) /* RDRAND instruction */
#define X86_FEATURE_HYPERVISOR (4 * 32 + 31) /* Running on a hypervisor */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */
#define X86_FEATURE_XSTORE (5 * 32 + 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN (5 * 32 + 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT (5 * 32 + 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN (5 * 32 + 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 (5 * 32 + 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN (5 * 32 + 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE (5 * 32 + 10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN (5 * 32 + 11) /* PHE enabled */
#define X86_FEATURE_PMM (5 * 32 + 12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN (5 * 32 + 13) /* PMM enabled */
/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM (6*32+ 2) /* Secure Virtual Machine */
#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE (6*32+17) /* Translation Cache Extension */
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM (6*32+21) /* Trailing Bit Manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* Topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* Core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT (6*32+26) /* Data breakpoint extension */
#define X86_FEATURE_PTSC (6*32+27) /* Performance time-stamp counter */
#define X86_FEATURE_PERFCTR_LLC (6*32+28) /* Last Level Cache performance counter extensions */
#define X86_FEATURE_MWAITX (6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
#define X86_FEATURE_LAHF_LM (6 * 32 + 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6 * 32 + 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM (6 * 32 + 2) /* Secure Virtual Machine */
#define X86_FEATURE_EXTAPIC (6 * 32 + 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY (6 * 32 + 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM (6 * 32 + 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A (6 * 32 + 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE (6 * 32 + 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH (6 * 32 + 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6 * 32 + 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6 * 32 + 10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP (6 * 32 + 11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT (6 * 32 + 12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6 * 32 + 13) /* Watchdog timer */
#define X86_FEATURE_LWP (6 * 32 + 15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 (6 * 32 + 16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE (6 * 32 + 17) /* Translation Cache Extension */
#define X86_FEATURE_NODEID_MSR (6 * 32 + 19) /* NodeId MSR */
#define X86_FEATURE_TBM (6 * 32 + 21) /* Trailing Bit Manipulations */
#define X86_FEATURE_TOPOEXT (6 * 32 + 22) /* Topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE (6 * 32 + 23) /* Core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB (6 * 32 + 24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT (6 * 32 + 26) /* Data breakpoint extension */
#define X86_FEATURE_PTSC (6 * 32 + 27) /* Performance time-stamp counter */
#define X86_FEATURE_PERFCTR_LLC (6 * 32 + 28) /* Last Level Cache performance counter extensions */
#define X86_FEATURE_MWAITX (6 * 32 + 29) /* MWAIT extension (MONITORX/MWAITX instructions) */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
#define X86_FEATURE_TSC_ADJUST (9*32+ 1) /* TSC adjustment MSR 0x3B */
#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM (9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_RDT_A (9*32+15) /* Resource Director Technology Allocation */
#define X86_FEATURE_AVX512F (9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_AVX512DQ (9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
#define X86_FEATURE_RDSEED (9*32+18) /* RDSEED instruction */
#define X86_FEATURE_ADX (9*32+19) /* ADCX and ADOX instructions */
#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_AVX512IFMA (9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
#define X86_FEATURE_CLFLUSHOPT (9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB (9*32+24) /* CLWB instruction */
#define X86_FEATURE_INTEL_PT (9*32+25) /* Intel Processor Trace */
#define X86_FEATURE_AVX512PF (9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER (9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD (9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI (9*32+29) /* SHA1/SHA256 Instruction Extensions */
#define X86_FEATURE_AVX512BW (9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
#define X86_FEATURE_AVX512VL (9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
#define X86_FEATURE_FSGSBASE (9 * 32 + 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
#define X86_FEATURE_TSC_ADJUST (9 * 32 + 1) /* TSC adjustment MSR 0x3B */
#define X86_FEATURE_BMI1 (9 * 32 + 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE (9 * 32 + 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 (9 * 32 + 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP (9 * 32 + 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 (9 * 32 + 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9 * 32 + 9) /* Enhanced REP MOVSB/STOSB instructions */
#define X86_FEATURE_INVPCID (9 * 32 + 10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM (9 * 32 + 11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM (9 * 32 + 12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX (9 * 32 + 14) /* Memory Protection Extension */
#define X86_FEATURE_RDT_A (9 * 32 + 15) /* Resource Director Technology Allocation */
#define X86_FEATURE_AVX512F (9 * 32 + 16) /* AVX-512 Foundation */
#define X86_FEATURE_AVX512DQ (9 * 32 + 17) /* AVX-512 DQ (Double/Quad granular) Instructions */
#define X86_FEATURE_RDSEED (9 * 32 + 18) /* RDSEED instruction */
#define X86_FEATURE_ADX (9 * 32 + 19) /* ADCX and ADOX instructions */
#define X86_FEATURE_SMAP (9 * 32 + 20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_AVX512IFMA (9 * 32 + 21) /* AVX-512 Integer Fused Multiply-Add instructions */
#define X86_FEATURE_CLFLUSHOPT (9 * 32 + 23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB (9 * 32 + 24) /* CLWB instruction */
#define X86_FEATURE_INTEL_PT (9 * 32 + 25) /* Intel Processor Trace */
#define X86_FEATURE_AVX512PF (9 * 32 + 26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER (9 * 32 + 27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD (9 * 32 + 28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI (9 * 32 + 29) /* SHA1/SHA256 Instruction Extensions */
#define X86_FEATURE_AVX512BW (9 * 32 + 30) /* AVX-512 BW (Byte/Word granular) Instructions */
#define X86_FEATURE_AVX512VL (9 * 32 + 31) /* AVX-512 VL (128/256 Vector Length) Extensions */
/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
#define X86_FEATURE_XSAVEOPT (10 * 32 + 0) /* XSAVEOPT instruction */
#define X86_FEATURE_XSAVEC (10 * 32 + 1) /* XSAVEC instruction */
#define X86_FEATURE_XGETBV1 (10 * 32 + 2) /* XGETBV with ECX = 1 instruction */
#define X86_FEATURE_XSAVES (10 * 32 + 3) /* XSAVES/XRSTORS instructions */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 11 */
#define X86_FEATURE_PREFETCHWT1 (11*32+ 0) /* PREFETCHWT1 Intel® Xeon PhiTM only */
#define X86_FEATURE_AVX512VBMI (11*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
#define X86_FEATURE_UMIP (11*32+ 2) /* User Mode Instruction Protection */
#define X86_FEATURE_PKU (11*32+ 3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (11*32+ 4) /* OS Protection Keys Enable */
#define X86_FEATURE_AVX512_VBMI2 (11*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
#define X86_FEATURE_GFNI (11*32+ 8) /* Galois Field New Instructions */
#define X86_FEATURE_VAES (11*32+ 9) /* Vector AES */
#define X86_FEATURE_VPCLMULQDQ (11*32+10) /* Carry-Less Multiplication Double Quadword */
#define X86_FEATURE_AVX512_VNNI (11*32+11) /* Vector Neural Network Instructions */
#define X86_FEATURE_AVX512_BITALG (11*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
#define X86_FEATURE_TME (11*32+13) /* Intel Total Memory Encryption */
#define X86_FEATURE_AVX512_VPOPCNTDQ (11*32+14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (11*32+16) /* 5-level page tables */
#define X86_FEATURE_RDPID (11*32+22) /* RDPID instruction */
#define X86_FEATURE_CLDEMOTE (11*32+25) /* CLDEMOTE instruction */
#define X86_FEATURE_PREFETCHWT1 (11 * 32 + 0) /* PREFETCHWT1 Intel® Xeon PhiTM only */
#define X86_FEATURE_AVX512VBMI (11 * 32 + 1) /* AVX512 Vector Bit Manipulation instructions*/
#define X86_FEATURE_UMIP (11 * 32 + 2) /* User Mode Instruction Protection */
#define X86_FEATURE_PKU (11 * 32 + 3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (11 * 32 + 4) /* OS Protection Keys Enable */
#define X86_FEATURE_AVX512_VBMI2 (11 * 32 + 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
#define X86_FEATURE_GFNI (11 * 32 + 8) /* Galois Field New Instructions */
#define X86_FEATURE_VAES (11 * 32 + 9) /* Vector AES */
#define X86_FEATURE_VPCLMULQDQ (11 * 32 + 10) /* Carry-Less Multiplication Double Quadword */
#define X86_FEATURE_AVX512_VNNI (11 * 32 + 11) /* Vector Neural Network Instructions */
#define X86_FEATURE_AVX512_BITALG (11 * 32 + 12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
#define X86_FEATURE_TME (11 * 32 + 13) /* Intel Total Memory Encryption */
#define X86_FEATURE_AVX512_VPOPCNTDQ (11 * 32 + 14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (11 * 32 + 16) /* 5-level page tables */
#define X86_FEATURE_RDPID (11 * 32 + 22) /* RDPID instruction */
#define X86_FEATURE_CLDEMOTE (11 * 32 + 25) /* CLDEMOTE instruction */
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
#define X86_FEATURE_CQM_OCCUP_LLC (12 * 32 + 0) /* LLC occupancy monitoring */
#define X86_FEATURE_CQM_MBM_TOTAL (12 * 32 + 1) /* LLC Total MBM monitoring */
#define X86_FEATURE_CQM_MBM_LOCAL (12 * 32 + 2) /* LLC Local MBM monitoring */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_CLZERO (13 * 32 + 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13 * 32 + 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13 * 32 + 2) /* Always save/restore FP error pointers */
#define X86_FEATURE_IBPB (13 * 32 + 12) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_IBRS (13 * 32 + 14) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_STIBP (13 * 32 + 15) /* Single Thread Indirect Branch Predictors */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
#define X86_FEATURE_HDC (14*32+13) /* HDC base registers present */
#define X86_FEATURE_DTHERM (14 * 32 + 0) /* Digital Thermal Sensor */
#define X86_FEATURE_IDA (14 * 32 + 1) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (14 * 32 + 2) /* Always Running APIC Timer */
#define X86_FEATURE_PLN (14 * 32 + 4) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (14 * 32 + 6) /* Intel Package Thermal Status */
#define X86_FEATURE_HWP (14 * 32 + 7) /* Intel Hardware P-states */
#define X86_FEATURE_HWP_NOTIFY (14 * 32 + 8) /* HWP Notification */
#define X86_FEATURE_HWP_ACT_WINDOW (14 * 32 + 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14 * 32 + 10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14 * 32 + 11) /* HWP Package Level Request */
#define X86_FEATURE_HDC (14 * 32 + 13) /* HDC base registers present */
/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
#define X86_FEATURE_NPT (15 * 32 + 0) /* Nested Page Table support */
#define X86_FEATURE_LBRV (15 * 32 + 1) /* LBR Virtualization support */
#define X86_FEATURE_SVML (15 * 32 + 2) /* "svm_lock" SVM locking MSR */
#define X86_FEATURE_NRIPS (15 * 32 + 3) /* "nrip_save" SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (15 * 32 + 4) /* "tsc_scale" TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (15 * 32 + 5) /* "vmcb_clean" VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (15 * 32 + 6) /* flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (15 * 32 + 7) /* Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (15 * 32 + 10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15 * 32 + 12) /* pause filter threshold */
#define X86_FEATURE_AVIC (15 * 32 + 13) /* Virtual Interrupt Controller */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15 * 32 + 15) /* Virtual VMSAVE VMLOAD */
#define X86_FEATURE_VGIF (15 * 32 + 16) /* Virtual GIF */
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 16 */
#define X86_FEATURE_CQM_LLC (16*32+ 1) /* LLC QoS if 1 */
#define X86_FEATURE_CQM_LLC (16 * 32 + 1) /* LLC QoS if 1 */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
#define X86_FEATURE_OVERFLOW_RECOV (17 * 32 + 0) /* MCA overflow recovery support */
#define X86_FEATURE_SUCCOR (17 * 32 + 1) /* Uncorrectable error containment and recovery */
#define X86_FEATURE_SMCA (17 * 32 + 3) /* Scalable MCA */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_AVX512_4VNNIW (18 * 32 + 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18 * 32 + 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_PCONFIG (18 * 32 + 18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18 * 32 + 26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18 * 32 + 27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18 * 32 + 29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18 * 32 + 31) /* "" Speculative Store Bypass Disable */
enum {
X86_VENDOR_INTEL = 0,
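
Note: each X86_FEATURE_* constant above packs a CPUID word index and a bit position as word * 32 + bit; the reindentation only adds spaces around that arithmetic. A small sketch of how such a constant can be decomposed to test a capability-word array (the helper name and the NCAPINTS value are assumptions for illustration):

#include <stdbool.h>
#include <stdio.h>

#define NCAPINTS 19                   /* assumed word count; the real value is in the header */
#define X86_FEATURE_AVX (4 * 32 + 28) /* bit 28 of CPUID word 4, as in the diff */

/* Test one feature bit in an array of CPUID capability words. */
static bool test_cpu_cap(const unsigned int caps[NCAPINTS], unsigned int feature)
{
	return caps[feature / 32] & (1u << (feature % 32));
}

int main(void)
{
	unsigned int caps[NCAPINTS] = { 0 };

	caps[4] |= 1u << 28; /* pretend CPUID reported AVX support */
	printf("AVX: %s\n", test_cpu_cap(caps, X86_FEATURE_AVX) ? "yes" : "no");
	return 0;
}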

View File

@ -77,11 +77,8 @@ enum xfeature {
/* All currently supported features */
#define XFEATURE_MASK_USER \
(XFEATURE_MASK_FP | XFEATURE_MASK_SSE | \
XFEATURE_MASK_YMM | XFEATURE_MASK_OPMASK | \
XFEATURE_MASK_ZMM_Hi256 | XFEATURE_MASK_Hi16_ZMM | \
XFEATURE_MASK_PKRU | XFEATURE_MASK_BNDREGS | \
XFEATURE_MASK_BNDCSR)
(XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_OPMASK | XFEATURE_MASK_ZMM_Hi256 | \
XFEATURE_MASK_Hi16_ZMM | XFEATURE_MASK_PKRU | XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
struct fpx_sw_bytes {
uint32_t magic1;
@ -319,7 +316,6 @@ typedef struct {
uint8_t has_fpu;
} fpu_state_t;
extern void compel_convert_from_fxsr(struct user_i387_ia32_struct *env,
struct i387_fxsave_struct *fxsave);
extern void compel_convert_from_fxsr(struct user_i387_ia32_struct *env, struct i387_fxsave_struct *fxsave);
#endif /* __CR_ASM_FPU_H__ */

View File

@ -103,15 +103,10 @@ static inline bool user_regs_native(user_regs_struct_t *pregs)
return pregs->__is_native == NATIVE_MAGIC;
}
#define get_user_reg(pregs, name) \
((user_regs_native(pregs)) ? \
((pregs)->native.name) : \
((pregs)->compat.name))
#define get_user_reg(pregs, name) ((user_regs_native(pregs)) ? ((pregs)->native.name) : ((pregs)->compat.name))
#define set_user_reg(pregs, name, val) \
((user_regs_native(pregs)) ? \
((pregs)->native.name = (val)) : \
((pregs)->compat.name = (val)))
((user_regs_native(pregs)) ? ((pregs)->native.name = (val)) : ((pregs)->compat.name = (val)))
#if 0
typedef struct {

View File

@ -82,7 +82,7 @@ typedef struct compat_siginfo {
int si_signo;
int si_errno;
int si_code;
int _pad[128/sizeof(int) - 3];
int _pad[128 / sizeof(int) - 3];
} compat_siginfo_t;
typedef struct compat_sigaltstack {
@ -137,8 +137,7 @@ struct rt_sigframe {
bool is_native;
};
static inline
void rt_sigframe_copy_sigset(struct rt_sigframe *to, k_rtsigset_t *from)
static inline void rt_sigframe_copy_sigset(struct rt_sigframe *to, k_rtsigset_t *from)
{
size_t sz = sizeof(k_rtsigset_t);
@ -149,8 +148,7 @@ void rt_sigframe_copy_sigset(struct rt_sigframe *to, k_rtsigset_t *from)
memcpy(&to->compat.uc.uc_sigmask, from, sz);
}
static inline
void rt_sigframe_erase_sigset(struct rt_sigframe *sigframe)
static inline void rt_sigframe_erase_sigset(struct rt_sigframe *sigframe)
{
size_t sz = sizeof(k_rtsigset_t);
@ -161,14 +159,10 @@ void rt_sigframe_erase_sigset(struct rt_sigframe *sigframe)
}
#define RT_SIGFRAME_REGIP(rt_sigframe) \
((rt_sigframe->is_native) ? \
(rt_sigframe)->native.uc.uc_mcontext.rip : \
(rt_sigframe)->compat.uc.uc_mcontext.ip)
((rt_sigframe->is_native) ? (rt_sigframe)->native.uc.uc_mcontext.rip : (rt_sigframe)->compat.uc.uc_mcontext.ip)
#define RT_SIGFRAME_FPU(rt_sigframe) \
((rt_sigframe->is_native) ? \
(&(rt_sigframe)->native.fpu_state) : \
(&(rt_sigframe)->compat.fpu_state))
((rt_sigframe->is_native) ? (&(rt_sigframe)->native.fpu_state) : (&(rt_sigframe)->compat.fpu_state))
#define RT_SIGFRAME_HAS_FPU(rt_sigframe) (RT_SIGFRAME_FPU(rt_sigframe)->has_fpu)
@ -178,10 +172,11 @@ void rt_sigframe_erase_sigset(struct rt_sigframe *sigframe)
* - compatible is in sys32_rt_sigreturn at arch/x86/ia32/ia32_signal.c
* - native is in sys_rt_sigreturn at arch/x86/kernel/signal.c
*/
#define RT_SIGFRAME_OFFSET(rt_sigframe) (((rt_sigframe)->is_native) ? 8 : 4 )
#define RT_SIGFRAME_OFFSET(rt_sigframe) (((rt_sigframe)->is_native) ? 8 : 4)
#define USER32_CS 0x23
/* clang-format off */
#define ARCH_RT_SIGRETURN_NATIVE(new_sp) \
asm volatile( \
"movq %0, %%rax \n" \
@ -215,6 +210,7 @@ do { \
else \
ARCH_RT_SIGRETURN_COMPAT(new_sp); \
} while (0)
/* clang-format off */
int sigreturn_prep_fpu_frame(struct rt_sigframe *sigframe,
struct rt_sigframe *rsigframe);
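As a side note on the accessors reformatted above: RT_SIGFRAME_REGIP() and RT_SIGFRAME_HAS_FPU() hide the native/compat split of struct rt_sigframe. A minimal sketch of their use (hypothetical helpers, assuming this header is included and the frame has already been prepared):

#include <stdbool.h>

/* Illustrative only: where a prepared frame will resume execution. */
static unsigned long sigframe_resume_ip(struct rt_sigframe *f)
{
        return (unsigned long)RT_SIGFRAME_REGIP(f); /* native rip or compat ip */
}

/* Illustrative only: whether the frame carries saved FPU state. */
static bool sigframe_carries_fpu(struct rt_sigframe *f)
{
        return RT_SIGFRAME_HAS_FPU(f);
}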

View File

@ -58,9 +58,8 @@ struct ctl_msg;
int parasite_wait_ack(int sockfd, unsigned int cmd, struct ctl_msg *m);
extern void parasite_setup_regs(unsigned long new_ip, void *stack, user_regs_struct_t *regs);
extern void *remote_mmap(struct parasite_ctl *ctl,
void *addr, size_t length, int prot,
int flags, int fd, off_t offset);
extern void *remote_mmap(struct parasite_ctl *ctl, void *addr, size_t length, int prot, int flags, int fd,
off_t offset);
extern bool arch_can_dump_task(struct parasite_ctl *ctl);
/*
* @regs: general purpose registers
@ -70,16 +69,12 @@ extern bool arch_can_dump_task(struct parasite_ctl *ctl);
* @flags: see INFECT_* in infect_ctx::flags
* @pid: mystery
*/
extern int compel_get_task_regs(pid_t pid, user_regs_struct_t *regs,
user_fpregs_struct_t *ext_regs, save_regs_t save,
extern int compel_get_task_regs(pid_t pid, user_regs_struct_t *regs, user_fpregs_struct_t *ext_regs, save_regs_t save,
void *arg, unsigned long flags);
extern int compel_set_task_ext_regs(pid_t pid, user_fpregs_struct_t *ext_regs);
extern int arch_fetch_sas(struct parasite_ctl *ctl, struct rt_sigframe *s);
extern int sigreturn_prep_regs_plain(struct rt_sigframe *sigframe,
user_regs_struct_t *regs,
extern int sigreturn_prep_regs_plain(struct rt_sigframe *sigframe, user_regs_struct_t *regs,
user_fpregs_struct_t *fpregs);
extern int sigreturn_prep_fpu_frame_plain(struct rt_sigframe *sigframe,
struct rt_sigframe *rsigframe);
extern int compel_execute_syscall(struct parasite_ctl *ctl,
user_regs_struct_t *regs, const char *code_syscall);
extern int sigreturn_prep_fpu_frame_plain(struct rt_sigframe *sigframe, struct rt_sigframe *rsigframe);
extern int compel_execute_syscall(struct parasite_ctl *ctl, user_regs_struct_t *regs, const char *code_syscall);
#endif

View File

@ -4,31 +4,23 @@
#include "uapi/compel/log.h"
#ifndef LOG_PREFIX
# define LOG_PREFIX
#define LOG_PREFIX
#endif
static inline int pr_quelled(unsigned int loglevel)
{
return compel_log_get_loglevel() < loglevel
&& loglevel != COMPEL_LOG_MSG;
return compel_log_get_loglevel() < loglevel && loglevel != COMPEL_LOG_MSG;
}
extern void compel_print_on_level(unsigned int loglevel,
const char *format, ...)
__attribute__ ((__format__ (__printf__, 2, 3)));
extern void compel_print_on_level(unsigned int loglevel, const char *format, ...)
__attribute__((__format__(__printf__, 2, 3)));
#define pr_msg(fmt, ...) \
compel_print_on_level(COMPEL_LOG_MSG, \
fmt, ##__VA_ARGS__)
#define pr_msg(fmt, ...) compel_print_on_level(COMPEL_LOG_MSG, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) \
compel_print_on_level(COMPEL_LOG_INFO, \
LOG_PREFIX fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) compel_print_on_level(COMPEL_LOG_INFO, LOG_PREFIX fmt, ##__VA_ARGS__)
#define pr_err(fmt, ...) \
compel_print_on_level(COMPEL_LOG_ERROR, \
"Error (%s:%d): " LOG_PREFIX fmt, \
__FILE__, __LINE__, ##__VA_ARGS__)
compel_print_on_level(COMPEL_LOG_ERROR, "Error (%s:%d): " LOG_PREFIX fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#define pr_err_once(fmt, ...) \
do { \
@ -40,9 +32,7 @@ extern void compel_print_on_level(unsigned int loglevel,
} while (0)
#define pr_warn(fmt, ...) \
compel_print_on_level(COMPEL_LOG_WARN, \
"Warn (%s:%d): " LOG_PREFIX fmt, \
__FILE__, __LINE__, ##__VA_ARGS__)
compel_print_on_level(COMPEL_LOG_WARN, "Warn (%s:%d): " LOG_PREFIX fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#define pr_warn_once(fmt, ...) \
do { \
@ -53,11 +43,8 @@ extern void compel_print_on_level(unsigned int loglevel,
} \
} while (0)
#define pr_debug(fmt, ...) \
compel_print_on_level(COMPEL_LOG_DEBUG, \
LOG_PREFIX fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) compel_print_on_level(COMPEL_LOG_DEBUG, LOG_PREFIX fmt, ##__VA_ARGS__)
#define pr_perror(fmt, ...) \
pr_err(fmt ": %m\n", ##__VA_ARGS__)
#define pr_perror(fmt, ...) pr_err(fmt ": %m\n", ##__VA_ARGS__)
#endif /* COMPEL_LOG_H__ */
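A short usage sketch for the logging helpers above (hypothetical caller; only this header plus <fcntl.h> are assumed):

#include <fcntl.h>

/* Hypothetical caller of the pr_* macros.  pr_perror() appends
 * ": <errno string>\n" by itself, so its format carries no newline. */
static int open_image(const char *path)
{
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
                pr_perror("Can't open %s", path);
                return -1;
        }
        pr_debug("opened %s as fd %d\n", path, fd);
        return fd;
}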

View File

@ -18,10 +18,10 @@ typedef struct {
extern piegen_opt_t opts;
#define pr_out(fmt, ...) \
do { \
do { \
if (opts.fout) \
fprintf(opts.fout, fmt, ##__VA_ARGS__); \
} while (0)
} while (0)
extern int handle_binary(void *mem, size_t size);

View File

@ -5,7 +5,7 @@
#include <compel/asm/infect-types.h>
#include <compel/ptrace.h>
#define PTRACE_SI_EVENT(_si_code) (((_si_code) & 0xFFFF) >> 8)
#define PTRACE_SI_EVENT(_si_code) (((_si_code)&0xFFFF) >> 8)
extern int ptrace_get_regs(pid_t pid, user_regs_struct_t *regs);
extern int ptrace_set_regs(pid_t pid, user_regs_struct_t *regs);
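PTRACE_SI_EVENT() recovers the ptrace event number that the kernel packs into the high byte of si_code when a tracee reports an event. A rough sketch of its use (the surrounding attach/wait logic is assumed, not shown here):

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Sketch: fetch the stop's siginfo and decode the event, if any. */
static int stop_event(pid_t pid)
{
        siginfo_t si;

        if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si))
                return -1;

        return PTRACE_SI_EVENT(si.si_code); /* e.g. PTRACE_EVENT_STOP */
}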

View File

@ -7,10 +7,16 @@ struct ctl_msg {
};
#define ctl_msg_cmd(_cmd) \
(struct ctl_msg){.cmd = _cmd, }
(struct ctl_msg) \
{ \
.cmd = _cmd, \
}
#define ctl_msg_ack(_cmd, _err) \
(struct ctl_msg){.cmd = _cmd, .ack = _cmd, .err = _err, }
(struct ctl_msg) \
{ \
.cmd = _cmd, .ack = _cmd, .err = _err, \
}
/*
* NOTE: each command's args should be arch-independent in size.
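Since struct ctl_msg and its compound-literal constructors are shown just above, here is a minimal sketch of building a request and its acknowledgement for the control socket (the send helpers and the command value are illustrative, not part of this header):

#include <sys/socket.h>

/* Illustrative: build a command message on the stack and push it out. */
static int send_cmd(int sk, unsigned int cmd)
{
        struct ctl_msg m = ctl_msg_cmd(cmd);

        return send(sk, &m, sizeof(m), 0) == (ssize_t)sizeof(m) ? 0 : -1;
}

/* Illustrative: acknowledge a command with err == 0. */
static int reply_ok(int sk, unsigned int cmd)
{
        struct ctl_msg m = ctl_msg_ack(cmd, 0);

        return send(sk, &m, sizeof(m), 0) == (ssize_t)sizeof(m) ? 0 : -1;
}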

View File

@ -7,4 +7,3 @@ struct shmem_plugin_msg {
};
#endif /* __COMPEL_PLUGIN_SHMEM_PRIV_H__ */

View File

@ -15,5 +15,4 @@ extern int compel_rpc_sock(struct parasite_ctl *ctl);
#define PARASITE_USER_CMDS 64
#endif

View File

@ -37,8 +37,7 @@ struct parasite_thread_ctl;
extern struct parasite_ctl __must_check *compel_prepare(int pid);
extern struct parasite_ctl __must_check *compel_prepare_noctx(int pid);
extern int __must_check compel_infect(struct parasite_ctl *ctl,
unsigned long nr_threads, unsigned long args_size);
extern int __must_check compel_infect(struct parasite_ctl *ctl, unsigned long nr_threads, unsigned long args_size);
extern struct parasite_thread_ctl __must_check *compel_prepare_thread(struct parasite_ctl *ctl, int pid);
extern void compel_release_thread(struct parasite_thread_ctl *);
@ -47,7 +46,7 @@ extern int __must_check compel_cure_remote(struct parasite_ctl *ctl);
extern int __must_check compel_cure_local(struct parasite_ctl *ctl);
extern int __must_check compel_cure(struct parasite_ctl *ctl);
#define PARASITE_ARG_SIZE_MIN ( 1 << 12)
#define PARASITE_ARG_SIZE_MIN (1 << 12)
#define compel_parasite_args(ctl, type) \
({ \
@ -60,13 +59,8 @@ extern int __must_check compel_cure(struct parasite_ctl *ctl);
extern void *compel_parasite_args_p(struct parasite_ctl *ctl);
extern void *compel_parasite_args_s(struct parasite_ctl *ctl, unsigned long args_size);
extern int __must_check compel_syscall(struct parasite_ctl *ctl,
int nr, long *ret,
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5,
extern int __must_check compel_syscall(struct parasite_ctl *ctl, int nr, long *ret, unsigned long arg1,
unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5,
unsigned long arg6);
extern int __must_check compel_run_in_thread(struct parasite_thread_ctl *tctl, unsigned int cmd);
extern int __must_check compel_run_at(struct parasite_ctl *ctl, unsigned long ip, user_regs_struct_t *ret_regs);
@ -83,11 +77,9 @@ enum trace_flags {
TRACE_EXIT,
};
extern int __must_check compel_stop_on_syscall(int tasks, int sys_nr,
int sys_nr_compat, enum trace_flags trace);
extern int __must_check compel_stop_on_syscall(int tasks, int sys_nr, int sys_nr_compat, enum trace_flags trace);
extern int __must_check compel_stop_pie(pid_t pid, void *addr,
enum trace_flags *tf, bool no_bp);
extern int __must_check compel_stop_pie(pid_t pid, void *addr, enum trace_flags *tf, bool no_bp);
extern int __must_check compel_unmap(struct parasite_ctl *ctl, unsigned long addr);
@ -98,8 +90,7 @@ extern k_rtsigset_t *compel_thread_sigmask(struct parasite_thread_ctl *tctl);
struct rt_sigframe;
typedef int (*open_proc_fn)(int pid, int mode, const char *fmt, ...)
__attribute__ ((__format__ (__printf__, 3, 4)));
typedef int (*open_proc_fn)(int pid, int mode, const char *fmt, ...) __attribute__((__format__(__printf__, 3, 4)));
typedef int (*save_regs_t)(void *, user_regs_struct_t *, user_fpregs_struct_t *);
typedef int (*make_sigframe_t)(void *, struct rt_sigframe *, struct rt_sigframe *, k_rtsigset_t *);
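The declarations above are compel's public remote-execution surface. A condensed sketch of running one syscall in a victim with them; the victim is assumed to have been seized beforehand (compel_interrupt_task()/compel_wait_task(), not shown in this hunk) and is handed back afterwards, and the includes and choice of getpid are illustrative:

#include <sys/syscall.h>
/* #include <compel/infect.h> -- assumed installed name of this header */

/* Sketch: execute getpid() inside an already-seized task. */
static long remote_getpid(pid_t pid)
{
        struct parasite_ctl *ctl;
        long remote_pid = -1;

        ctl = compel_prepare(pid);
        if (!ctl)
                return -1;

        /* nr, place for the return value, then the six syscall arguments */
        if (compel_syscall(ctl, __NR_getpid, &remote_pid, 0, 0, 0, 0, 0, 0))
                remote_pid = -1;

        /* Give the task back; both state arguments are placeholders here. */
        compel_resume_task(pid, COMPEL_TASK_ALIVE, COMPEL_TASK_ALIVE);
        return remote_pid;
}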

View File

@ -6,8 +6,7 @@
* also by log functions in the std plugin.
*/
enum __compel_log_levels
{
enum __compel_log_levels {
COMPEL_LOG_MSG, /* Print message regardless of log level */
COMPEL_LOG_ERROR, /* Errors only, when we're in trouble */
COMPEL_LOG_WARN, /* Warnings */

View File

@ -1,8 +1,8 @@
#ifndef UAPI_COMPEL_PLUGIN_H__
#define UAPI_COMPEL_PLUGIN_H__
#define __init __attribute__((__used__)) __attribute__ ((__section__(".compel.init")))
#define __exit __attribute__((__used__)) __attribute__ ((__section__(".compel.exit")))
#define __init __attribute__((__used__)) __attribute__((__section__(".compel.init")))
#define __exit __attribute__((__used__)) __attribute__((__section__(".compel.exit")))
#ifndef __ASSEMBLY__
@ -12,9 +12,7 @@ typedef struct {
void (*exit)(void);
} plugin_init_t;
#define plugin_register(___desc) \
static const plugin_init_t * const \
___ptr__##___desc __init = &___desc;
#define plugin_register(___desc) static const plugin_init_t *const ___ptr__##___desc __init = &___desc;
#define PLUGIN_REGISTER(___id, ___name, ___init, ___exit) \
static const plugin_init_t __plugin_desc_##___id = { \

View File

@ -19,15 +19,15 @@
*/
#ifndef PTRACE_SEIZE
# define PTRACE_SEIZE 0x4206
#define PTRACE_SEIZE 0x4206
#endif
#ifndef PTRACE_O_SUSPEND_SECCOMP
# define PTRACE_O_SUSPEND_SECCOMP (1 << 21)
#define PTRACE_O_SUSPEND_SECCOMP (1 << 21)
#endif
#ifndef PTRACE_INTERRUPT
# define PTRACE_INTERRUPT 0x4207
#define PTRACE_INTERRUPT 0x4207
#endif
#ifndef PTRACE_PEEKSIGINFO
@ -38,13 +38,13 @@
#endif
#ifndef PTRACE_GETREGSET
# define PTRACE_GETREGSET 0x4204
# define PTRACE_SETREGSET 0x4205
#define PTRACE_GETREGSET 0x4204
#define PTRACE_SETREGSET 0x4205
#endif
#ifndef PTRACE_GETSIGMASK
# define PTRACE_GETSIGMASK 0x420a
# define PTRACE_SETSIGMASK 0x420b
#define PTRACE_GETSIGMASK 0x420a
#define PTRACE_SETSIGMASK 0x420b
#endif
#ifndef PTRACE_SECCOMP_GET_FILTER
@ -52,7 +52,7 @@
#endif
#ifndef PTRACE_SECCOMP_GET_METADATA
# define PTRACE_SECCOMP_GET_METADATA 0x420d
#define PTRACE_SECCOMP_GET_METADATA 0x420d
#endif /* PTRACE_SECCOMP_GET_METADATA */
/*
@ -66,12 +66,12 @@ typedef struct {
} seccomp_metadata_t;
#ifdef PTRACE_EVENT_STOP
# if PTRACE_EVENT_STOP == 7 /* Bad value from Linux 3.1-3.3, fixed in 3.4 */
# undef PTRACE_EVENT_STOP
# endif
#if PTRACE_EVENT_STOP == 7 /* Bad value from Linux 3.1-3.3, fixed in 3.4 */
#undef PTRACE_EVENT_STOP
#endif
#endif
#ifndef PTRACE_EVENT_STOP
# define PTRACE_EVENT_STOP 128
#define PTRACE_EVENT_STOP 128
#endif
extern int ptrace_suspend_seccomp(pid_t pid);
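A brief sketch tying the fallback defines above together: seize a task without stopping it, park its seccomp filters via the helper declared just above, then ask it to trap. Error handling is trimmed and the flow is illustrative, not CRIU's exact seize sequence:

#include <sys/ptrace.h>
#include <sys/types.h>

/* Sketch: attach, suspend seccomp for the duration of the dump, stop. */
static int seize_and_stop(pid_t pid)
{
        if (ptrace(PTRACE_SEIZE, pid, NULL, NULL))
                return -1;

        if (ptrace_suspend_seccomp(pid))
                return -1;

        return ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) ? -1 : 0;
}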

View File

@ -5,7 +5,7 @@
#define UAPI_COMPEL_SIGFRAME_COMMON_H__
#ifndef UAPI_COMPEL_ASM_SIGFRAME_H__
# error "Direct inclusion is forbidden, use <compel/asm/sigframe.h> instead"
#error "Direct inclusion is forbidden, use <compel/asm/sigframe.h> instead"
#endif
#include "common/compiler.h"
@ -15,23 +15,22 @@
struct rt_sigframe;
#ifndef SIGFRAME_MAX_OFFSET
# define SIGFRAME_MAX_OFFSET RT_SIGFRAME_OFFSET(0)
#define SIGFRAME_MAX_OFFSET RT_SIGFRAME_OFFSET(0)
#endif
#define RESTORE_STACK_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define RESTORE_STACK_ALIGN(x, a) (((x) + (a)-1) & ~((a)-1))
/* sigframe should be aligned on 64 bytes for x86 and 8 bytes for arm */
#define RESTORE_STACK_SIGFRAME \
RESTORE_STACK_ALIGN(sizeof(struct rt_sigframe) + SIGFRAME_MAX_OFFSET, 64)
#define RESTORE_STACK_SIGFRAME RESTORE_STACK_ALIGN(sizeof(struct rt_sigframe) + SIGFRAME_MAX_OFFSET, 64)
#ifndef __ARCH_SI_PREAMBLE_SIZE
# define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
#endif
#define SI_MAX_SIZE 128
#ifndef SI_PAD_SIZE
# define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
#endif
typedef struct rt_siginfo {
@ -53,11 +52,10 @@ struct rt_ucontext {
rt_stack_t uc_stack;
struct rt_sigcontext uc_mcontext;
k_rtsigset_t uc_sigmask; /* mask last for extensibility */
int _unused[32 - (sizeof (k_rtsigset_t) / sizeof (int))];
int _unused[32 - (sizeof(k_rtsigset_t) / sizeof(int))];
unsigned long uc_regspace[128] __attribute__((aligned(8)));
};
extern int __must_check sigreturn_prep_fpu_frame(struct rt_sigframe *frame,
struct rt_sigframe *rframe);
extern int __must_check sigreturn_prep_fpu_frame(struct rt_sigframe *frame, struct rt_sigframe *rframe);
#endif /* UAPI_COMPEL_SIGFRAME_COMMON_H__ */
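RESTORE_STACK_ALIGN() rounds x up to the next multiple of a (a power of two), which RESTORE_STACK_SIGFRAME then applies with a 64-byte alignment. A standalone worked check of the rounding:

#include <assert.h>

#define RESTORE_STACK_ALIGN(x, a) (((x) + (a)-1) & ~((a)-1))

int main(void)
{
        assert(RESTORE_STACK_ALIGN(1, 64) == 64);   /* rounds up */
        assert(RESTORE_STACK_ALIGN(64, 64) == 64);  /* already aligned */
        assert(RESTORE_STACK_ALIGN(65, 64) == 128); /* next 64-byte step */
        return 0;
}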

View File

@ -5,8 +5,7 @@
* Task state, as returned by compel_wait_task()
* and used in arguments to compel_resume_task().
*/
enum __compel_task_state
{
enum __compel_task_state {
COMPEL_TASK_ALIVE = 0x01,
COMPEL_TASK_DEAD = 0x02,
COMPEL_TASK_STOPPED = 0x03,

View File

@ -6,7 +6,7 @@
extern int parasite_get_rpc_sock(void);
extern unsigned int __export_parasite_service_cmd;
extern void * __export_parasite_service_args_ptr;
extern void *__export_parasite_service_args_ptr;
extern int __must_check parasite_service(void);
/*

View File

@ -23,8 +23,8 @@ extern int std_gettimeofday(struct timeval *tv, struct timezone *tz);
extern int std_vprint_num(char *buf, int blen, int num, char **ps);
extern void std_sprintf(char output[STD_LOG_SIMPLE_CHUNK], const char *format, ...)
__attribute__ ((__format__ (__printf__, 2, 3)));
__attribute__((__format__(__printf__, 2, 3)));
extern void print_on_level(unsigned int loglevel, const char *format, ...)
__attribute__ ((__format__ (__printf__, 2, 3)));
__attribute__((__format__(__printf__, 2, 3)));
#endif /* COMPEL_PLUGIN_STD_LOG_H__ */

View File

@ -10,12 +10,10 @@
#define STDOUT_FILENO 1 /* Standard output. */
#define STDERR_FILENO 2 /* Standard error output. */
extern void std_dputc(int fd, char c);
extern void std_dputs(int fd, const char *s);
extern void std_vdprintf(int fd, const char *format, va_list args);
extern void std_dprintf(int fd, const char *format, ...)
__attribute__ ((__format__ (__printf__, 2, 3)));
extern void std_dprintf(int fd, const char *format, ...) __attribute__((__format__(__printf__, 2, 3)));
#define std_printf(fmt, ...) std_dprintf(STDOUT_FILENO, fmt, ##__VA_ARGS__)
#define std_puts(s) std_dputs(STDOUT_FILENO, s)

View File

@ -44,7 +44,7 @@ struct clone_args;
typedef unsigned long aio_context_t;
#ifndef F_GETFD
# define F_GETFD 1
#define F_GETFD 1
#endif
struct krlimit {
@ -57,7 +57,6 @@ typedef int kernel_timer_t;
#include <compel/plugins/std/asm/syscall-types.h>
extern long sys_preadv_raw(int fd, struct iovec *iov, unsigned long nr, unsigned long pos_l, unsigned long pos_h);
static inline long sys_preadv(int fd, struct iovec *iov, unsigned long nr, off_t off)
@ -67,7 +66,7 @@ static inline long sys_preadv(int fd, struct iovec *iov, unsigned long nr, off_t
#elif BITS_PER_LONG == 32
return sys_preadv_raw(fd, iov, nr, off, ((uint64_t)off) >> 32);
#else
# error "BITS_PER_LONG isn't defined"
#error "BITS_PER_LONG isn't defined"
#endif
}

View File

@ -5,7 +5,6 @@ extern int save_task_regs(void *, user_regs_struct_t *, user_fpregs_struct_t *);
extern int arch_alloc_thread_info(CoreEntry *core);
extern void arch_free_thread_info(CoreEntry *core);
static inline void core_put_tls(CoreEntry *core, tls_t tls)
{
core->ti_aarch64->tls = tls;

View File

@ -4,7 +4,7 @@
static inline void arch_get_tls(tls_t *ptls)
{
tls_t tls;
asm("mrs %0, tpidr_el0" : "=r" (tls));
asm("mrs %0, tpidr_el0" : "=r"(tls));
*ptls = tls;
}

View File

@ -5,6 +5,7 @@
#include "images/core.pb-c.h"
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
asm volatile( \
@ -16,13 +17,13 @@
"r"(restore_task_exec_start), \
"r"(task_args) \
: "x0", "memory")
/* clang-format on */
static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
{
*ptls = pcore->ti_aarch64->tls;
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);
#endif

View File

@ -9,6 +9,7 @@
#include <compel/asm/sigframe.h>
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
@ -112,7 +113,7 @@
: \
: "r"(ret) \
: "sp", "x0", "memory")
/* clang-format on */
#define arch_map_vdso(map, compat) -1
@ -121,11 +122,16 @@ int restore_nonsigframe_gpregs(UserAarch64RegsEntry *r);
static inline void restore_tls(tls_t *ptls)
{
asm("msr tpidr_el0, %0" : : "r" (*ptls));
asm("msr tpidr_el0, %0" : : "r"(*ptls));
}
static inline void *alloc_compat_syscall_stack(void) { return NULL; }
static inline void free_compat_syscall_stack(void *stack32) { }
static inline void *alloc_compat_syscall_stack(void)
{
return NULL;
}
static inline void free_compat_syscall_stack(void *stack32)
{
}
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
return -1;

View File

@ -22,8 +22,14 @@ typedef UserAarch64RegsEntry UserRegsEntry;
#define TI_SP(core) ((core)->ti_aarch64->gpregs->sp)
static inline void *decode_pointer(uint64_t v) { return (void*)v; }
static inline uint64_t encode_pointer(void *p) { return (uint64_t)p; }
static inline void *decode_pointer(uint64_t v)
{
return (void *)v;
}
static inline uint64_t encode_pointer(void *p)
{
return (uint64_t)p;
}
#define AT_VECTOR_SIZE 40
typedef uint64_t auxv_t;

View File

@ -17,16 +17,12 @@
* XXX: remove when compel/piegen will support aarch64.
*/
#define ARCH_VDSO_SYMBOLS_LIST \
const char* aarch_vdso_symbol1 = "__kernel_clock_getres"; \
const char* aarch_vdso_symbol2 = "__kernel_clock_gettime"; \
const char* aarch_vdso_symbol3 = "__kernel_gettimeofday"; \
const char* aarch_vdso_symbol4 = "__kernel_rt_sigreturn";
const char *aarch_vdso_symbol1 = "__kernel_clock_getres"; \
const char *aarch_vdso_symbol2 = "__kernel_clock_gettime"; \
const char *aarch_vdso_symbol3 = "__kernel_gettimeofday"; \
const char *aarch_vdso_symbol4 = "__kernel_rt_sigreturn";
#define ARCH_VDSO_SYMBOLS \
aarch_vdso_symbol1, \
aarch_vdso_symbol2, \
aarch_vdso_symbol3, \
aarch_vdso_symbol4
#define ARCH_VDSO_SYMBOLS aarch_vdso_symbol1, aarch_vdso_symbol2, aarch_vdso_symbol3, aarch_vdso_symbol4
extern void write_intraprocedure_branch(unsigned long to, unsigned long from);

View File

@ -5,7 +5,6 @@ extern int save_task_regs(void *, user_regs_struct_t *, user_fpregs_struct_t *);
extern int arch_alloc_thread_info(CoreEntry *core);
extern void arch_free_thread_info(CoreEntry *core);
static inline void core_put_tls(CoreEntry *core, tls_t tls)
{
core->ti_arm->tls = tls;

View File

@ -4,7 +4,7 @@
/* kuser_get_tls() kernel-provided user-helper, the address is emulated */
static inline void arch_get_tls(tls_t *ptls)
{
*ptls = ((tls_t (*)(void))0xffff0fe0)();
*ptls = ((tls_t(*)(void))0xffff0fe0)();
}
#endif

View File

@ -5,6 +5,7 @@
#include "images/core.pb-c.h"
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
asm volatile( \
@ -17,13 +18,13 @@
"r"(restore_task_exec_start), \
"r"(task_args) \
: "r0", "r1", "memory")
/* clang-format on */
static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
{
*ptls = pcore->ti_arm->tls;
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);
#endif

View File

@ -6,6 +6,7 @@
#include <compel/asm/sigframe.h>
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
@ -108,19 +109,18 @@
: \
: "r"(ret) \
: "memory")
/* clang-format on */
#define arch_map_vdso(map, compat) -1
int restore_gpregs(struct rt_sigframe *f, UserArmRegsEntry *r);
int restore_nonsigframe_gpregs(UserArmRegsEntry *r);
#define ARCH_HAS_SHMAT_HOOK
unsigned long arch_shmat(int shmid, void *shmaddr,
int shmflg, unsigned long size);
unsigned long arch_shmat(int shmid, void *shmaddr, int shmflg, unsigned long size);
static inline void restore_tls(tls_t *ptls) {
asm (
"mov r7, #15 \n"
static inline void restore_tls(tls_t *ptls)
{
asm("mov r7, #15 \n"
"lsl r7, #16 \n"
"mov r0, #5 \n"
"add r7, r0 \n" /* r7 = 0xF005 */
@ -128,12 +128,16 @@ static inline void restore_tls(tls_t *ptls) {
"svc #0 \n"
:
: "r"(ptls)
: "r0", "r7"
);
: "r0", "r7");
}
static inline void *alloc_compat_syscall_stack(void) { return NULL; }
static inline void free_compat_syscall_stack(void *stack32) { }
static inline void *alloc_compat_syscall_stack(void)
{
return NULL;
}
static inline void free_compat_syscall_stack(void *stack32)
{
}
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
return -1;

View File

@ -21,8 +21,14 @@ typedef UserArmRegsEntry UserRegsEntry;
#define TI_SP(core) ((core)->ti_arm->gpregs->sp)
static inline void *decode_pointer(u64 v) { return (void*)(u32)v; }
static inline u64 encode_pointer(void *p) { return (u32)p; }
static inline void *decode_pointer(u64 v)
{
return (void *)(u32)v;
}
static inline u64 encode_pointer(void *p)
{
return (u32)p;
}
#define AT_VECTOR_SIZE 40
typedef uint32_t auxv_t;

View File

@ -12,10 +12,8 @@
#define VDSO_SYMBOL_MAX 2
#define VDSO_SYMBOL_GTOD 1
#define ARCH_VDSO_SYMBOLS_LIST \
const char* aarch_vdso_symbol1 = "__vdso_clock_gettime"; \
const char* aarch_vdso_symbol2 = "__vdso_gettimeofday";
#define ARCH_VDSO_SYMBOLS \
aarch_vdso_symbol1, \
aarch_vdso_symbol2,
const char *aarch_vdso_symbol1 = "__vdso_clock_gettime"; \
const char *aarch_vdso_symbol2 = "__vdso_gettimeofday";
#define ARCH_VDSO_SYMBOLS aarch_vdso_symbol1, aarch_vdso_symbol2,
#endif /* __CR_ASM_VDSO_H__ */

View File

@ -4,6 +4,7 @@
#include "asm/restorer.h"
#include "images/core.pb-c.h"
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, task_args) \
asm volatile( \
"move $4, %0 \n" \
@ -16,13 +17,13 @@
:"r"(task_args),"r"(restore_task_exec_start), \
"g"(new_sp) \
: "$25", "$4","$5")
/* clang-format on */
static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
{
*ptls = pcore->ti_mips->tls;
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);
#endif

View File

@ -7,14 +7,16 @@
#include <compel/plugins/std/syscall-codes.h>
#include <compel/asm/sigframe.h>
static inline void restore_tls(tls_t *ptls) {
asm volatile(
"move $4, %0 \n"
"li $2, "__stringify(__NR_set_thread_area)" \n"
static inline void restore_tls(tls_t *ptls)
{
/* clang-format off */
asm volatile("move $4, %0 \n"
"li $2, " __stringify(__NR_set_thread_area) " \n"
"syscall \n"
:
: "r"(*ptls)
: "$4","$2","memory");
: "$4", "$2", "memory");
/* clang-format on */
}
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
@ -25,6 +27,7 @@ static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
return -1;
}
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
@ -67,17 +70,22 @@ static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
pr_err("This architecture does not support clone3() with set_tid, yet!\n"); \
ret = -1; \
} while (0)
/* clang-format on */
#define kdat_compatible_cr() 0
#define arch_map_vdso(map, compat) -1
static inline void *alloc_compat_syscall_stack(void) { return NULL; }
static inline void free_compat_syscall_stack(void *stack32) { }
static inline void *alloc_compat_syscall_stack(void)
{
return NULL;
}
static inline void free_compat_syscall_stack(void *stack32)
{
}
int restore_gpregs(struct rt_sigframe *f, UserMipsRegsEntry *r);
int restore_nonsigframe_gpregs(UserMipsRegsEntry *r);
#define ARCH_HAS_SHMAT_HOOK
unsigned long arch_shmat(int shmid, void *shmaddr,
int shmflg, unsigned long size);
unsigned long arch_shmat(int shmid, void *shmaddr, int shmflg, unsigned long size);
#endif

View File

@ -20,9 +20,14 @@
typedef UserMipsRegsEntry UserRegsEntry;
static inline u64 encode_pointer(void *p) { return (u64)p; }
static inline void *decode_pointer(u64 v) { return (void*)v; }
static inline u64 encode_pointer(void *p)
{
return (u64)p;
}
static inline void *decode_pointer(u64 v)
{
return (void *)v;
}
#define AT_VECTOR_SIZE 44
typedef uint64_t auxv_t;

View File

@ -15,13 +15,9 @@
#define VDSO_SYMBOL_MAX 3
#define VDSO_SYMBOL_GTOD 0
#define ARCH_VDSO_SYMBOLS_LIST \
const char* aarch_vdso_symbol1 = "__vdso_clock_gettime"; \
const char* aarch_vdso_symbol2 = "__vdso_gettimeofday"; \
const char* aarch_vdso_symbol3 = "__vdso_clock_getres";
#define ARCH_VDSO_SYMBOLS \
aarch_vdso_symbol1, \
aarch_vdso_symbol2, \
aarch_vdso_symbol3,
const char *aarch_vdso_symbol1 = "__vdso_clock_gettime"; \
const char *aarch_vdso_symbol2 = "__vdso_gettimeofday"; \
const char *aarch_vdso_symbol3 = "__vdso_clock_getres";
#define ARCH_VDSO_SYMBOLS aarch_vdso_symbol1, aarch_vdso_symbol2, aarch_vdso_symbol3,
#endif /* __CR_ASM_VDSO_H__ */

View File

@ -5,7 +5,6 @@ extern int save_task_regs(void *, user_regs_struct_t *, user_fpregs_struct_t *);
extern int arch_alloc_thread_info(CoreEntry *core);
extern void arch_free_thread_info(CoreEntry *core);
#define core_put_tls(core, tls)
#define get_task_futex_robust_list_compat(pid, info) -1

View File

@ -2,6 +2,9 @@
#define __ASM_PARASITE_H__
/* TLS is accessed through r13, which is already processed */
static inline void arch_get_tls(tls_t *ptls) { (void)ptls; }
static inline void arch_get_tls(tls_t *ptls)
{
(void)ptls;
}
#endif

View File

@ -9,6 +9,7 @@
* Set R2 to blob + 8000 which is the default value
* Jump to restore_task_exec_start + 8 since R2 is already set (local call)
*/
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
asm volatile( \
@ -22,6 +23,7 @@
"r"((unsigned long)restore_task_exec_start), \
"r"(task_args) \
: "3", "12")
/* clang-format on */
/* There is nothing to do since TLS is accessed through r13 */
#define core_get_tls(pcore, ptls)

View File

@ -14,6 +14,7 @@
*
* See glibc sysdeps/powerpc/powerpc64/sysdep.h for FRAME_MIN_SIZE defines
*/
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
@ -88,6 +89,7 @@
"r"(clone_restore_fn), /* %3 */ \
"r"(args) /* %4 */ \
: "memory","0","3","4","5","14","15")
/* clang-format on */
#define arch_map_vdso(map, compat) -1
@ -95,15 +97,23 @@ int restore_gpregs(struct rt_sigframe *f, UserPpc64RegsEntry *r);
int restore_nonsigframe_gpregs(UserPpc64RegsEntry *r);
/* Nothing to do, TLS is accessed through r13 */
static inline void restore_tls(tls_t *ptls) { (void)ptls; }
static inline void restore_tls(tls_t *ptls)
{
(void)ptls;
}
/*
* Defined in arch/ppc64/syscall-common-ppc64.S
*/
unsigned long sys_shmat(int shmid, const void *shmaddr, int shmflg);
static inline void *alloc_compat_syscall_stack(void) { return NULL; }
static inline void free_compat_syscall_stack(void *stack32) { }
static inline void *alloc_compat_syscall_stack(void)
{
return NULL;
}
static inline void free_compat_syscall_stack(void *stack32)
{
}
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
return -1;

View File

@ -19,8 +19,14 @@ typedef UserPpc64RegsEntry UserRegsEntry;
#define CORE_THREAD_ARCH_INFO(core) core->ti_ppc64
static inline void *decode_pointer(uint64_t v) { return (void*)v; }
static inline uint64_t encode_pointer(void *p) { return (uint64_t)p; }
static inline void *decode_pointer(uint64_t v)
{
return (void *)v;
}
static inline uint64_t encode_pointer(void *p)
{
return (uint64_t)p;
}
/*
* Copied from the following kernel header files :
@ -32,7 +38,7 @@ static inline uint64_t encode_pointer(void *p) { return (uint64_t)p; }
#if !defined AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 6
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
#define AT_VECTOR_SIZE (2 * (AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
typedef uint64_t auxv_t;

View File

@ -15,27 +15,19 @@
#define VDSO_SYMBOL_MAX 10
#define VDSO_SYMBOL_GTOD 5
#define ARCH_VDSO_SYMBOLS_LIST \
const char* aarch_vdso_symbol1 = "__kernel_clock_getres"; \
const char* aarch_vdso_symbol2 = "__kernel_clock_gettime"; \
const char* aarch_vdso_symbol3 = "__kernel_get_syscall_map"; \
const char* aarch_vdso_symbol4 = "__kernel_get_tbfreq"; \
const char* aarch_vdso_symbol5 = "__kernel_getcpu"; \
const char* aarch_vdso_symbol6 = "__kernel_gettimeofday"; \
const char* aarch_vdso_symbol7 = "__kernel_sigtramp_rt64"; \
const char* aarch_vdso_symbol8 = "__kernel_sync_dicache"; \
const char* aarch_vdso_symbol9 = "__kernel_sync_dicache_p5"; \
const char* aarch_vdso_symbol10 = "__kernel_time";
const char *aarch_vdso_symbol1 = "__kernel_clock_getres"; \
const char *aarch_vdso_symbol2 = "__kernel_clock_gettime"; \
const char *aarch_vdso_symbol3 = "__kernel_get_syscall_map"; \
const char *aarch_vdso_symbol4 = "__kernel_get_tbfreq"; \
const char *aarch_vdso_symbol5 = "__kernel_getcpu"; \
const char *aarch_vdso_symbol6 = "__kernel_gettimeofday"; \
const char *aarch_vdso_symbol7 = "__kernel_sigtramp_rt64"; \
const char *aarch_vdso_symbol8 = "__kernel_sync_dicache"; \
const char *aarch_vdso_symbol9 = "__kernel_sync_dicache_p5"; \
const char *aarch_vdso_symbol10 = "__kernel_time";
#define ARCH_VDSO_SYMBOLS \
aarch_vdso_symbol1, \
aarch_vdso_symbol2, \
aarch_vdso_symbol3, \
aarch_vdso_symbol4, \
aarch_vdso_symbol5, \
aarch_vdso_symbol6, \
aarch_vdso_symbol7, \
aarch_vdso_symbol8, \
aarch_vdso_symbol9, \
aarch_vdso_symbol10
aarch_vdso_symbol1, aarch_vdso_symbol2, aarch_vdso_symbol3, aarch_vdso_symbol4, aarch_vdso_symbol5, \
aarch_vdso_symbol6, aarch_vdso_symbol7, aarch_vdso_symbol8, aarch_vdso_symbol9, aarch_vdso_symbol10
#endif /* __CR_ASM_VDSO_H__ */

View File

@ -5,7 +5,9 @@ int save_task_regs(void *arg, user_regs_struct_t *u, user_fpregs_struct_t *f);
int arch_alloc_thread_info(CoreEntry *core);
void arch_free_thread_info(CoreEntry *core);
static inline void core_put_tls(CoreEntry *core, tls_t tls) { }
static inline void core_put_tls(CoreEntry *core, tls_t tls)
{
}
#define get_task_futex_robust_list_compat(pid, info) -1

View File

@ -2,6 +2,9 @@
#define __ASM_PARASITE_H__
/* TLS is accessed through %a01, which is already processed */
static inline void arch_get_tls(tls_t *ptls) { (void)ptls; }
static inline void arch_get_tls(tls_t *ptls)
{
(void)ptls;
}
#endif

View File

@ -7,6 +7,7 @@
/*
* Load stack to %r15, return address in %r14 and argument 1 into %r2
*/
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
asm volatile( \
@ -19,6 +20,7 @@
"d"((unsigned long)restore_task_exec_start), \
"d" (task_args) \
: "2", "14", "memory")
/* clang-format on */
/* There is nothing to do since TLS is accessed through %a01 */
#define core_get_tls(pcore, ptls)

View File

@ -11,6 +11,7 @@
/*
* Clone trampoline - see glibc sysdeps/unix/sysv/linux/s390/s390-64/clone.S
*/
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
@ -75,6 +76,7 @@
"d"(clone_restore_fn), \
"d"(args) \
: "0", "1", "2", "3", "4", "5", "cc", "memory")
/* clang-format on */
#define arch_map_vdso(map, compat) -1
@ -82,13 +84,20 @@ int restore_gpregs(struct rt_sigframe *f, UserS390RegsEntry *r);
int restore_nonsigframe_gpregs(UserS390RegsEntry *r);
unsigned long sys_shmat(int shmid, const void *shmaddr, int shmflg);
unsigned long sys_mmap(void *addr, unsigned long len, unsigned long prot,
unsigned long flags, unsigned long fd,
unsigned long sys_mmap(void *addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long offset);
static inline void restore_tls(tls_t *ptls) { (void)ptls; }
static inline void *alloc_compat_syscall_stack(void) { return NULL; }
static inline void free_compat_syscall_stack(void *stack32) { }
static inline void restore_tls(tls_t *ptls)
{
(void)ptls;
}
static inline void *alloc_compat_syscall_stack(void)
{
return NULL;
}
static inline void free_compat_syscall_stack(void *stack32)
{
}
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
return -1;

View File

@ -19,8 +19,14 @@ typedef UserS390RegsEntry UserRegsEntry;
#define CORE_THREAD_ARCH_INFO(core) core->ti_s390
static inline u64 encode_pointer(void *p) { return (u64) p; }
static inline void *decode_pointer(u64 v) { return (void *) v; }
static inline u64 encode_pointer(void *p)
{
return (u64)p;
}
static inline void *decode_pointer(u64 v)
{
return (void *)v;
}
/*
* See also:
@ -29,7 +35,7 @@ static inline void *decode_pointer(u64 v) { return (void *) v; }
*/
#define AT_VECTOR_SIZE_BASE 20
#define AT_VECTOR_SIZE_ARCH 1
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
#define AT_VECTOR_SIZE (2 * (AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
typedef uint64_t auxv_t;
typedef uint64_t tls_t;

View File

@ -16,14 +16,10 @@
* name string table 'vdso_symbols'
*/
#define ARCH_VDSO_SYMBOLS_LIST \
const char* aarch_vdso_symbol1 = "__kernel_gettimeofday"; \
const char* aarch_vdso_symbol2 = "__kernel_clock_gettime"; \
const char* aarch_vdso_symbol3 = "__kernel_clock_getres"; \
const char* aarch_vdso_symbol4 = "__kernel_getcpu";
#define ARCH_VDSO_SYMBOLS \
aarch_vdso_symbol1, \
aarch_vdso_symbol2, \
aarch_vdso_symbol3, \
aarch_vdso_symbol4
const char *aarch_vdso_symbol1 = "__kernel_gettimeofday"; \
const char *aarch_vdso_symbol2 = "__kernel_clock_gettime"; \
const char *aarch_vdso_symbol3 = "__kernel_clock_getres"; \
const char *aarch_vdso_symbol4 = "__kernel_getcpu";
#define ARCH_VDSO_SYMBOLS aarch_vdso_symbol1, aarch_vdso_symbol2, aarch_vdso_symbol3, aarch_vdso_symbol4
#endif /* __CR_ASM_VDSO_H__ */

View File

@ -2,19 +2,19 @@
#define __CR_ASM_COMPAT_H__
#ifdef CR_NOGLIBC
# include <compel/plugins/std/syscall.h>
# include <compel/plugins/std/syscall-codes.h>
#include <compel/plugins/std/syscall.h>
#include <compel/plugins/std/syscall-codes.h>
#else
# define sys_mmap mmap
# define sys_munmap munmap
#define sys_mmap mmap
#define sys_munmap munmap
#endif
#include <sys/mman.h>
static inline void *alloc_compat_syscall_stack(void)
{
void *mem = (void*)sys_mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_32BIT | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
void *mem = (void *)sys_mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_32BIT | MAP_ANONYMOUS | MAP_PRIVATE,
-1, 0);
if ((uintptr_t)mem % PAGE_SIZE) {
int err = (~(uint32_t)(uintptr_t)mem) + 1;
@ -30,8 +30,7 @@ static inline void free_compat_syscall_stack(void *mem)
long int ret = sys_munmap(mem, PAGE_SIZE);
if (ret)
pr_err("munmap() of compat addr %p failed with %ld\n",
mem, ret);
pr_err("munmap() of compat addr %p failed with %ld\n", mem, ret);
}
struct syscall_args32 {
@ -65,23 +64,22 @@ static inline uint32_t do_full_int80(struct syscall_args32 *args)
*/
uint32_t ret;
asm volatile ("sub $128, %%rsp\n\t"
asm volatile("sub $128, %%rsp\n\t"
"pushq %%rbp\n\t"
"mov %7, %%ebp\n\t"
"int $0x80\n\t"
"popq %%rbp\n\t"
"add $128, %%rsp\n\t"
: "=a" (ret)
: "a" (args->nr),
"b" (args->arg0), "c" (args->arg1), "d" (args->arg2),
"S" (args->arg3), "D" (args->arg4), "g" (args->arg5)
: "=a"(ret)
: "a"(args->nr), "b"(args->arg0), "c"(args->arg1), "d"(args->arg2), "S"(args->arg3),
"D"(args->arg4), "g"(args->arg5)
: "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15");
return ret;
}
#ifndef CR_NOGLIBC
# undef sys_mmap
# undef sys_munmap
#undef sys_mmap
#undef sys_munmap
#endif
#endif
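A small sketch of driving the int 0x80 helper above: fill struct syscall_args32 and let do_full_int80() issue the compat syscall. The number 20 is getpid(2) in the ia32 table and is hard-coded only because the 32-bit syscall codes are not part of this header:

/* Sketch: run the 32-bit getpid() from 64-bit code via int 0x80. */
static int compat_getpid(void)
{
        struct syscall_args32 s = {
                .nr = 20, /* ia32 __NR_getpid, hard-coded for illustration */
        };

        return (int)do_full_int80(&s);
}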

View File

@ -11,8 +11,7 @@ static inline void core_put_tls(CoreEntry *core, tls_t tls)
ThreadInfoX86 *ti = core->thread_info;
int i;
for (i = 0; i < GDT_ENTRY_TLS_NUM; i++)
{
for (i = 0; i < GDT_ENTRY_TLS_NUM; i++) {
user_desc_t *from = &tls.desc[i];
UserDescT *to = ti->tls[i];

View File

@ -5,6 +5,9 @@
* TLS is accessed through PTRACE_GET_THREAD_AREA,
* see compel_arch_fetch_thread_area().
*/
static inline void arch_get_tls(tls_t *ptls) { (void)ptls; }
static inline void arch_get_tls(tls_t *ptls)
{
(void)ptls;
}
#endif

View File

@ -5,6 +5,7 @@
#include "images/core.pb-c.h"
/* clang-format off */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
asm volatile( \
@ -18,6 +19,7 @@
"g"(restore_task_exec_start), \
"g"(task_args) \
: "rdi", "rsi", "rbx", "rax", "memory")
/* clang-format on */
static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
{
@ -52,7 +54,6 @@ static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
}
}
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core);
#endif

View File

@ -11,11 +11,12 @@
#ifdef CONFIG_COMPAT
extern void restore_tls(tls_t *ptls);
extern int arch_compat_rt_sigaction(void *stack32, int sig,
rt_sigaction_t_compat *act);
extern int arch_compat_rt_sigaction(void *stack32, int sig, rt_sigaction_t_compat *act);
extern int set_compat_robust_list(uint32_t head_ptr, uint32_t len);
#else /* CONFIG_COMPAT */
static inline void restore_tls(tls_t *ptls) { }
static inline void restore_tls(tls_t *ptls)
{
}
static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
{
return -1;
@ -41,6 +42,7 @@ static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
* unsigned long tls);
*/
/* clang-format off */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
asm volatile( \
@ -164,17 +166,16 @@ static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
: \
: "r"(ret) \
: "memory")
/* clang-format on */
static inline void
__setup_sas_compat(struct ucontext_ia32* uc, ThreadSasEntry *sas)
static inline void __setup_sas_compat(struct ucontext_ia32 *uc, ThreadSasEntry *sas)
{
uc->uc_stack.ss_sp = (compat_uptr_t)(sas)->ss_sp;
uc->uc_stack.ss_flags = (int)(sas)->ss_flags;
uc->uc_stack.ss_size = (compat_size_t)(sas)->ss_size;
}
static inline void
__setup_sas(struct rt_sigframe* sigframe, ThreadSasEntry *sas)
static inline void __setup_sas(struct rt_sigframe *sigframe, ThreadSasEntry *sas)
{
if (sigframe->is_native) {
struct rt_ucontext *uc = &sigframe->native.uc;
@ -187,7 +188,7 @@ __setup_sas(struct rt_sigframe* sigframe, ThreadSasEntry *sas)
}
}
static inline void _setup_sas(struct rt_sigframe* sigframe, ThreadSasEntry *sas)
static inline void _setup_sas(struct rt_sigframe *sigframe, ThreadSasEntry *sas)
{
if (sas)
__setup_sas(sigframe, sas);

View File

@ -30,8 +30,14 @@ static inline int core_is_compat(CoreEntry *c)
typedef UserX86RegsEntry UserRegsEntry;
static inline u64 encode_pointer(void *p) { return (u64)(long)p; }
static inline void *decode_pointer(u64 v) { return (void*)(long)v; }
static inline u64 encode_pointer(void *p)
{
return (u64)(long)p;
}
static inline void *decode_pointer(u64 v)
{
return (void *)(long)v;
}
#define AT_VECTOR_SIZE 44
typedef uint64_t auxv_t;

View File

@ -37,40 +37,33 @@
*/
#define ARCH_VDSO_SYMBOLS_LIST \
const char* aarch_vdso_symbol1 = "__vdso_clock_gettime"; \
const char* aarch_vdso_symbol2 = "__vdso_getcpu"; \
const char* aarch_vdso_symbol3 = "__vdso_gettimeofday"; \
const char* aarch_vdso_symbol4 = "__vdso_time"; \
const char* aarch_vdso_symbol5 = "__kernel_sigreturn"; \
const char* aarch_vdso_symbol6 = "__kernel_rt_sigreturn";
const char *aarch_vdso_symbol1 = "__vdso_clock_gettime"; \
const char *aarch_vdso_symbol2 = "__vdso_getcpu"; \
const char *aarch_vdso_symbol3 = "__vdso_gettimeofday"; \
const char *aarch_vdso_symbol4 = "__vdso_time"; \
const char *aarch_vdso_symbol5 = "__kernel_sigreturn"; \
const char *aarch_vdso_symbol6 = "__kernel_rt_sigreturn";
#define ARCH_VDSO_SYMBOLS \
aarch_vdso_symbol1, \
aarch_vdso_symbol2, \
aarch_vdso_symbol3, \
aarch_vdso_symbol4, \
aarch_vdso_symbol5, \
aarch_vdso_symbol1, aarch_vdso_symbol2, aarch_vdso_symbol3, aarch_vdso_symbol4, aarch_vdso_symbol5, \
aarch_vdso_symbol6
/* "__kernel_vsyscall", */
#ifndef ARCH_MAP_VDSO_32
# define ARCH_MAP_VDSO_32 0x2002
#define ARCH_MAP_VDSO_32 0x2002
#endif
#ifndef ARCH_MAP_VDSO_64
# define ARCH_MAP_VDSO_64 0x2003
#define ARCH_MAP_VDSO_64 0x2003
#endif
#if defined(CONFIG_COMPAT) && !defined(__ASSEMBLY__)
struct vdso_symtable;
extern int vdso_fill_symtable(uintptr_t mem, size_t size,
struct vdso_symtable *t);
extern int vdso_fill_symtable_compat(uintptr_t mem, size_t size,
struct vdso_symtable *t);
extern int vdso_fill_symtable(uintptr_t mem, size_t size, struct vdso_symtable *t);
extern int vdso_fill_symtable_compat(uintptr_t mem, size_t size, struct vdso_symtable *t);
static inline int __vdso_fill_symtable(uintptr_t mem, size_t size,
struct vdso_symtable *t, bool compat_vdso)
static inline int __vdso_fill_symtable(uintptr_t mem, size_t size, struct vdso_symtable *t, bool compat_vdso)
{
if (compat_vdso)
return vdso_fill_symtable_compat(mem, size, t);

View File

@ -12,8 +12,7 @@ bool is_autofs_pipe(unsigned long inode);
struct mount_info;
int autofs_parse(struct mount_info *pm);
int autofs_dump(struct mount_info *pm);
int autofs_mount(struct mount_info *mi, const char *source, const
char *filesystemtype, unsigned long mountflags);
int autofs_mount(struct mount_info *mi, const char *source, const char *filesystemtype, unsigned long mountflags);
#include <linux/limits.h>
#include <linux/auto_fs.h>
@ -71,7 +70,6 @@ struct args_expire {
__u32 how;
};
struct args_askumount {
__u32 may_umount;
};
@ -131,7 +129,6 @@ static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
return;
}
/*
* If you change this make sure you make the corresponding change
* to autofs-dev-ioctl.c:lookup_ioctl()
@ -174,61 +171,32 @@ enum {
#define AUTOFS_IOCTL 0x93
#define AUTOFS_DEV_IOCTL_VERSION \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_VERSION _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_PROTOVER \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_PROTOVER _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_PROTOSUBVER _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_OPENMOUNT \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_OPENMOUNT _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_CLOSEMOUNT _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_READY _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_READY \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_FAIL _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_FAIL \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_SETPIPEFD _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_SETPIPEFD \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_CATATONIC _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_CATATONIC \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_TIMEOUT _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_TIMEOUT \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_REQUESTER _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_REQUESTER \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_EXPIRE _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_EXPIRE \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_ASKUMOUNT _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
_IOWR(AUTOFS_IOCTL, \
AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT _IOWR(AUTOFS_IOCTL, AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
#endif
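These _IOWR() wrappers all follow the autofs dev-ioctl convention: initialize a struct autofs_dev_ioctl with init_autofs_dev_ioctl() (defined earlier in this header) and pass it to the /dev/autofs control node. A minimal sketch, with paths and error handling simplified:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Sketch: query the autofs dev-ioctl protocol version. */
static int autofs_query_version(void)
{
        struct autofs_dev_ioctl param;
        int fd, ret;

        init_autofs_dev_ioctl(&param);

        fd = open("/dev/autofs", O_RDONLY);
        if (fd < 0)
                return -1;

        ret = ioctl(fd, AUTOFS_DEV_IOCTL_VERSION, &param);
        if (!ret)
                printf("autofs dev-ioctl %u.%u\n", param.ver_major, param.ver_minor);

        close(fd);
        return ret;
}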

View File

@ -22,8 +22,8 @@ struct bpfmap_data_rst {
extern int is_bpfmap_link(char *link);
extern int dump_one_bpfmap_data(BpfmapFileEntry *bpf, int lfd, const struct fd_parms *p);
extern int do_collect_bpfmap_data(struct bpfmap_data_rst *, ProtobufCMessage *,
struct cr_img *, struct bpfmap_data_rst **);
extern int do_collect_bpfmap_data(struct bpfmap_data_rst *, ProtobufCMessage *, struct cr_img *,
struct bpfmap_data_rst **);
extern int restore_bpfmap_data(int, uint32_t, struct bpfmap_data_rst **);
extern const struct fdtype_ops bpfmap_dump_ops;

View File

@ -2,7 +2,6 @@
#define __CR_CLONE_NOASAN_H__
int clone_noasan(int (*fn)(void *), int flags, void *arg);
int clone3_with_pid_noasan(int (*fn)(void *), void *arg, int flags,
int exit_signal, pid_t pid);
int clone3_with_pid_noasan(int (*fn)(void *), void *arg, int flags, int exit_signal, pid_t pid);
#endif /* __CR_CLONE_NOASAN_H__ */

View File

@ -64,8 +64,7 @@ struct cg_root_opt {
#define DEFAULT_TIMEOUT 10
enum FILE_VALIDATION_OPTIONS
{
enum FILE_VALIDATION_OPTIONS {
/*
* This constant indicates that the file validation should be tried with the
* file size method by default.

View File

@ -23,17 +23,15 @@
#include <limits.h>
#include <stdbool.h>
#define CRIU_PLUGIN_GEN_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
#define CRIU_PLUGIN_GEN_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#define CRIU_PLUGIN_VERSION_MAJOR 0
#define CRIU_PLUGIN_VERSION_MINOR 2
#define CRIU_PLUGIN_VERSION_SUBLEVEL 0
#define CRIU_PLUGIN_VERSION_OLD CRIU_PLUGIN_GEN_VERSION(0,1,0)
#define CRIU_PLUGIN_VERSION_OLD CRIU_PLUGIN_GEN_VERSION(0, 1, 0)
#define CRIU_PLUGIN_VERSION \
CRIU_PLUGIN_GEN_VERSION(CRIU_PLUGIN_VERSION_MAJOR, \
CRIU_PLUGIN_VERSION_MINOR, \
CRIU_PLUGIN_VERSION_SUBLEVEL)
CRIU_PLUGIN_GEN_VERSION(CRIU_PLUGIN_VERSION_MAJOR, CRIU_PLUGIN_VERSION_MINOR, CRIU_PLUGIN_VERSION_SUBLEVEL)
/*
* Plugin hook points and their arguments in hooks.
@ -53,8 +51,7 @@ enum {
CR_PLUGIN_HOOK__MAX
};
#define DECLARE_PLUGIN_HOOK_ARGS(__hook, ...) \
typedef int (__hook ##_t)(__VA_ARGS__)
#define DECLARE_PLUGIN_HOOK_ARGS(__hook, ...) typedef int(__hook##_t)(__VA_ARGS__)
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__DUMP_UNIX_SK, int fd, int id);
DECLARE_PLUGIN_HOOK_ARGS(CR_PLUGIN_HOOK__RESTORE_UNIX_SK, int id);
@ -95,8 +92,13 @@ extern cr_plugin_desc_t CR_PLUGIN_DESC;
.max_hooks = CR_PLUGIN_HOOK__MAX, \
};
static inline int cr_plugin_dummy_init(int stage) { return 0; }
static inline void cr_plugin_dummy_exit(int stage, int ret) { }
static inline int cr_plugin_dummy_init(int stage)
{
return 0;
}
static inline void cr_plugin_dummy_exit(int stage, int ret)
{
}
#define CR_PLUGIN_REGISTER_DUMMY(___name) \
cr_plugin_desc_t CR_PLUGIN_DESC = { \
@ -108,10 +110,10 @@ static inline void cr_plugin_dummy_exit(int stage, int ret) { }
};
#define CR_PLUGIN_REGISTER_HOOK(__hook, __func) \
static void __attribute__((constructor)) cr_plugin_register_hook_##__func (void) \
{ \
static void __attribute__((constructor)) cr_plugin_register_hook_##__func(void) \
{ \
CR_PLUGIN_DESC.hooks[__hook] = (void *)__func; \
}
}
/* Public API */
extern int criu_get_image_dir(void);
@ -119,14 +121,14 @@ extern int criu_get_image_dir(void);
/*
* Deprecated, will be removed in next version.
*/
typedef int (cr_plugin_init_t)(void);
typedef void (cr_plugin_fini_t)(void);
typedef int (cr_plugin_dump_unix_sk_t)(int fd, int id);
typedef int (cr_plugin_restore_unix_sk_t)(int id);
typedef int (cr_plugin_dump_file_t)(int fd, int id);
typedef int (cr_plugin_restore_file_t)(int id);
typedef int (cr_plugin_dump_ext_mount_t)(char *mountpoint, int id);
typedef int (cr_plugin_restore_ext_mount_t)(int id, char *mountpoint, char *old_root, int *is_file);
typedef int (cr_plugin_dump_ext_link_t)(int index, int type, char *kind);
typedef int(cr_plugin_init_t)(void);
typedef void(cr_plugin_fini_t)(void);
typedef int(cr_plugin_dump_unix_sk_t)(int fd, int id);
typedef int(cr_plugin_restore_unix_sk_t)(int id);
typedef int(cr_plugin_dump_file_t)(int fd, int id);
typedef int(cr_plugin_restore_file_t)(int id);
typedef int(cr_plugin_dump_ext_mount_t)(char *mountpoint, int id);
typedef int(cr_plugin_restore_ext_mount_t)(int id, char *mountpoint, char *old_root, int *is_file);
typedef int(cr_plugin_dump_ext_link_t)(int index, int type, char *kind);
#endif /* __CRIU_PLUGIN_H__ */
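For context on the macros reformatted above, this is the plugin skeleton they support; the plugin name, the hook body, and the installed header name are placeholders:

#include <criu-plugin.h> /* assumed install name of this header */

/* Register a plugin that needs no init/exit work of its own. */
CR_PLUGIN_REGISTER_DUMMY("example")

/* Hook bodies must match the DECLARE_PLUGIN_HOOK_ARGS() signatures. */
static int example_dump_unix_sk(int fd, int id)
{
        /* Inspect fd and stash whatever is needed to restore it under id. */
        return 0;
}
CR_PLUGIN_REGISTER_HOOK(CR_PLUGIN_HOOK__DUMP_UNIX_SK, example_dump_unix_sk)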

View File

@ -34,12 +34,14 @@ extern void pr_check_features(const char *offset, const char *sep, int width);
#define PPREP_HEAD_INACTIVE ((struct pprep_head *)-1)
#define add_post_prepare_cb_once(phead) do { \
if ((phead)->next == PPREP_HEAD_INACTIVE)\
#define add_post_prepare_cb_once(phead) \
do { \
if ((phead)->next == PPREP_HEAD_INACTIVE) \
add_post_prepare_cb(phead); \
} while (0)
#define MAKE_PPREP_HEAD(name) struct pprep_head name = { \
#define MAKE_PPREP_HEAD(name) \
struct pprep_head name = { \
.next = PPREP_HEAD_INACTIVE, \
.actor = name##_cb, \
}
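The macros above expect a fixed naming pattern: MAKE_PPREP_HEAD(foo) wires the head to a callback called foo_cb, and add_post_prepare_cb_once() may be invoked from many call sites yet queues the head only once. A sketch under that assumption (the callback signature is taken from struct pprep_head's actor field; the names are made up):

/* Sketch: a post-prepare callback hooked up through the macros above. */
static int prep_shared_fds_cb(struct pprep_head *ph)
{
        (void)ph;
        /* ... work that must run once all tasks are prepared ... */
        return 0;
}
static MAKE_PPREP_HEAD(prep_shared_fds);

static int collect_one_fd(void)
{
        add_post_prepare_cb_once(&prep_shared_fds); /* queued at most once */
        return 0;
}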

View File

@ -23,27 +23,27 @@ struct f_owner_ex {
* These things are required to compile on CentOS-6
*/
#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE 1024
#define F_LINUX_SPECIFIC_BASE 1024
#endif
#ifndef F_SETPIPE_SZ
# define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
#endif
#ifndef F_GETPIPE_SZ
# define F_GETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 8)
#define F_GETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 8)
#endif
#ifndef F_ADD_SEALS
# define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#endif
#ifndef F_GET_SEALS
# define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
#endif
#ifndef O_PATH
# define O_PATH 010000000
#define O_PATH 010000000
#endif
#ifndef __O_TMPFILE

View File

@ -29,7 +29,8 @@
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
#define LOCK_NB 4 /* or'd with one of the above to prevent
#define LOCK_NB \
4 /* or'd with one of the above to prevent
blocking */
#define LOCK_UN 8 /* remove lock */

View File

@ -29,8 +29,7 @@ struct reg_file_info {
extern int open_reg_by_id(u32 id);
extern int open_reg_fd(struct file_desc *);
extern int open_path(struct file_desc *, int (*open_cb)(int ns_root_fd,
struct reg_file_info *, void *), void *arg);
extern int open_path(struct file_desc *, int (*open_cb)(int ns_root_fd, struct reg_file_info *, void *), void *arg);
extern const struct fdtype_ops regfile_dump_ops;
extern int do_open_reg_noseek_flags(int ns_root_fd, struct reg_file_info *rfi, void *arg);

View File

@ -55,12 +55,10 @@ struct fd_parms {
};
#define FD_PARMS_INIT \
(struct fd_parms) { \
.fd = FD_DESC_INVALID, \
.fown = FOWN_ENTRY__INIT, \
.link = NULL, \
.mnt_id = -1, \
}
(struct fd_parms) \
{ \
.fd = FD_DESC_INVALID, .fown = FOWN_ENTRY__INIT, .link = NULL, .mnt_id = -1, \
}
extern int fill_fdlink(int lfd, const struct fd_parms *p, struct fd_link *link);
extern uint32_t make_gen_id(uint32_t st_dev, uint32_t st_ino, uint64_t pos);
@ -89,9 +87,9 @@ struct fdinfo_list_entry {
struct pstree_item *task;
FdinfoEntry *fe;
int pid;
u8 received:1;
u8 stage:3;
u8 fake:1;
u8 received : 1;
u8 stage : 3;
u8 fake : 1;
};
extern int inh_fd_max;
@ -99,8 +97,7 @@ extern int inh_fd_max;
/* reports whether fd_a takes prio over fd_b */
static inline int fdinfo_rst_prio(struct fdinfo_list_entry *fd_a, struct fdinfo_list_entry *fd_b)
{
return pid_rst_prio(fd_a->pid, fd_b->pid) ||
((fd_a->pid == fd_b->pid) && (fd_a->fe->fd < fd_b->fe->fd));
return pid_rst_prio(fd_a->pid, fd_b->pid) || ((fd_a->pid == fd_b->pid) && (fd_a->fe->fd < fd_b->fe->fd));
}
struct file_desc_ops {
@ -112,12 +109,11 @@ struct file_desc_ops {
* so it shouldn't be saved for any post-actions.
*/
int (*open)(struct file_desc *d, int *new_fd);
char * (*name)(struct file_desc *, char *b, size_t s);
char *(*name)(struct file_desc *, char *b, size_t s);
};
int collect_fd(int pid, FdinfoEntry *e, struct rst_info *rst_info, bool ghost);
struct fdinfo_list_entry *collect_fd_to(int pid, FdinfoEntry *e,
struct rst_info *rst_info, struct file_desc *fdesc,
struct fdinfo_list_entry *collect_fd_to(int pid, FdinfoEntry *e, struct rst_info *rst_info, struct file_desc *fdesc,
bool fake, bool force_master);
u32 find_unused_file_desc_id(void);
@ -129,7 +125,7 @@ struct file_desc {
struct hlist_node hash; /* Descriptor hashing and lookup */
struct list_head fd_info_head; /* Chain of fdinfo_list_entry-s with same ID and type but different pids */
struct file_desc_ops *ops; /* Associated operations */
struct list_head fake_master_list;/* To chain in the list of file_desc, which don't
struct list_head fake_master_list; /* To chain in the list of file_desc, which don't
have a fle in a task, that having permissions */
};
@ -142,12 +138,9 @@ struct fdtype_ops {
struct cr_img;
extern int dump_my_file(int lfd, u32 *, int *type);
extern int do_dump_gen_file(struct fd_parms *p, int lfd,
const struct fdtype_ops *ops,
FdinfoEntry *e);
extern int do_dump_gen_file(struct fd_parms *p, int lfd, const struct fdtype_ops *ops, FdinfoEntry *e);
struct parasite_drain_fd;
int dump_task_files_seized(struct parasite_ctl *ctl, struct pstree_item *item,
struct parasite_drain_fd *dfds);
int dump_task_files_seized(struct parasite_ctl *ctl, struct pstree_item *item, struct parasite_drain_fd *dfds);
int predump_task_files(int pid);
extern void file_desc_init(struct file_desc *d, u32 id, struct file_desc_ops *ops);
@ -184,8 +177,7 @@ extern int close_old_fds(void);
extern int shared_fdt_prepare(struct pstree_item *item);
extern struct collect_image_info ext_file_cinfo;
extern int dump_unsupp_fd(struct fd_parms *p, int lfd,
char *more, char *info, FdinfoEntry *);
extern int dump_unsupp_fd(struct fd_parms *p, int lfd, char *more, char *info, FdinfoEntry *);
extern int inherit_fd_parse(char *optarg);
extern int inherit_fd_add(int fd, char *key);
@ -197,8 +189,7 @@ extern int inherit_fd_lookup_id(char *id);
extern bool inherited_fd(struct file_desc *, int *fdp);
extern FdinfoEntry *dup_fdinfo(FdinfoEntry *old, int fd, unsigned flags);
int dup_fle(struct pstree_item *task, struct fdinfo_list_entry *ple,
int fd, unsigned flags);
int dup_fle(struct pstree_item *task, struct fdinfo_list_entry *ple, int fd, unsigned flags);
extern int open_transport_socket(void);
extern int set_fds_event(pid_t virt);

View File

@ -5,8 +5,7 @@ extern struct fstype *decode_fstype(u32 fst);
extern bool add_fsname_auto(const char *names);
struct mount_info;
typedef int (*mount_fn_t)(struct mount_info *mi, const char *src, const
char *fstype, unsigned long mountflags);
typedef int (*mount_fn_t)(struct mount_info *mi, const char *src, const char *fstype, unsigned long mountflags);
struct fstype {
char *name;

View File

@ -10,23 +10,23 @@
*/
#ifndef NFS_SUPER_MAGIC
# define NFS_SUPER_MAGIC 0x6969
#define NFS_SUPER_MAGIC 0x6969
#endif
#ifndef PIPEFS_MAGIC
# define PIPEFS_MAGIC 0x50495045
#define PIPEFS_MAGIC 0x50495045
#endif
#ifndef ANON_INODE_FS_MAGIC
# define ANON_INODE_FS_MAGIC 0x09041934
#define ANON_INODE_FS_MAGIC 0x09041934
#endif
#ifndef TMPFS_MAGIC
# define TMPFS_MAGIC 0x01021994
#define TMPFS_MAGIC 0x01021994
#endif
#ifndef SOCKFS_MAGIC
# define SOCKFS_MAGIC 0x534f434b
#define SOCKFS_MAGIC 0x534f434b
#endif
#ifndef DEVPTS_SUPER_MAGIC
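The *_MAGIC fallback definitions above exist so the build does not depend on a particular <linux/magic.h>; at run time such constants are usually compared against the f_type field that statfs(2) reports. A hedged standalone sketch of that use, assuming a Linux/glibc host (the path checked is arbitrary):

/* Sketch only: checks whether a path lives on tmpfs by comparing the
 * superblock magic returned by statfs(2) with TMPFS_MAGIC. */
#include <stdio.h>
#include <sys/vfs.h>

#ifndef TMPFS_MAGIC
#define TMPFS_MAGIC 0x01021994
#endif

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/shm";
	struct statfs sfs;

	if (statfs(path, &sfs) < 0) {
		perror("statfs");
		return 1;
	}

	/* f_type carries the magic of the filesystem backing 'path'. */
	printf("%s is%s backed by tmpfs\n", path, sfs.f_type == TMPFS_MAGIC ? "" : " not");
	return 0;
}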

View File

@ -27,10 +27,8 @@ extern struct cr_imgset *glob_imgset;
extern struct cr_fd_desc_tmpl imgset_template[CR_FD_MAX];
extern struct cr_imgset *cr_task_imgset_open(int pid, int mode);
extern struct cr_imgset *cr_imgset_open_range(int pid, int from, int to,
unsigned long flags);
#define cr_imgset_open(pid, type, flags) cr_imgset_open_range(pid, \
_CR_FD_##type##_FROM, _CR_FD_##type##_TO, flags)
extern struct cr_imgset *cr_imgset_open_range(int pid, int from, int to, unsigned long flags);
#define cr_imgset_open(pid, type, flags) cr_imgset_open_range(pid, _CR_FD_##type##_FROM, _CR_FD_##type##_TO, flags)
extern struct cr_imgset *cr_glob_imgset_open(int mode);
extern void close_cr_imgset(struct cr_imgset **cr_imgset);
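The cr_imgset_open() wrapper above relies on token pasting: cr_imgset_open(pid, TASK, flags) expands its type argument into the _CR_FD_TASK_FROM and _CR_FD_TASK_TO bounds. A standalone illustration of that preprocessor pattern; the enum and function names here are hypothetical, not CRIU's:

/* Hypothetical sketch of a ##-pasting range macro: OPEN_SET(TASK) expands
 * to open_range(_IMG_TASK_FROM, _IMG_TASK_TO). */
#include <stdio.h>

enum { _IMG_TASK_FROM, IMG_CORE, IMG_MM, _IMG_TASK_TO };

static void open_range(int from, int to)
{
	printf("opening image types %d..%d\n", from, to);
}

#define OPEN_SET(type) open_range(_IMG_##type##_FROM, _IMG_##type##_TO)

int main(void)
{
	OPEN_SET(TASK); /* prints "opening image types 0..3" */
	return 0;
}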

View File

@ -114,7 +114,6 @@ enum {
#define INET_DIAG_MAX INET_DIAG_SHUTDOWN
/* INET_DIAG_MEM */
struct inet_diag_meminfo {

View File

@ -4,12 +4,10 @@
#include "images/fh.pb-c.h"
char *irmap_lookup(unsigned int s_dev, unsigned long i_ino);
int irmap_queue_cache(unsigned int dev, unsigned long ino,
FhEntry *fh);
int irmap_queue_cache(unsigned int dev, unsigned long ino, FhEntry *fh);
int irmap_predump_prep(void);
int irmap_predump_run(void);
int check_open_handle(unsigned int s_dev, unsigned long i_ino,
FhEntry *f_handle);
int check_open_handle(unsigned int s_dev, unsigned long i_ino, FhEntry *f_handle);
int irmap_load_cache(void);
int irmap_scan_path_add(char *path);
#endif

View File

@ -10,7 +10,6 @@ struct kid_tree {
struct rb_root root;
unsigned int kcmp_type;
unsigned long subid;
};
#define DECLARE_KCMP_TREE(name, type) \
@ -26,11 +25,8 @@ struct kid_elem {
unsigned int idx;
};
extern uint32_t kid_generate_gen(struct kid_tree *tree,
struct kid_elem *elem, int *new_id);
extern uint32_t kid_generate_gen(struct kid_tree *tree, struct kid_elem *elem, int *new_id);
extern struct kid_elem *kid_lookup_epoll_tfd(struct kid_tree *tree,
struct kid_elem *elem,
kcmp_epoll_slot_t *slot);
extern struct kid_elem *kid_lookup_epoll_tfd(struct kid_tree *tree, struct kid_elem *elem, kcmp_epoll_slot_t *slot);
#endif /* __CR_KCMP_IDS_H__ */

View File

@ -8,17 +8,14 @@ extern int do_rtnl_req(int nl, void *req, int size,
int (*receive_callback)(struct nlmsghdr *h, struct ns_id *ns, void *),
int (*error_callback)(int err, struct ns_id *ns, void *), struct ns_id *ns, void *);
extern int addattr_l(struct nlmsghdr *n, int maxlen, int type,
const void *data, int alen);
extern int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, int alen);
extern int32_t nla_get_s32(const struct nlattr *nla);
#define NLMSG_TAIL(nmsg) \
((struct rtattr *) (((void *) (nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len)))
#define NLMSG_TAIL(nmsg) ((struct rtattr *)(((void *)(nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len)))
#ifndef NETNS_RTA
#define NETNS_RTA(r) \
((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))))
#define NETNS_RTA(r) ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))))
#endif
#endif /* __CR_LIBNETLINK_H__ */
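NLMSG_TAIL above points just past the current end of a netlink request, which is where addattr_l() appends the next attribute. A hedged sketch of what such a helper commonly looks like; this mirrors the usual iproute2-style pattern and is not necessarily CRIU's exact implementation:

#include <stdio.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Same definition as in the header above, repeated here for self-containment. */
#define NLMSG_TAIL(nmsg) ((struct rtattr *)(((void *)(nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len)))

/* Append one attribute of 'alen' bytes to the request headed by 'n'. */
static int addattr_l_sketch(struct nlmsghdr *n, int maxlen, int type, const void *data, int alen)
{
	int len = RTA_LENGTH(alen);
	struct rtattr *rta;

	if (NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len) > maxlen)
		return -1; /* attribute would overflow the request buffer */

	rta = NLMSG_TAIL(n);
	rta->rta_type = type;
	rta->rta_len = len;
	memcpy(RTA_DATA(rta), data, alen);
	n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len);
	return 0;
}

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		char buf[256];
	} req;
	int ifindex = 1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(0); /* header only, no payload yet */

	if (addattr_l_sketch(&req.nlh, sizeof(req), 1 /* arbitrary attribute type */, &ifindex, sizeof(ifindex)))
		return 1;
	printf("request is now %u bytes\n", req.nlh.nlmsg_len);
	return 0;
}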

View File

@ -18,23 +18,12 @@
* means the userland is reading).
*/
#define UFFD_API ((__u64)0xAA)
#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \
UFFD_FEATURE_EVENT_REMAP | \
UFFD_FEATURE_EVENT_REMOVE | \
UFFD_FEATURE_EVENT_UNMAP | \
UFFD_FEATURE_MISSING_HUGETLBFS | \
UFFD_FEATURE_MISSING_SHMEM)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
(__u64)1 << _UFFDIO_API)
#define UFFD_API_RANGE_IOCTLS \
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
(__u64)1 << _UFFDIO_ZEROPAGE)
#define UFFD_API_RANGE_IOCTLS_BASIC \
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY)
#define UFFD_API_FEATURES \
(UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP | UFFD_FEATURE_EVENT_REMOVE | UFFD_FEATURE_EVENT_UNMAP | \
UFFD_FEATURE_MISSING_HUGETLBFS | UFFD_FEATURE_MISSING_SHMEM)
#define UFFD_API_IOCTLS ((__u64)1 << _UFFDIO_REGISTER | (__u64)1 << _UFFDIO_UNREGISTER | (__u64)1 << _UFFDIO_API)
#define UFFD_API_RANGE_IOCTLS ((__u64)1 << _UFFDIO_WAKE | (__u64)1 << _UFFDIO_COPY | (__u64)1 << _UFFDIO_ZEROPAGE)
#define UFFD_API_RANGE_IOCTLS_BASIC ((__u64)1 << _UFFDIO_WAKE | (__u64)1 << _UFFDIO_COPY)
/*
* Valid ioctl command number range with this API is from 0x00 to
@ -53,18 +42,12 @@
/* userfaultfd ioctl ids */
#define UFFDIO 0xAA
#define UFFDIO_API _IOWR(UFFDIO, _UFFDIO_API, \
struct uffdio_api)
#define UFFDIO_REGISTER _IOWR(UFFDIO, _UFFDIO_REGISTER, \
struct uffdio_register)
#define UFFDIO_UNREGISTER _IOR(UFFDIO, _UFFDIO_UNREGISTER, \
struct uffdio_range)
#define UFFDIO_WAKE _IOR(UFFDIO, _UFFDIO_WAKE, \
struct uffdio_range)
#define UFFDIO_COPY _IOWR(UFFDIO, _UFFDIO_COPY, \
struct uffdio_copy)
#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
struct uffdio_zeropage)
#define UFFDIO_API _IOWR(UFFDIO, _UFFDIO_API, struct uffdio_api)
#define UFFDIO_REGISTER _IOWR(UFFDIO, _UFFDIO_REGISTER, struct uffdio_register)
#define UFFDIO_UNREGISTER _IOR(UFFDIO, _UFFDIO_UNREGISTER, struct uffdio_range)
#define UFFDIO_WAKE _IOR(UFFDIO, _UFFDIO_WAKE, struct uffdio_range)
#define UFFDIO_COPY _IOWR(UFFDIO, _UFFDIO_COPY, struct uffdio_copy)
#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, struct uffdio_zeropage)
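The UFFDIO_* values above are the ioctl command numbers of the userfaultfd API, and UFFDIO_API is the handshake that must be issued before anything else on the descriptor. A hedged sketch of that handshake, assuming the host kernel and headers provide userfaultfd (<linux/userfaultfd.h> and SYS_userfaultfd):

/* Minimal UFFDIO_API handshake sketch; error handling kept to a minimum. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (uffd < 0) {
		perror("userfaultfd");
		return 1;
	}

	/* The handshake must precede any UFFDIO_REGISTER on this fd. */
	if (ioctl(uffd, UFFDIO_API, &api) < 0) {
		perror("UFFDIO_API");
		return 1;
	}

	printf("features: 0x%llx ioctls: 0x%llx\n",
	       (unsigned long long)api.features, (unsigned long long)api.ioctls);
	close(uffd);
	return 0;
}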
/* read() structure */
struct uffd_msg {
@ -114,8 +97,8 @@ struct uffd_msg {
#define UFFD_EVENT_UNMAP 0x16
/* flags for UFFD_EVENT_PAGEFAULT */
#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
#define UFFD_PAGEFAULT_FLAG_WP (1<<1) /* If reason is VM_UFFD_WP */
#define UFFD_PAGEFAULT_FLAG_WRITE (1 << 0) /* If this was a write fault */
#define UFFD_PAGEFAULT_FLAG_WP (1 << 1) /* If reason is VM_UFFD_WP */
struct uffdio_api {
/* userland asks for an API number and the features to enable */
@ -154,13 +137,13 @@ struct uffdio_api {
* UFFD_FEATURE_MISSING_HUGETLBFS, but it applies to shmem
* (i.e. tmpfs and other shmem based APIs).
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
#define UFFD_FEATURE_EVENT_REMAP (1<<2)
#define UFFD_FEATURE_EVENT_REMOVE (1<<3)
#define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4)
#define UFFD_FEATURE_MISSING_SHMEM (1<<5)
#define UFFD_FEATURE_EVENT_UNMAP (1<<6)
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1 << 0)
#define UFFD_FEATURE_EVENT_FORK (1 << 1)
#define UFFD_FEATURE_EVENT_REMAP (1 << 2)
#define UFFD_FEATURE_EVENT_REMOVE (1 << 3)
#define UFFD_FEATURE_MISSING_HUGETLBFS (1 << 4)
#define UFFD_FEATURE_MISSING_SHMEM (1 << 5)
#define UFFD_FEATURE_EVENT_UNMAP (1 << 6)
__u64 features;
__u64 ioctls;
@ -173,8 +156,8 @@ struct uffdio_range {
struct uffdio_register {
struct uffdio_range range;
#define UFFDIO_REGISTER_MODE_MISSING ((__u64)1<<0)
#define UFFDIO_REGISTER_MODE_WP ((__u64)1<<1)
#define UFFDIO_REGISTER_MODE_MISSING ((__u64)1 << 0)
#define UFFDIO_REGISTER_MODE_WP ((__u64)1 << 1)
__u64 mode;
/*
@ -194,7 +177,7 @@ struct uffdio_copy {
* available if the wrprotection ioctl are implemented for the
* range according to the uffdio_register.ioctls.
*/
#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0)
#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1 << 0)
__u64 mode;
/*
@ -206,7 +189,7 @@ struct uffdio_copy {
struct uffdio_zeropage {
struct uffdio_range range;
#define UFFDIO_ZEROPAGE_MODE_DONTWAKE ((__u64)1<<0)
#define UFFDIO_ZEROPAGE_MODE_DONTWAKE ((__u64)1 << 0)
__u64 mode;
/*

Some files were not shown because too many files have changed in this diff.