
criu/vdso: Purge CONFIG_VDSO

Vigorously remove the config ifdef.
The config option *never* had any excuse to exist:
- for x86 we were grand
- for ppc64/arm64, patches to support mremap() on vdso were accepted long
ago; but regardless, it's not possible to disable CONFIG_VDSO for those
platforms in the kernel
- for s390, patches were mainlined not that long ago, but it's not
possible to disable the kernel config either
- for arm32 it's possible to disable the kernel config, but the kernel
historically returns to userspace through the sigpage, not the vdso.
That's the only platform where criu disallows having CONFIG_VDSO=y in the
kernel, but that's just meaningless. A kernel patch for sigpage mremap()
went into v4.13: commit 280e87e98c09 ("ARM: 8683/1: ARM32: Support
mremap() for sigpage/vDSO").

So, removing the config was a long-lived item on my TODO list: it blighted
arm32 users and made changes to vdso more complex with all the "needed"
ifdeffery. Get rid of it with fire.
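For reference, the kernel capability the bullet points above rely on can be
demonstrated with a few lines of userspace code. This is only a minimal
sketch, not criu code: it relocates the calling task's own vdso to a freshly
reserved address, which is roughly what the restorer's vdso "parking" amounts
to; error handling and the choice of destination address are simplified.

#define _GNU_SOURCE		/* for MREMAP_MAYMOVE / MREMAP_FIXED */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Find the [vdso] line in /proc/self/maps. */
static int find_vdso(unsigned long *start, unsigned long *end)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "[vdso]") &&
		    sscanf(line, "%lx-%lx", start, end) == 2) {
			fclose(f);
			return 0;
		}
	}
	fclose(f);
	return -1;
}

int main(void)
{
	unsigned long start, end;
	void *park, *moved;

	if (find_vdso(&start, &end))
		return 1;

	/* Reserve a destination, then ask the kernel to relocate the vdso
	 * there.  Pre-4.13 arm32 kernels refused this for the sigpage,
	 * which is what commit 280e87e98c09 fixed. */
	park = mmap(NULL, end - start, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (park == MAP_FAILED)
		return 1;

	moved = mremap((void *)start, end - start, end - start,
		       MREMAP_FIXED | MREMAP_MAYMOVE, park);
	if (moved == MAP_FAILED) {
		perror("mremap vdso");
		return 1;
	}

	/* Note: any vdso function pointers libc has already cached (e.g.
	 * for clock_gettime) are stale after the move. */
	printf("vdso moved from 0x%lx to %p\n", start, moved);
	return 0;
}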

Fixes: #446

Signed-off-by: Dmitry Safonov <dima@arista.com>
Dmitry Safonov 2019-05-29 17:15:15 +01:00 committed by Andrei Vagin
parent e73df268af
commit de53191179
14 changed files with 9 additions and 95 deletions

View File

@@ -56,19 +56,16 @@ ifeq ($(ARCH),arm)
endif
ifeq ($(ARCH),aarch64)
VDSO := y
DEFINES := -DCONFIG_AARCH64
endif
ifeq ($(ARCH),ppc64)
LDARCH := powerpc:common64
VDSO := y
DEFINES := -DCONFIG_PPC64 -D__SANE_USERSPACE_TYPES__
endif
ifeq ($(ARCH),x86)
LDARCH := i386:x86-64
VDSO := y
DEFINES := -DCONFIG_X86_64
endif
@@ -81,7 +78,6 @@ endif
ifeq ($(ARCH),s390)
ARCH := s390
SRCARCH := s390
VDSO := y
DEFINES := -DCONFIG_S390
CFLAGS_PIE := -fno-optimize-sibling-calls
endif
@@ -90,7 +86,7 @@ CFLAGS_PIE += -DCR_NOGLIBC
export CFLAGS_PIE
LDARCH ?= $(SRCARCH)
export LDARCH VDSO
export LDARCH
export PROTOUFIX DEFINES
#

View File

@@ -57,10 +57,6 @@ $(CONFIG_HEADER): scripts/feature-tests.mak $(CONFIG_FILE)
$(Q) echo '' >> $$@
$(call map,gen-feature-test,$(FEATURES_LIST))
$(Q) cat $(CONFIG_FILE) | sed -n -e '/^[^#]/s/^/#define CONFIG_/p' >> $$@
ifeq ($$(VDSO),y)
$(Q) echo '#define CONFIG_VDSO' >> $$@
$(Q) echo '' >> $$@
endif
$(Q) echo '#endif /* __CR_CONFIG_H__ */' >> $$@
endef
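To make the effect of the removed lines concrete: the surviving sed rule
prefixes every non-comment line of $(CONFIG_FILE) with "#define CONFIG_", and
the extra '#define CONFIG_VDSO' is simply never appended any more. Below is a
hedged illustration of the generated header; the option names are hypothetical
examples, only the guard macro comes from the rule above.

/* include/common/config.h -- illustrative output only */
#ifndef __CR_CONFIG_H__
#define __CR_CONFIG_H__

/* feature-test results emitted by gen-feature-test come first ... */

/* ... then one define per non-comment line of $(CONFIG_FILE); the names
 * below are hypothetical, not criu's real option list */
#define CONFIG_COMPAT
#define CONFIG_HAS_MEMFD

#endif /* __CR_CONFIG_H__ */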

View File

@@ -83,15 +83,12 @@ obj-y += fdstore.o
obj-y += uffd.o
obj-y += config.o
obj-y += servicefd.o
ifeq ($(VDSO),y)
obj-y += pie-util-vdso.o
obj-y += vdso.o
obj-$(CONFIG_COMPAT) += pie-util-vdso-elf32.o
CFLAGS_pie-util-vdso-elf32.o += -DCONFIG_VDSO_32
obj-$(CONFIG_COMPAT) += vdso-compat.o
CFLAGS_REMOVE_vdso-compat.o += $(CFLAGS-ASAN) $(CFLAGS-GCOV)
endif
PROTOBUF_GEN := scripts/protobuf-gen.sh
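The compat objects that stay conditional above rely on building the same
vdso-parsing source twice, with -DCONFIG_VDSO_32 switching the second copy
over to 32-bit ELF types so it can parse a compat vdso image. A rough,
self-contained sketch of that trick (the type aliases are illustrative, not
the ones criu actually uses):

#include <elf.h>
#include <stdio.h>

/* One translation unit, two object files: the plain build parses the
 * native 64-bit vdso, the -DCONFIG_VDSO_32 build parses the compat one. */
#ifdef CONFIG_VDSO_32
typedef Elf32_Ehdr vdso_ehdr_t;
typedef Elf32_Sym  vdso_sym_t;
#else
typedef Elf64_Ehdr vdso_ehdr_t;
typedef Elf64_Sym  vdso_sym_t;
#endif

int main(void)
{
	printf("vdso parser built for %zu-byte ELF headers, %zu-byte symbols\n",
	       sizeof(vdso_ehdr_t), sizeof(vdso_sym_t));
	return 0;
}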

View File

@@ -3225,10 +3225,8 @@ static int sigreturn_restore(pid_t pid, struct task_restore_args *task_args, uns
struct thread_restore_args *thread_args;
struct restore_mem_zone *mz;
#ifdef CONFIG_VDSO
struct vdso_maps vdso_maps_rt;
unsigned long vdso_rt_size = 0;
#endif
struct vm_area_list self_vmas;
struct vm_area_list *vmas = &rsti(current)->vmas;
@@ -3279,7 +3277,6 @@ static int sigreturn_restore(pid_t pid, struct task_restore_args *task_args, uns
pr_info("%d threads require %ldK of memory\n",
current->nr_threads, KBYTES(task_args->bootstrap_len));
#ifdef CONFIG_VDSO
if (core_is_compat(core))
vdso_maps_rt = vdso_maps_compat;
else
@@ -3291,7 +3288,6 @@ static int sigreturn_restore(pid_t pid, struct task_restore_args *task_args, uns
if (vdso_rt_size && vdso_maps_rt.sym.vvar_size)
vdso_rt_size += ALIGN(vdso_maps_rt.sym.vvar_size, PAGE_SIZE);
task_args->bootstrap_len += vdso_rt_size;
#endif
/*
* Restorer is a blob (code + args) that will get mapped in some
@@ -3506,7 +3502,6 @@ static int sigreturn_restore(pid_t pid, struct task_restore_args *task_args, uns
}
#ifdef CONFIG_VDSO
/*
* Restorer needs own copy of vdso parameters. Runtime
* vdso must be kept non intersecting with anything else,
@@ -3518,7 +3513,6 @@ static int sigreturn_restore(pid_t pid, struct task_restore_args *task_args, uns
task_args->vdso_maps_rt = vdso_maps_rt;
task_args->vdso_rt_size = vdso_rt_size;
task_args->can_map_vdso = kdat.can_map_vdso;
#endif
new_sp = restorer_stack(task_args->t->mz);
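With the ifdefs gone, the vdso accounting above always contributes to the
restorer's bootstrap area. A simplified, runnable model of that arithmetic is
below; the field names, initial length and PAGE_SIZE are assumptions made for
the sketch, and only the vvar rounding and the final addition mirror the hunk.

#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumption: 4K pages */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* stand-in for vdso_maps_rt.sym; field names are assumptions */
struct vdso_symtable_sk {
	unsigned long vdso_size;
	unsigned long vvar_size;
};

int main(void)
{
	struct vdso_symtable_sk sym = { .vdso_size = 2 * PAGE_SIZE,
					.vvar_size = 1 * PAGE_SIZE };
	unsigned long bootstrap_len = 16 * PAGE_SIZE;	/* threads + restorer blob */
	unsigned long vdso_rt_size;

	/* room to park a private copy of the runtime vdso ... */
	vdso_rt_size = ALIGN(sym.vdso_size, PAGE_SIZE);
	/* ... plus its vvar pages, if the vdso has any */
	if (vdso_rt_size && sym.vvar_size)
		vdso_rt_size += ALIGN(sym.vvar_size, PAGE_SIZE);

	bootstrap_len += vdso_rt_size;
	printf("bootstrap area: %luK\n", bootstrap_len / 1024);
	return 0;
}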

View File

@@ -5,9 +5,7 @@
#include "int.h"
#include "common/config.h"
#include "asm/kerndat.h"
#ifdef CONFIG_VDSO
#include "util-vdso.h"
#endif
struct stat;
@@ -61,11 +59,9 @@ struct kerndat_s {
bool has_thp_disable;
bool can_map_vdso;
bool vdso_hint_reliable;
#ifdef CONFIG_VDSO
struct vdso_symtable vdso_sym;
#ifdef CONFIG_COMPAT
struct vdso_symtable vdso_sym_compat;
#endif
#endif
bool has_nsid;
bool has_link_nsid;

View File

@@ -2,9 +2,6 @@
#define __CR_PARASITE_VDSO_H__
#include "common/config.h"
#ifdef CONFIG_VDSO
#include "util-vdso.h"
#include "images/vma.pb-c.h"
@@ -95,10 +92,4 @@ extern int vdso_redirect_calls(unsigned long base_to, unsigned long base_from,
struct vdso_symtable *to, struct vdso_symtable *from,
bool compat_vdso);
#else /* CONFIG_VDSO */
#define vdso_do_park(sym_rt, park_at, park_size) (0)
#define vdso_map_compat(map_at) (0)
#endif /* CONFIG_VDSO */
#endif /* __CR_PARASITE_VDSO_H__ */

View File

@@ -207,11 +207,9 @@ struct task_restore_args {
bool can_map_vdso;
bool auto_dedup;
#ifdef CONFIG_VDSO
unsigned long vdso_rt_size;
struct vdso_maps vdso_maps_rt; /* runtime vdso symbols */
unsigned long vdso_rt_parked_at; /* safe place to keep vdso */
#endif
void **breakpoint;
enum faults fault_strategy;

View File

@@ -5,9 +5,6 @@
#include <stdbool.h>
#include "common/config.h"
#ifdef CONFIG_VDSO
#include "util-vdso.h"
extern struct vdso_maps vdso_maps;
@@ -26,14 +23,4 @@ extern void compat_vdso_helper(struct vdso_maps *native, int pipe_fd,
int err_fd, void *vdso_buf, size_t buf_size);
#endif
#else /* CONFIG_VDSO */
#define vdso_init_dump() (0)
#define vdso_init_restore() (0)
#define kerndat_vdso_fill_symtable() (0)
#define kerndat_vdso_preserves_hint() (0)
#define parasite_fixup_vdso(ctl, pid, vma_area_list) (0)
#endif /* CONFIG_VDSO */
#endif /* __CR_VDSO_H__ */

View File

@@ -101,7 +101,6 @@ static inline bool __page_in_parent(bool dirty)
bool should_dump_page(VmaEntry *vmae, u64 pme)
{
#ifdef CONFIG_VDSO
/*
* vDSO area must be always dumped because on restore
* we might need to generate a proxy.
@@ -117,7 +116,7 @@ bool should_dump_page(VmaEntry *vmae, u64 pme)
*/
if (vma_entry_is(vmae, VMA_AREA_VVAR))
return false;
#endif
/*
* Optimisation for private mapping pages, that haven't
* yet being COW-ed

View File

@@ -16,6 +16,7 @@ endif
LDS := compel/arch/$(SRCARCH)/scripts/compel-pack.lds.S
restorer-obj-y += parasite-vdso.o ./$(ARCH_DIR)/vdso-pie.o
restorer-obj-y += ./$(ARCH_DIR)/restorer.o
ifeq ($(ARCH),x86)
@@ -25,16 +26,12 @@ ifeq ($(ARCH),x86)
endif
endif
ifeq ($(VDSO),y)
restorer-obj-y += parasite-vdso.o ./$(ARCH_DIR)/vdso-pie.o
ifeq ($(SRCARCH),aarch64)
restorer-obj-y += ./$(ARCH_DIR)/intraprocedure.o
endif
ifeq ($(SRCARCH),aarch64)
restorer-obj-y += ./$(ARCH_DIR)/intraprocedure.o
endif
ifeq ($(SRCARCH),ppc64)
restorer-obj-y += ./$(ARCH_DIR)/vdso-trampoline.o
endif
ifeq ($(SRCARCH),ppc64)
restorer-obj-y += ./$(ARCH_DIR)/vdso-trampoline.o
endif
define gen-pie-rules

View File

@@ -7,14 +7,7 @@
lib-name := pie.lib.a
lib-y += util.o
ifeq ($(VDSO),y)
lib-y += util-vdso.o
endif
ifeq ($(SRCARCH),ppc64)
lib-y += ./$(ARCH_DIR)/misc.o
endif
lib-y += util-vdso.o
ifeq ($(SRCARCH),x86)
ifeq ($(CONFIG_COMPAT),y)

View File

@@ -573,7 +573,6 @@ err_io:
#undef __tty_ioctl
}
#ifdef CONFIG_VDSO
static int parasite_check_vdso_mark(struct parasite_vdso_vma_entry *args)
{
struct vdso_mark *m = (void *)args->start;
@@ -609,13 +608,6 @@ static int parasite_check_vdso_mark(struct parasite_vdso_vma_entry *args)
return 0;
}
#else
static inline int parasite_check_vdso_mark(struct parasite_vdso_vma_entry *args)
{
pr_err("Unexpected VDSO check command\n");
return -1;
}
#endif
static int parasite_dump_cgroup(struct parasite_dump_cgroup_args *args)
{

View File

@@ -1081,11 +1081,7 @@ static void restore_posix_timers(struct task_restore_args *args)
* sys_munmap must not return here. The control process must
* trap us on the exit from sys_munmap.
*/
#ifdef CONFIG_VDSO
unsigned long vdso_rt_size = 0;
#else
#define vdso_rt_size (0)
#endif
void *bootstrap_start = NULL;
unsigned int bootstrap_len = 0;
@@ -1259,9 +1255,7 @@ long __export_restore_task(struct task_restore_args *args)
bootstrap_start = args->bootstrap_start;
bootstrap_len = args->bootstrap_len;
#ifdef CONFIG_VDSO
vdso_rt_size = args->vdso_rt_size;
#endif
fi_strategy = args->fault_strategy;
@@ -1446,7 +1440,6 @@ long __export_restore_task(struct task_restore_args *args)
sys_close(args->vma_ios_fd);
#ifdef CONFIG_VDSO
/*
* Proxify vDSO.
*/
@@ -1454,7 +1447,6 @@ long __export_restore_task(struct task_restore_args *args)
args->vmas, args->vmas_n, args->compatible_mode,
fault_injected(FI_VDSO_TRAMPOLINES)))
goto core_restore_end;
#endif
/*
* Walk though all VMAs again to drop PROT_WRITE

View File

@@ -502,7 +502,6 @@ err:
return -1;
}
#ifdef CONFIG_VDSO
static inline int handle_vdso_vma(struct vma_area *vma)
{
vma->e->status |= VMA_AREA_REGULAR;
@@ -518,19 +517,6 @@ static inline int handle_vvar_vma(struct vma_area *vma)
vma->e->status |= VMA_AREA_VVAR;
return 0;
}
#else
static inline int handle_vdso_vma(struct vma_area *vma)
{
pr_warn_once("Found vDSO area without support\n");
return -1;
}
static inline int handle_vvar_vma(struct vma_area *vma)
{
pr_warn_once("Found VVAR area without support\n");
return -1;
}
#endif
static int handle_vma(pid_t pid, struct vma_area *vma_area,
const char *file_path, DIR *map_files_dir,
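For completeness, here is a standalone sketch of the classification that
handle_vdso_vma()/handle_vvar_vma() above implement: the pathname column of
/proc/<pid>/maps decides whether an area is the vdso, the vvar page, or a
regular mapping. The flag values and the dispatch function are made up for the
sketch; only the "[vdso]"/"[vvar]" names and the dump/skip behaviour (see the
should_dump_page() hunk above) come from the source.

#include <stdio.h>
#include <string.h>

/* placeholder flag values, not criu's real ones */
#define VMA_AREA_REGULAR	(1u << 0)
#define VMA_AREA_VDSO		(1u << 1)
#define VMA_AREA_VVAR		(1u << 2)

static unsigned int classify_area(const char *path)
{
	if (!strcmp(path, "[vdso]"))
		return VMA_AREA_REGULAR | VMA_AREA_VDSO; /* always dumped, may be proxied on restore */
	if (!strcmp(path, "[vvar]"))
		return VMA_AREA_VVAR;			 /* kernel-owned pages, never dumped */
	return VMA_AREA_REGULAR;
}

int main(void)
{
	const char *samples[] = { "[vdso]", "[vvar]", "/usr/lib/libc.so.6" };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-22s -> status 0x%x\n", samples[i], classify_area(samples[i]));
	return 0;
}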