Mirror of https://github.com/checkpoint-restore/criu
vdso/restorer: Always track vdso/vvar positions in vdso_maps_rt
For simplicity, make them always valid in restorer. rt->vdso_start will be used to calculate gettimeofday() address.

Signed-off-by: Dmitry Safonov <dima@arista.com>
Signed-off-by: Andrei Vagin <avagin@gmail.com>
Committed by: Andrei Vagin
Parent: f451fa996a
Commit: 0918c76676
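The hunks below switch the vdso proxification paths from a separate struct vdso_symtable plus a standalone parked address over to struct vdso_maps, which carries the runtime vdso/vvar positions next to the symbol table. A minimal sketch of the fields the new code relies on, inferred purely from the rt->vdso_start, rt->vvar_start and rt->sym accesses in this commit (the real definition lives in CRIU's headers and may contain more members):

    /* Sketch only -- fields inferred from this diff, not copied from the header. */
    struct vdso_maps {
            unsigned long vdso_start;  /* runtime vdso position, always tracked now */
            unsigned long vvar_start;  /* runtime vvar position, VVAR_BAD_ADDR if absent */
            struct vdso_symtable sym;  /* sizes, symbols, vdso_before_vvar ordering flag */
    };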
@@ -84,8 +84,7 @@ static inline bool is_vdso_mark(void *addr)
 extern int vdso_do_park(struct vdso_maps *rt, unsigned long park_at,
                         unsigned long park_size);
 extern int vdso_map_compat(unsigned long map_at);
-extern int vdso_proxify(struct vdso_symtable *sym_rt, bool *added_proxy,
-                        unsigned long vdso_rt_parked_at,
+extern int vdso_proxify(struct vdso_maps *rt, bool *added_proxy,
                         VmaEntry *vmas, size_t nr_vmas,
                         bool compat_vdso, bool force_trampolines);
 extern int vdso_redirect_calls(unsigned long base_to, unsigned long base_from,
@@ -24,19 +24,19 @@
 #endif
 #define LOG_PREFIX "vdso: "
 
-static int vdso_remap(char *who, unsigned long from, unsigned long to, size_t size)
+/* Updates @from on success */
+static int vdso_remap(char *who, unsigned long *from, unsigned long to, size_t size)
 {
         unsigned long addr;
 
-        pr_debug("Remap %s %lx -> %lx\n", who, from, to);
+        pr_debug("Remap %s %lx -> %lx\n", who, *from, to);
 
-        addr = sys_mremap(from, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, to);
+        addr = sys_mremap(*from, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, to);
         if (addr != to) {
-                pr_err("Unable to remap %lx -> %lx %lx\n",
-                       from, to, addr);
+                pr_err("Unable to remap %lx -> %lx %lx\n", *from, to, addr);
                 return -1;
         }
+        *from = addr;
 
         return 0;
 }
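vdso_remap() now takes @from by pointer and writes the final address back, which is what keeps rt->vdso_start and rt->vvar_start valid across every park/unpark move. For readers unfamiliar with the underlying call, here is a small standalone illustration of the same mremap(MREMAP_MAYMOVE | MREMAP_FIXED) pattern; it is plain userspace C with made-up names, not CRIU's sys_mremap() wrapper:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    /* Move a mapping to @to and update the caller's tracking variable on success. */
    static int move_and_track(unsigned long *from, unsigned long to, size_t size)
    {
            void *addr = mremap((void *)*from, size, size,
                                MREMAP_MAYMOVE | MREMAP_FIXED, (void *)to);
            if (addr == MAP_FAILED || (unsigned long)addr != to)
                    return -1;
            *from = (unsigned long)addr;    /* keep the tracked position in sync */
            return 0;
    }

    int main(void)
    {
            size_t size = 4096;
            void *src = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            void *dst = mmap(NULL, size, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            unsigned long pos = (unsigned long)src;

            if (src == MAP_FAILED || dst == MAP_FAILED)
                    return 1;
            /* MREMAP_FIXED replaces whatever is mapped at dst, like MAP_FIXED. */
            if (move_and_track(&pos, (unsigned long)dst, size))
                    return 1;
            printf("mapping now at %lx\n", pos);
            return 0;
    }

As in the diff, the caller's tracked position is updated only after the kernel confirms the mapping landed at the requested address.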
@@ -57,7 +57,7 @@ int vdso_do_park(struct vdso_maps *rt, unsigned long park_at,
 
         if (rt->vvar_start == VVAR_BAD_ADDR) {
                 BUG_ON(vdso_size < park_size);
-                return vdso_remap("rt-vdso", rt->vdso_start,
+                return vdso_remap("rt-vdso", &rt->vdso_start,
                                   rt_vdso_park, vdso_size);
         }
 
@@ -68,8 +68,8 @@ int vdso_do_park(struct vdso_maps *rt, unsigned long park_at,
         else
                 rt_vdso_park = park_at + vvar_size;
 
-        ret = vdso_remap("rt-vdso", rt->vdso_start, rt_vdso_park, vdso_size);
-        ret |= vdso_remap("rt-vvar", rt->vvar_start, rt_vvar_park, vvar_size);
+        ret = vdso_remap("rt-vdso", &rt->vdso_start, rt_vdso_park, vdso_size);
+        ret |= vdso_remap("rt-vvar", &rt->vvar_start, rt_vvar_park, vvar_size);
 
         return ret;
 }
@@ -144,10 +144,8 @@ static bool blobs_matches(VmaEntry *vdso_img, VmaEntry *vvar_img,
  * to dumpee position without generating any proxy.
  */
 static int remap_rt_vdso(VmaEntry *vma_vdso, VmaEntry *vma_vvar,
-                         struct vdso_symtable *sym_rt, unsigned long vdso_rt_parked_at)
+                         struct vdso_maps *rt)
 {
-        unsigned long rt_vvar_addr = vdso_rt_parked_at;
-        unsigned long rt_vdso_addr = vdso_rt_parked_at;
         void *remap_addr;
         int ret;
 
@@ -164,8 +162,8 @@ static int remap_rt_vdso(VmaEntry *vma_vdso, VmaEntry *vma_vvar,
         }
 
         if (!vma_vvar) {
-                return vdso_remap("rt-vdso", rt_vdso_addr,
-                                  vma_vdso->start, sym_rt->vdso_size);
+                return vdso_remap("rt-vdso", &rt->vdso_start,
+                                  vma_vdso->start, rt->sym.vdso_size);
         }
 
         remap_addr = (void *)(uintptr_t)vma_vvar->start;
@@ -174,15 +172,10 @@ static int remap_rt_vdso(VmaEntry *vma_vdso, VmaEntry *vma_vvar,
                 return -1;
         }
 
-        if (vma_vdso->start < vma_vvar->start)
-                rt_vvar_addr = vdso_rt_parked_at + sym_rt->vdso_size;
-        else
-                rt_vdso_addr = vdso_rt_parked_at + sym_rt->vvar_size;
-
-        ret = vdso_remap("rt-vdso", rt_vdso_addr,
-                         vma_vdso->start, sym_rt->vdso_size);
-        ret |= vdso_remap("rt-vvar", rt_vvar_addr,
-                          vma_vvar->start, sym_rt->vvar_size);
+        ret = vdso_remap("rt-vdso", &rt->vdso_start,
+                         vma_vdso->start, rt->sym.vdso_size);
+        ret |= vdso_remap("rt-vvar", &rt->vvar_start,
+                          vma_vvar->start, rt->sym.vvar_size);
 
         return ret;
 }
@@ -193,28 +186,14 @@ static int remap_rt_vdso(VmaEntry *vma_vdso, VmaEntry *vma_vvar,
  * to operate as proxy vdso.
  */
 static int add_vdso_proxy(VmaEntry *vma_vdso, VmaEntry *vma_vvar,
-                          struct vdso_symtable *sym_img, struct vdso_symtable *sym_rt,
-                          unsigned long vdso_rt_parked_at, bool compat_vdso)
+                          struct vdso_symtable *sym_img, struct vdso_maps *rt,
+                          bool compat_vdso)
 {
-        unsigned long rt_vvar_addr = vdso_rt_parked_at;
-        unsigned long rt_vdso_addr = vdso_rt_parked_at;
         unsigned long orig_vvar_addr =
                 vma_vvar ? vma_vvar->start : VVAR_BAD_ADDR;
 
         pr_info("Runtime vdso mismatches dumpee, generate proxy\n");
 
-        /*
-         * Don't forget to shift if vvar is before vdso.
-         */
-        if (sym_rt->vvar_size == VVAR_BAD_SIZE) {
-                rt_vvar_addr = VVAR_BAD_ADDR;
-        } else {
-                if (sym_rt->vdso_before_vvar)
-                        rt_vvar_addr += sym_rt->vdso_size;
-                else
-                        rt_vdso_addr += sym_rt->vvar_size;
-        }
-
         /*
          * Note: we assume that after first migration with inserted
          * rt-vdso and trampoilines on the following migrations
@@ -223,8 +202,8 @@ static int add_vdso_proxy(VmaEntry *vma_vdso, VmaEntry *vma_vvar,
          * jumps, so we can't remove them if on the following migration
          * found that number of symbols in vdso has decreased.
          */
-        if (vdso_redirect_calls(rt_vdso_addr, vma_vdso->start,
-                                sym_rt, sym_img, compat_vdso)) {
+        if (vdso_redirect_calls(rt->vdso_start, vma_vdso->start,
+                                &rt->sym, sym_img, compat_vdso)) {
                 pr_err("Failed to proxify dumpee contents\n");
                 return -1;
         }
@@ -234,16 +213,15 @@ static int add_vdso_proxy(VmaEntry *vma_vdso, VmaEntry *vma_vvar,
          * routine we could detect this vdso and do not dump it, since
          * it's auto-generated every new session if proxy required.
          */
-        sys_mprotect((void *)rt_vdso_addr, sym_rt->vdso_size, PROT_WRITE);
-        vdso_put_mark((void *)rt_vdso_addr, rt_vvar_addr,
-                      vma_vdso->start, orig_vvar_addr);
-        sys_mprotect((void *)rt_vdso_addr, sym_rt->vdso_size, VDSO_PROT);
+        sys_mprotect((void *)rt->vdso_start, rt->sym.vdso_size, PROT_WRITE);
+        vdso_put_mark((void *)rt->vdso_start, rt->vvar_start,
+                      vma_vdso->start, orig_vvar_addr);
+        sys_mprotect((void *)rt->vdso_start, rt->sym.vdso_size, VDSO_PROT);
 
         return 0;
 }
 
-int vdso_proxify(struct vdso_symtable *sym_rt, bool *added_proxy,
-                 unsigned long vdso_rt_parked_at,
+int vdso_proxify(struct vdso_maps *rt, bool *added_proxy,
                  VmaEntry *vmas, size_t nr_vmas,
                  bool compat_vdso, bool force_trampolines)
 {
@@ -291,12 +269,9 @@ int vdso_proxify(struct vdso_symtable *sym_rt, bool *added_proxy,
                 vma_vvar ? (unsigned long)vma_vvar->end : VVAR_BAD_ADDR);
 
         *added_proxy = false;
-        if (blobs_matches(vma_vdso, vma_vvar, &s, sym_rt) && !force_trampolines) {
-                return remap_rt_vdso(vma_vdso, vma_vvar,
-                                     sym_rt, vdso_rt_parked_at);
-        }
+        if (blobs_matches(vma_vdso, vma_vvar, &s, &rt->sym) && !force_trampolines)
+                return remap_rt_vdso(vma_vdso, vma_vvar, rt);
 
         *added_proxy = true;
-        return add_vdso_proxy(vma_vdso, vma_vvar, &s, sym_rt,
-                              vdso_rt_parked_at, compat_vdso);
+        return add_vdso_proxy(vma_vdso, vma_vvar, &s, rt, compat_vdso);
 }
@@ -1264,6 +1264,32 @@ static inline int restore_child_subreaper(int child_subreaper)
         return 0;
 }
 
+static int map_vdso(struct task_restore_args *args, bool compatible)
+{
+        struct vdso_maps *rt = &args->vdso_maps_rt;
+        int err;
+
+        err = arch_map_vdso(args->vdso_rt_parked_at, compatible);
+        if (err < 0) {
+                pr_err("Failed to map vdso %d\n", err);
+                return err;
+        }
+
+        if (rt->sym.vdso_before_vvar) {
+                rt->vdso_start = args->vdso_rt_parked_at;
+                /* kernel may provide only vdso */
+                if (rt->sym.vvar_size != VVAR_BAD_SIZE)
+                        rt->vvar_start = rt->vdso_start + rt->sym.vdso_size;
+                else
+                        rt->vvar_start = VVAR_BAD_ADDR;
+        } else {
+                rt->vvar_start = args->vdso_rt_parked_at;
+                rt->vdso_start = rt->vvar_start + rt->sym.vvar_size;
+        }
+
+        return 0;
+}
+
 /*
  * The main routine to restore task via sigreturn.
  * This one is very special, we never return there
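The new map_vdso() derives both runtime addresses from the single parked address, using the blob order recorded in sym.vdso_before_vvar. As a worked example with purely illustrative numbers: if vdso_rt_parked_at is 0x7f0000000000, sym.vdso_size is 0x2000 and sym.vvar_size is 0x1000, then with vdso_before_vvar set vdso_start = 0x7f0000000000 and vvar_start = 0x7f0000002000; with the opposite order vvar_start = 0x7f0000000000 and vdso_start = 0x7f0000001000. If the kernel provides no vvar blob (vvar_size == VVAR_BAD_SIZE), vvar_start stays VVAR_BAD_ADDR, matching the rt-vdso-only branch in vdso_do_park().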
@@ -1356,15 +1382,8 @@ long __export_restore_task(struct task_restore_args *args)
                 goto core_restore_end;
 
         /* Map vdso that wasn't parked */
-        if (args->can_map_vdso) {
-                int err = arch_map_vdso(args->vdso_rt_parked_at,
-                                args->compatible_mode);
-
-                if (err < 0) {
-                        pr_err("Failed to map vdso %d\n", err);
-                        goto core_restore_end;
-                }
-        }
+        if (args->can_map_vdso && (map_vdso(args, args->compatible_mode) < 0))
+                goto core_restore_end;
 
         /* Shift private vma-s to the left */
         for (i = 0; i < args->vmas_n; i++) {
@@ -1496,10 +1515,9 @@ long __export_restore_task(struct task_restore_args *args)
         /*
          * Proxify vDSO.
          */
-        if (vdso_proxify(&args->vdso_maps_rt.sym, &has_vdso_proxy,
-                         args->vdso_rt_parked_at,
-                         args->vmas, args->vmas_n, args->compatible_mode,
-                         fault_injected(FI_VDSO_TRAMPOLINES)))
+        if (vdso_proxify(&args->vdso_maps_rt, &has_vdso_proxy,
+                         args->vmas, args->vmas_n, args->compatible_mode,
+                         fault_injected(FI_VDSO_TRAMPOLINES)))
                 goto core_restore_end;
 
         /* unmap rt-vdso with restorer blob after restore's finished */