diff --git a/cr-dump.c b/cr-dump.c
index ca2f94e04..86dedce89 100644
--- a/cr-dump.c
+++ b/cr-dump.c
@@ -94,6 +94,11 @@ bool privately_dump_vma(struct vma_area *vma)
 	if (vma->e->status & VMA_AREA_SYSVIPC)
 		return false;
 
+#ifdef CONFIG_VDSO
+	/* No dumps for vDSO VVAR data */
+	if (vma->e->status & VMA_AREA_VVAR)
+		return false;
+#endif
 	if (vma_area_is(vma, VMA_ANON_SHARED))
 		return false;
 
diff --git a/mem.c b/mem.c
index 570de571a..6df11aba3 100644
--- a/mem.c
+++ b/mem.c
@@ -67,8 +67,23 @@ unsigned int dump_pages_args_size(struct vm_area_list *vmas)
 
 static inline bool should_dump_page(VmaEntry *vmae, u64 pme)
 {
+#ifdef CONFIG_VDSO
+	/*
+	 * vDSO area must be always dumped because on restore
+	 * we might need to generate a proxy.
+	 */
 	if (vma_entry_is(vmae, VMA_AREA_VDSO))
 		return true;
+	/*
+	 * In turn VVAR area is special and referenced from
+	 * vDSO area by IP addressing (at least on x86) thus
+	 * never ever dump its content but always use one provided
+	 * by the kernel on restore, ie runtime VVAR area must
+	 * be remapped into proper place..
+	 */
+	if (vma_entry_is(vmae, VMA_AREA_VVAR))
+		return false;
+#endif
 	/*
 	 * Optimisation for private mapping pages, that haven't
 	 * yet being COW-ed