
criu: shstk: premap and prepopulate shadow stack VMAs

Shadow stack VMAs cannot be mmap()ed; they must be created with the
map_shadow_stack() system call and populated with the special wrss
instruction, which is available only when shadow stack is enabled.

Premap them to reserve virtual address space and populate them so that
their contents are available for later copying once shadow stack is
enabled.

Along with the space required by the shadow stack VMAs, also reserve an
extra page that will later be used as a temporary shadow stack.
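
For context, the kernel API involved looks roughly like the sketch
below. This is an illustrative user-space example, not code from this
commit: it assumes an x86-64 kernel with shadow stack support (Linux
6.6+), that shadow stack is already enabled for the thread (e.g. by
glibc for a CET-enabled binary), and it provides fallback definitions
for the uapi constants (__NR_map_shadow_stack, SHADOW_STACK_SET_TOKEN,
ARCH_SHSTK_ENABLE, ARCH_SHSTK_WRSS) in case the installed headers lack
them; the value written is a placeholder.

#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_map_shadow_stack
#define __NR_map_shadow_stack 453		/* x86-64 syscall number */
#endif
#ifndef SHADOW_STACK_SET_TOKEN
#define SHADOW_STACK_SET_TOKEN (1ULL << 0)	/* put a restore token at the top */
#endif
/* arch_prctl() codes from asm/prctl.h for shadow stack control */
#define ARCH_SHSTK_ENABLE	0x5001
#define ARCH_SHSTK_WRSS		(1ULL << 1)

/*
 * wrssq stores one 64-bit value into shadow stack memory; it faults
 * unless both shadow stack and the WRSS feature are enabled.
 */
static inline void wrss(uint64_t *addr, uint64_t val)
{
	asm volatile("wrssq %[val], (%[addr])"
		     : : [addr] "r"(addr), [val] "r"(val) : "memory");
}

int main(void)
{
	unsigned long size = 4 * 4096;
	uint64_t *ss;

	/* writing with wrss is opt-in even when shadow stack is on */
	if (syscall(SYS_arch_prctl, ARCH_SHSTK_ENABLE, ARCH_SHSTK_WRSS))
		return 1;

	/* shadow stack memory comes from map_shadow_stack(), not mmap() */
	ss = (uint64_t *)syscall(__NR_map_shadow_stack, 0UL, size,
				 SHADOW_STACK_SET_TOKEN);
	if ((long)ss < 0)
		return 1;

	/* populate the new region, e.g. with previously dumped contents */
	wrss(&ss[0], 0xdeadbeefUL);	/* placeholder value */

	printf("shadow stack mapped at %p\n", (void *)ss);
	return 0;
}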

Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Mike Rapoport (IBM) 2022-05-24 21:25:14 +03:00 committed by Andrei Vagin
parent 17eda3ce57
commit 4b6dda7ec0
2 changed files with 20 additions and 0 deletions


@@ -106,6 +106,7 @@ static inline bool vma_entry_is_private(VmaEntry *entry, unsigned long task_size
 	return (vma_entry_is(entry, VMA_AREA_REGULAR) &&
 		(vma_entry_is(entry, VMA_ANON_PRIVATE) || vma_entry_is(entry, VMA_FILE_PRIVATE)) &&
 		(entry->end <= task_size)) ||
+	       vma_entry_is(entry, VMA_AREA_SHSTK) ||
 	       vma_entry_is(entry, VMA_AREA_AIORING);
 }


@@ -741,6 +741,8 @@ int prepare_mm_pid(struct pstree_item *i)
 			ri->vmas.rst_priv_size += vma_area_len(vma);
 			if (vma_has_guard_gap_hidden(vma))
 				ri->vmas.rst_priv_size += PAGE_SIZE;
+			if (vma_area_is(vma, VMA_AREA_SHSTK))
+				ri->vmas.rst_priv_size += PAGE_SIZE;
 		}
 
 		pr_info("vma 0x%" PRIx64 " 0x%" PRIx64 "\n", vma->e->start, vma->e->end);
@@ -882,6 +884,14 @@ static int premap_private_vma(struct pstree_item *t, struct vma_area *vma, void
 		vma->e->start -= PAGE_SIZE;
 
 	size = vma_entry_len(vma->e);
+
+	/*
+	 * map an extra page for shadow stack VMAs, it will be used as a
+	 * temporary shadow stack
+	 */
+	if (vma_area_is(vma, VMA_AREA_SHSTK))
+		size += PAGE_SIZE;
+
 	if (!vma_inherited(vma)) {
 		int flag = 0;
 		/*
@@ -957,6 +967,15 @@ static int premap_private_vma(struct pstree_item *t, struct vma_area *vma, void
 static inline bool vma_force_premap(struct vma_area *vma, struct list_head *head)
 {
+	/*
+	 * Shadow stack VMAs cannot be mmap()ed, they must be created using
+	 * map_shadow_stack() system call.
+	 * Premap them to reserve virtual address space and populate them
+	 * to have their contents available for later copying.
+	 */
+	if (vma_area_is(vma, VMA_AREA_SHSTK))
+		return true;
+
 	/*
 	 * On kernels with 4K guard pages, growsdown VMAs
 	 * always have one guard page at the