2
0
Mirror of https://github.com/checkpoint-restore/criu, synced 2025-08-31 14:25:49 +00:00

mem/vma: Sanitize struct vm_area_list

- make names more descriptive
- add comments
- use a union for nr_priv_pages and rst_priv_size, since the
  former priv_size field was used with two different meanings:
  the number of pages at checkpoint time and the size in bytes
  at restore time

Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrei Vagin <avagin@gmail.com>
This commit is contained in:
Cyrill Gorcunov
2019-07-05 18:38:03 +03:00
committed by Andrei Vagin
parent 0ee3d0764d
commit 8f9ae895d0
4 changed files with 25 additions and 21 deletions

View File

@@ -125,7 +125,7 @@ int collect_mappings(pid_t pid, struct vm_area_list *vma_area_list,
goto err;
pr_info("Collected, longest area occupies %lu pages\n",
vma_area_list->priv_longest);
vma_area_list->nr_priv_pages_longest);
pr_info_vma_list(&vma_area_list->h);
pr_info("----------------------------------------\n");

View File

@@ -10,12 +10,15 @@
#include <string.h>
struct vm_area_list {
struct list_head h;
unsigned nr;
unsigned int nr_aios;
unsigned long priv_size; /* nr of pages in private VMAs */
unsigned long priv_longest; /* nr of pages in longest private VMA */
unsigned long shared_longest; /* nr of pages in longest shared VMA */
struct list_head h; /* list of VMAs */
unsigned nr; /* nr of all VMAs in the list */
unsigned int nr_aios; /* nr of AIOs VMAs in the list */
union {
unsigned long nr_priv_pages; /* dmp: nr of pages in private VMAs */
unsigned long rst_priv_size; /* rst: size of private VMAs */
};
unsigned long nr_priv_pages_longest; /* nr of pages in longest private VMA */
unsigned long nr_shared_pages_longest;/* nr of pages in longest shared VMA */
};
static inline void vm_area_list_init(struct vm_area_list *vml)

View File

@@ -81,7 +81,7 @@ unsigned long dump_pages_args_size(struct vm_area_list *vmas)
/* In the worst case I need one iovec for each page */
return sizeof(struct parasite_dump_pages_args) +
vmas->nr * sizeof(struct parasite_vma_entry) +
(vmas->priv_size + 1) * sizeof(struct iovec);
(vmas->nr_priv_pages + 1) * sizeof(struct iovec);
}
static inline bool __page_is_zero(u64 pme)
@@ -414,14 +414,14 @@ static int __parasite_dump_pages_seized(struct pstree_item *item,
timing_start(TIME_MEMDUMP);
pr_debug(" Private vmas %lu/%lu pages\n",
vma_area_list->priv_longest, vma_area_list->priv_size);
vma_area_list->nr_priv_pages_longest, vma_area_list->nr_priv_pages);
/*
* Step 0 -- prepare
*/
pmc_size = max(vma_area_list->priv_longest,
vma_area_list->shared_longest);
pmc_size = max(vma_area_list->nr_priv_pages_longest,
vma_area_list->nr_shared_pages_longest);
if (pmc_init(&pmc, item->pid->real, &vma_area_list->h,
pmc_size * PAGE_SIZE))
return -1;
@@ -433,7 +433,7 @@ static int __parasite_dump_pages_seized(struct pstree_item *item,
* use, i.e. on non-lazy non-predump.
*/
cpp_flags |= PP_CHUNK_MODE;
pp = create_page_pipe(vma_area_list->priv_size,
pp = create_page_pipe(vma_area_list->nr_priv_pages,
mdc->lazy ? NULL : pargs_iovs(args),
cpp_flags);
if (!pp)
@@ -612,9 +612,9 @@ int prepare_mm_pid(struct pstree_item *i)
list_add_tail(&vma->list, &ri->vmas.h);
if (vma_area_is_private(vma, kdat.task_size)) {
ri->vmas.priv_size += vma_area_len(vma);
ri->vmas.rst_priv_size += vma_area_len(vma);
if (vma_has_guard_gap_hidden(vma))
ri->vmas.priv_size += PAGE_SIZE;
ri->vmas.rst_priv_size += PAGE_SIZE;
}
pr_info("vma 0x%"PRIx64" 0x%"PRIx64"\n", vma->e->start, vma->e->end);
@@ -1171,17 +1171,17 @@ int prepare_mappings(struct pstree_item *t)
goto out;
/* Reserve a place for mapping private vma-s one by one */
addr = mmap(NULL, vmas->priv_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
addr = mmap(NULL, vmas->rst_priv_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
if (addr == MAP_FAILED) {
ret = -1;
pr_perror("Unable to reserve memory (%lu bytes)", vmas->priv_size);
pr_perror("Unable to reserve memory (%lu bytes)", vmas->rst_priv_size);
goto out;
}
old_premmapped_addr = rsti(t)->premmapped_addr;
old_premmapped_len = rsti(t)->premmapped_len;
rsti(t)->premmapped_addr = addr;
rsti(t)->premmapped_len = vmas->priv_size;
rsti(t)->premmapped_len = vmas->rst_priv_size;
ret = open_page_read(vpid(t), &pr, PR_TASK);
if (ret <= 0)

View File

@@ -660,14 +660,15 @@ static int vma_list_add(struct vma_area *vma_area,
unsigned long pages;
pages = vma_area_len(vma_area) / PAGE_SIZE;
vma_area_list->priv_size += pages;
vma_area_list->priv_longest = max(vma_area_list->priv_longest, pages);
vma_area_list->nr_priv_pages += pages;
vma_area_list->nr_priv_pages_longest =
max(vma_area_list->nr_priv_pages_longest, pages);
} else if (vma_area_is(vma_area, VMA_ANON_SHARED)) {
unsigned long pages;
pages = vma_area_len(vma_area) / PAGE_SIZE;
vma_area_list->shared_longest =
max(vma_area_list->shared_longest, pages);
vma_area_list->nr_shared_pages_longest =
max(vma_area_list->nr_shared_pages_longest, pages);
}
*prev_vfi = *vfi;