#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <limits.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

#include <fcntl.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/shm.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sched.h>

#include "types.h"
#include <compel/ptrace.h>
#include "common/compiler.h"

#include "linux/mount.h"

#include "clone-noasan.h"
#include "cr_options.h"
#include "servicefd.h"
#include "image.h"
#include "img-streamer.h"
#include "util.h"
#include "util-pie.h"
#include "criu-log.h"
#include "restorer.h"
#include "sockets.h"
#include "sk-packet.h"
#include "common/lock.h"
#include "files.h"
#include "pipes.h"
#include "fifo.h"
#include "sk-inet.h"
#include "eventfd.h"
#include "eventpoll.h"
#include "signalfd.h"
#include "proc_parse.h"
#include "pie/restorer-blob.h"
#include "crtools.h"
#include "uffd.h"
|
2012-01-26 15:27:00 +04:00
|
|
|
#include "namespaces.h"
|
2014-02-03 15:12:08 +04:00
|
|
|
#include "mem.h"
|
2012-05-04 13:38:00 +04:00
|
|
|
#include "mount.h"
|
2013-01-14 20:47:51 +04:00
|
|
|
#include "fsnotify.h"
|
2012-06-26 14:51:00 +04:00
|
|
|
#include "pstree.h"
|
2012-08-10 19:14:36 +04:00
|
|
|
#include "net.h"
|
2012-09-12 20:00:54 +04:00
|
|
|
#include "tty.h"
|
2012-12-21 17:35:36 +04:00
|
|
|
#include "cpu.h"
|
2013-01-17 16:09:34 +08:00
|
|
|
#include "file-lock.h"
|
2013-05-24 01:42:13 +04:00
|
|
|
#include "vdso.h"
|
2013-08-11 13:00:45 +04:00
|
|
|
#include "stats.h"
|
2013-08-23 19:10:15 +04:00
|
|
|
#include "tun.h"
|
2013-11-05 12:33:03 +04:00
|
|
|
#include "vma.h"
|
2013-10-11 17:38:57 +04:00
|
|
|
#include "kerndat.h"
|
2013-11-02 01:05:13 +04:00
|
|
|
#include "rst-malloc.h"
|
2013-12-19 21:35:00 +04:00
|
|
|
#include "plugin.h"
|
2014-05-08 16:37:00 +04:00
|
|
|
#include "cgroup.h"
|
2014-06-30 21:58:05 +04:00
|
|
|
#include "timerfd.h"
|
2014-09-03 23:43:23 +04:00
|
|
|
#include "action-scripts.h"
|
2016-05-19 15:14:34 +03:00
|
|
|
#include "shmem.h"
|
2014-12-19 16:02:24 +03:00
|
|
|
#include "aio.h"
|
2015-05-06 16:18:42 -06:00
|
|
|
#include "lsm.h"
|
2015-06-30 07:47:11 -06:00
|
|
|
#include "seccomp.h"
|
2015-10-13 18:06:30 +03:00
|
|
|
#include "fault-injection.h"
|
2016-03-24 16:01:58 +03:00
|
|
|
#include "sk-queue.h"
|
2016-11-21 12:36:38 +03:00
|
|
|
#include "sigframe.h"
|
2017-02-07 11:43:29 +03:00
|
|
|
#include "fdstore.h"
|
2020-02-06 18:01:00 +00:00
|
|
|
#include "string.h"
|
2019-12-18 23:32:32 +00:00
|
|
|
#include "memfd.h"
|
2019-08-14 07:40:40 +03:00
|
|
|
#include "timens.h"
|
2020-07-25 17:56:26 +05:30
|
|
|
#include "bpfmap.h"
|
2016-03-24 16:01:58 +03:00
|
|
|
|
2013-09-23 14:33:34 +04:00
|
|
|
#include "parasite-syscall.h"
|
2016-07-16 02:44:00 +03:00
|
|
|
#include "files-reg.h"
|
2016-10-27 19:01:21 +03:00
|
|
|
#include <compel/plugins/std/syscall-codes.h>
|
2016-10-27 12:10:30 +03:00
|
|
|
#include "compel/include/asm/syscall.h"
|
2013-09-23 14:33:34 +04:00
|
|
|
|
2012-07-18 16:25:06 +04:00
|
|
|
#include "protobuf.h"
|
2016-02-15 18:29:00 +03:00
|
|
|
#include "images/sa.pb-c.h"
|
|
|
|
#include "images/timer.pb-c.h"
|
|
|
|
#include "images/vma.pb-c.h"
|
|
|
|
#include "images/rlimit.pb-c.h"
|
|
|
|
#include "images/pagemap.pb-c.h"
|
|
|
|
#include "images/siginfo.pb-c.h"
|
2012-07-18 16:25:06 +04:00
|
|
|
|
2016-10-20 23:42:12 +03:00
|
|
|
#include "restore.h"
|
2014-12-11 22:55:12 +02:00
|
|
|
|
|
|
|
#include "cr-errno.h"
|
2013-01-09 17:39:23 +04:00
|
|
|
|
2015-04-30 16:52:44 +02:00
|
|
|
#ifndef arch_export_restore_thread
|
|
|
|
#define arch_export_restore_thread __export_restore_thread
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef arch_export_restore_task
|
|
|
|
#define arch_export_restore_task __export_restore_task
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef arch_export_unmap
|
|
|
|
#define arch_export_unmap __export_unmap
|
2016-06-28 22:24:11 +03:00
|
|
|
#define arch_export_unmap_compat __export_unmap_compat
|
2015-04-30 16:52:44 +02:00
|
|
|
#endif
|
|
|
|
|
2016-06-01 19:01:59 +03:00
|
|
|
struct pstree_item *current;
|
2011-11-13 12:57:16 +04:00
|
|
|
|
2012-01-26 15:26:00 +04:00
|
|
|
static int restore_task_with_children(void *);
|
2016-05-25 16:28:00 +03:00
|
|
|
static int sigreturn_restore(pid_t pid, struct task_restore_args *ta, unsigned long alen, CoreEntry *core);
|
2012-09-14 14:51:40 +04:00
|
|
|
static int prepare_restorer_blob(void);
|
2016-05-24 14:36:02 +03:00
|
|
|
static int prepare_rlimits(int pid, struct task_restore_args *, CoreEntry *core);
|
2016-05-24 14:35:35 +03:00
|
|
|
static int prepare_posix_timers(int pid, struct task_restore_args *ta, CoreEntry *core);
|
2016-05-24 14:35:48 +03:00
|
|
|
static int prepare_signals(int pid, struct task_restore_args *, CoreEntry *core);
|
2011-09-23 12:00:45 +04:00
|
|
|
|
2017-10-05 15:50:39 +02:00
|
|
|
/*
|
|
|
|
* Architectures can overwrite this function to restore registers that are not
|
|
|
|
* present in the sigreturn signal frame.
|
|
|
|
*/
|
|
|
|
int __attribute__((weak)) arch_set_thread_regs_nosigrt(struct pid *pid)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
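
/*
 * How many tasks/threads are expected to report completion of the given
 * restore stage before everyone may move on to the next one; the counts
 * come from the shared task_entries area.
 */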
static inline int stage_participants(int next_stage)
{
	switch (next_stage) {
	case CR_STATE_FAIL:
		return 0;
	case CR_STATE_ROOT_TASK:
	case CR_STATE_PREPARE_NAMESPACES:
		return 1;
	case CR_STATE_FORKING:
		return task_entries->nr_tasks + task_entries->nr_helpers;
	case CR_STATE_RESTORE:
		return task_entries->nr_threads + task_entries->nr_helpers;
	case CR_STATE_RESTORE_SIGCHLD:
	case CR_STATE_RESTORE_CREDS:
		return task_entries->nr_threads;
	}

	BUG();
	return -1;
}

static inline int stage_current_participants(int next_stage)
{
	switch (next_stage) {
	case CR_STATE_FORKING:
		return 1;
	case CR_STATE_RESTORE:
		/*
		 * Each thread has to be reported about this stage,
		 * so if we want to wait all other tasks, we have to
		 * exclude all threads of the current process.
		 * It is supposed that we will wait other tasks,
		 * before creating threads of the current task.
		 */
		return current->nr_threads;
	}

	BUG();
	return -1;
}
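
/*
 * Block until the number of tasks still working on the current stage drops
 * to 'participants'; a negative value in the futex means some task failed,
 * and the error code is propagated through cr_errno.
 */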
static int __restore_wait_inprogress_tasks(int participants)
{
	int ret;
	futex_t *np = &task_entries->nr_in_progress;

	futex_wait_while_gt(np, participants);
	ret = (int)futex_get(np);
	if (ret < 0) {
		set_cr_errno(get_task_cr_err());
		return ret;
	}

	return 0;
}

static int restore_wait_inprogress_tasks(void)
{
	return __restore_wait_inprogress_tasks(0);
}

/* Wait all tasks except the current one */
static int restore_wait_other_tasks(void)
{
	int participants, stage;

	stage = futex_get(&task_entries->start);
	participants = stage_current_participants(stage);

	return __restore_wait_inprogress_tasks(participants);
}

static inline void __restore_switch_stage_nw(int next_stage)
{
	futex_set(&task_entries->nr_in_progress,
		  stage_participants(next_stage));
	futex_set(&task_entries->start, next_stage);
}

static inline void __restore_switch_stage(int next_stage)
{
	if (next_stage != CR_STATE_COMPLETE)
		futex_set(&task_entries->nr_in_progress,
			  stage_participants(next_stage));
	futex_set_and_wake(&task_entries->start, next_stage);
}

static int restore_switch_stage(int next_stage)
{
	__restore_switch_stage(next_stage);
	return restore_wait_inprogress_tasks();
}

static int restore_finish_ns_stage(int from, int to)
{
	if (root_ns_mask)
		return restore_finish_stage(task_entries, from);

	/* Nobody waits for this stage change, just go ahead */
	__restore_switch_stage_nw(to);
	return 0;
}

static int crtools_prepare_shared(void)
{
	if (prepare_memfd_inodes())
		return -1;

	if (prepare_files())
		return -1;

	/* We might want to remove ghost files on failed restore */
	if (collect_remaps_and_regfiles())
		return -1;

	/* Connections are unlocked from criu */
	if (!files_collected() && collect_image(&inet_sk_cinfo))
		return -1;

	if (collect_binfmt_misc())
		return -1;

	if (tty_prep_fds())
		return -1;

	return 0;
}

/*
 * Collect order information:
 * - reg_file should be before remap, as the latter needs
 *   to find file_desc objects
 * - per-pid collects (mm and fd) should be after remap and
 *   reg_file since both per-pid ones need to get fdesc-s
 *   and bump counters on remaps if they exist
 */

static struct collect_image_info *cinfos[] = {
	&file_locks_cinfo,
	&pipe_data_cinfo,
	&fifo_data_cinfo,
	&sk_queues_cinfo,
#ifdef CONFIG_HAS_LIBBPF
	&bpfmap_data_cinfo,
#endif
};

static struct collect_image_info *cinfos_files[] = {
	&unix_sk_cinfo,
	&fifo_cinfo,
	&pipe_cinfo,
	&nsfile_cinfo,
	&packet_sk_cinfo,
	&netlink_sk_cinfo,
	&eventfd_cinfo,
	&epoll_cinfo,
	&epoll_tfd_cinfo,
	&signalfd_cinfo,
	&tunfile_cinfo,
	&timerfd_cinfo,
	&inotify_cinfo,
	&inotify_mark_cinfo,
	&fanotify_cinfo,
	&fanotify_mark_cinfo,
	&ext_file_cinfo,
	&memfd_cinfo,
};

/* These images are required to restore namespaces */
static struct collect_image_info *before_ns_cinfos[] = {
	&tty_info_cinfo, /* Restore devpts content */
	&tty_cdata,
};

static struct pprep_head *post_prepare_heads = NULL;

void add_post_prepare_cb(struct pprep_head *ph)
{
	ph->next = post_prepare_heads;
	post_prepare_heads = ph;
}
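
/*
 * Run every callback registered with add_post_prepare_cb(); a failure of
 * any of them aborts the preparation of shared resources.
 */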
static int run_post_prepare(void)
{
	struct pprep_head *ph;

	for (ph = post_prepare_heads; ph != NULL; ph = ph->next)
		if (ph->actor(ph))
			return -1;

	return 0;
}

static int root_prepare_shared(void)
{
	int ret = 0;
	struct pstree_item *pi;

	pr_info("Preparing info about shared resources\n");

	if (prepare_remaps())
		return -1;

	if (seccomp_read_image())
		return -1;

	if (collect_images(cinfos, ARRAY_SIZE(cinfos)))
		return -1;

	if (!files_collected() &&
	    collect_images(cinfos_files, ARRAY_SIZE(cinfos_files)))
		return -1;

	for_each_pstree_item(pi) {
		if (pi->pid->state == TASK_HELPER)
			continue;

		ret = prepare_mm_pid(pi);
		if (ret < 0)
			break;

		ret = prepare_fd_pid(pi);
		if (ret < 0)
			break;

		ret = prepare_fs_pid(pi);
		if (ret < 0)
			break;
	}

	if (ret < 0)
		goto err;

	prepare_cow_vmas();

	ret = prepare_restorer_blob();
	if (ret)
		goto err;

	/*
	 * This should be called with all packets collected AND all
	 * fdescs and fles prepared BUT post-prep-s not run.
	 */
	ret = prepare_scms();
	if (ret)
		goto err;

	ret = run_post_prepare();
	if (ret)
		goto err;

	ret = unix_prepare_root_shared();
	if (ret)
		goto err;

	ret = add_fake_unix_queuers();
	if (ret)
		goto err;

	show_saved_files();
err:
	return ret;
}

/* This actually populates and occupies ROOT_FD_OFF sfd */
static int populate_root_fd_off(void)
{
	struct ns_id *mntns = NULL;
	int ret;

	if (root_ns_mask & CLONE_NEWNS) {
		mntns = lookup_ns_by_id(root_item->ids->mnt_ns_id, &mnt_ns_desc);
		BUG_ON(!mntns);
	}

	ret = mntns_get_root_fd(mntns);
	if (ret < 0)
		pr_err("Can't get root fd\n");
	return ret >= 0 ? 0 : -1;
}
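
/* Pre-open /proc/<pid> of the restored task and /proc/self for later use */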
static int populate_pid_proc(void)
{
	if (open_pid_proc(vpid(current)) < 0) {
		pr_err("Can't open PROC_SELF\n");
		return -1;
	}
	if (open_pid_proc(PROC_SELF) < 0) {
		pr_err("Can't open PROC_SELF\n");
		return -1;
	}
	return 0;
}

static rt_sigaction_t sigchld_act;
/*
 * If parent's sigaction has blocked SIGKILL (which is non-sense),
 * this parent action is non-valid and shouldn't be inherited.
 * Used to mark parent_act* no more valid.
 */
static rt_sigaction_t parent_act[SIGMAX];
#ifdef CONFIG_COMPAT
static rt_sigaction_t_compat parent_act_compat[SIGMAX];
#endif

static bool sa_inherited(int sig, rt_sigaction_t *sa)
{
	rt_sigaction_t *pa;
	int i;

	if (current == root_item)
		return false; /* XXX -- inherit from CRIU? */

	pa = &parent_act[sig];

	/* Omitting non-valid sigaction */
	if (pa->rt_sa_mask.sig[0] & (1 << SIGKILL))
		return false;

	for (i = 0; i < _KNSIG_WORDS; i++)
		if (pa->rt_sa_mask.sig[i] != sa->rt_sa_mask.sig[i])
			return false;

	return pa->rt_sa_handler == sa->rt_sa_handler &&
	       pa->rt_sa_flags == sa->rt_sa_flags &&
	       pa->rt_sa_restorer == sa->rt_sa_restorer;
}

static int restore_native_sigaction(int sig, SaEntry *e)
{
	rt_sigaction_t act;
	int ret;

	ASSIGN_TYPED(act.rt_sa_handler, decode_pointer(e->sigaction));
	ASSIGN_TYPED(act.rt_sa_flags, e->flags);
	ASSIGN_TYPED(act.rt_sa_restorer, decode_pointer(e->restorer));
#ifdef CONFIG_MIPS
	e->has_mask_extended = 1;
	BUILD_BUG_ON(sizeof(e->mask) * 2 != sizeof(act.rt_sa_mask.sig));

	memcpy(&(act.rt_sa_mask.sig[0]), &e->mask, sizeof(act.rt_sa_mask.sig[0]));
	memcpy(&(act.rt_sa_mask.sig[1]), &e->mask_extended, sizeof(act.rt_sa_mask.sig[1]));
#else
	BUILD_BUG_ON(sizeof(e->mask) != sizeof(act.rt_sa_mask.sig));
	memcpy(act.rt_sa_mask.sig, &e->mask, sizeof(act.rt_sa_mask.sig));
#endif
	if (sig == SIGCHLD) {
		sigchld_act = act;
		return 0;
	}

	if (sa_inherited(sig - 1, &act))
		return 1;

	/*
	 * A pure syscall is used, because glibc
	 * sigaction overwrites se_restorer.
	 */
	ret = syscall(SYS_rt_sigaction, sig, &act, NULL, sizeof(k_rtsigset_t));
	if (ret < 0) {
		pr_perror("Can't restore sigaction");
		return ret;
	}

	parent_act[sig - 1] = act;
	/* Mark SIGKILL blocked which makes compat sigaction non-valid */
#ifdef CONFIG_COMPAT
	parent_act_compat[sig - 1].rt_sa_mask.sig[0] |= 1 << SIGKILL;
#endif

	return 1;
}

static void *stack32;

#ifdef CONFIG_COMPAT
static bool sa_compat_inherited(int sig, rt_sigaction_t_compat *sa)
{
	rt_sigaction_t_compat *pa;
	int i;

	if (current == root_item)
		return false;

	pa = &parent_act_compat[sig];

	/* Omitting non-valid sigaction */
	if (pa->rt_sa_mask.sig[0] & (1 << SIGKILL))
		return false;

	for (i = 0; i < _KNSIG_WORDS; i++)
		if (pa->rt_sa_mask.sig[i] != sa->rt_sa_mask.sig[i])
			return false;

	return pa->rt_sa_handler == sa->rt_sa_handler &&
	       pa->rt_sa_flags == sa->rt_sa_flags &&
	       pa->rt_sa_restorer == sa->rt_sa_restorer;
}

static int restore_compat_sigaction(int sig, SaEntry *e)
{
	rt_sigaction_t_compat act;
	int ret;

	ASSIGN_TYPED(act.rt_sa_handler, (u32)e->sigaction);
	ASSIGN_TYPED(act.rt_sa_flags, e->flags);
	ASSIGN_TYPED(act.rt_sa_restorer, (u32)e->restorer);
	BUILD_BUG_ON(sizeof(e->mask) != sizeof(act.rt_sa_mask.sig));
	memcpy(act.rt_sa_mask.sig, &e->mask, sizeof(act.rt_sa_mask.sig));

	if (sig == SIGCHLD) {
		memcpy(&sigchld_act, &act, sizeof(rt_sigaction_t_compat));
		return 0;
	}

	if (sa_compat_inherited(sig - 1, &act))
		return 1;

	if (!stack32) {
		stack32 = alloc_compat_syscall_stack();
		if (!stack32)
			return -1;
	}

	ret = arch_compat_rt_sigaction(stack32, sig, &act);
	if (ret < 0) {
		pr_err("Can't restore compat sigaction: %d\n", ret);
		return ret;
	}

	parent_act_compat[sig - 1] = act;
	/* Mark SIGKILL blocked which makes native sigaction non-valid */
	parent_act[sig - 1].rt_sa_mask.sig[0] |= 1 << SIGKILL;

	return 1;
}
#else
static int restore_compat_sigaction(int sig, SaEntry *e)
{
	return -1;
}
#endif

static int prepare_sigactions_from_core(TaskCoreEntry *tc)
{
	int sig, i;

	if (tc->n_sigactions != SIGMAX - 2) {
		pr_err("Bad number of sigactions in the image (%d, want %d)\n",
		       (int)tc->n_sigactions, SIGMAX - 2);
		return -1;
	}

	pr_info("Restore on-core sigactions for %d\n", vpid(current));

	for (sig = 1, i = 0; sig <= SIGMAX; sig++) {
		int ret;
		SaEntry *e;
		bool sigaction_is_compat;

		if (sig == SIGKILL || sig == SIGSTOP)
			continue;

		e = tc->sigactions[i++];
		sigaction_is_compat = e->has_compat_sigaction && e->compat_sigaction;
		if (sigaction_is_compat)
			ret = restore_compat_sigaction(sig, e);
		else
			ret = restore_native_sigaction(sig, e);

		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Returns number of restored signals, -1 or negative errno on fail */
static int restore_one_sigaction(int sig, struct cr_img *img, int pid)
{
	bool sigaction_is_compat;
	SaEntry *e;
	int ret = 0;

	BUG_ON(sig == SIGKILL || sig == SIGSTOP);

	ret = pb_read_one_eof(img, &e, PB_SIGACT);
	if (ret == 0) {
		if (sig != SIGMAX_OLD + 1) { /* backward compatibility */
			pr_err("Unexpected EOF %d\n", sig);
			return -1;
		}
		pr_warn("This format of sigacts-%d.img is deprecated\n", pid);
		return -1;
	}
	if (ret < 0)
		return ret;

	sigaction_is_compat = e->has_compat_sigaction && e->compat_sigaction;
	if (sigaction_is_compat)
		ret = restore_compat_sigaction(sig, e);
	else
		ret = restore_native_sigaction(sig, e);

	sa_entry__free_unpacked(e, NULL);

	return ret;
}

static int prepare_sigactions_from_image(void)
{
	int pid = vpid(current);
	struct cr_img *img;
	int sig, rst = 0;
	int ret = 0;

	pr_info("Restore sigacts for %d\n", pid);

	img = open_image(CR_FD_SIGACT, O_RSTR, pid);
	if (!img)
		return -1;

	for (sig = 1; sig <= SIGMAX; sig++) {
		if (sig == SIGKILL || sig == SIGSTOP)
			continue;

		ret = restore_one_sigaction(sig, img, pid);
		if (ret < 0)
			break;
		if (ret)
			rst++;
	}

	pr_info("Restored %d/%d sigacts\n", rst,
		SIGMAX - 3 /* KILL, STOP and CHLD */);

	close_image(img);
	return ret;
}

static int prepare_sigactions(CoreEntry *core)
{
	int ret;

	if (!task_alive(current))
		return 0;

	if (core->tc->n_sigactions != 0)
		ret = prepare_sigactions_from_core(core->tc);
	else
		ret = prepare_sigactions_from_image();

	if (stack32) {
		free_compat_syscall_stack(stack32);
		stack32 = NULL;
	}

	return ret;
}

static int __collect_child_pids(struct pstree_item *p, int state, unsigned int *n)
{
	struct pstree_item *pi;

	list_for_each_entry(pi, &p->children, sibling) {
		pid_t *child;

		if (pi->pid->state != state)
			continue;

		child = rst_mem_alloc(sizeof(*child), RM_PRIVATE);
		if (!child)
			return -1;

		(*n)++;
		*child = vpid(pi);
	}

	return 0;
}

static int collect_child_pids(int state, unsigned int *n)
{
	struct pstree_item *pi;

	*n = 0;

	/*
	 * All children of helpers and zombies will be reparented to the init
	 * process and they have to be collected too.
	 */

	if (current == root_item) {
		for_each_pstree_item(pi) {
			if (pi->pid->state != TASK_HELPER &&
			    pi->pid->state != TASK_DEAD)
				continue;
			if (__collect_child_pids(pi, state, n))
				return -1;
		}
	}

	return __collect_child_pids(current, state, n);
}

static int collect_helper_pids(struct task_restore_args *ta)
{
	ta->helpers = (pid_t *)rst_mem_align_cpos(RM_PRIVATE);
	return collect_child_pids(TASK_HELPER, &ta->helpers_n);
}

static int collect_zombie_pids(struct task_restore_args *ta)
{
	ta->zombies = (pid_t *)rst_mem_align_cpos(RM_PRIVATE);
	return collect_child_pids(TASK_DEAD, &ta->zombies_n);
}

static int collect_inotify_fds(struct task_restore_args *ta)
{
	struct list_head *list = &rsti(current)->fds;
	struct fdt *fdt = rsti(current)->fdt;
	struct fdinfo_list_entry *fle;

	/* Check we are an fdt-restorer */
	if (fdt && fdt->pid != vpid(current))
		return 0;

	ta->inotify_fds = (int *)rst_mem_align_cpos(RM_PRIVATE);

	list_for_each_entry(fle, list, ps_list) {
		struct file_desc *d = fle->desc;
		int *inotify_fd;

		if (d->ops->type != FD_TYPES__INOTIFY)
			continue;

		if (fle != file_master(d))
			continue;

		inotify_fd = rst_mem_alloc(sizeof(*inotify_fd), RM_PRIVATE);
		if (!inotify_fd)
			return -1;

		ta->inotify_fds_n++;
		*inotify_fd = fle->fe->fd;

		pr_debug("Collect inotify fd %d to cleanup later\n", *inotify_fd);
	}
	return 0;
}

static int open_core(int pid, CoreEntry **pcore)
{
	int ret;
	struct cr_img *img;

	img = open_image(CR_FD_CORE, O_RSTR, pid);
	if (!img) {
		pr_err("Can't open core data for %d\n", pid);
		return -1;
	}

	ret = pb_read_one(img, pcore, PB_CORE);
	close_image(img);

	return ret <= 0 ? -1 : 0;
}

static int open_cores(int pid, CoreEntry *leader_core)
{
	int i, tpid;
	CoreEntry **cores = NULL;

	cores = xmalloc(sizeof(*cores) * current->nr_threads);
	if (!cores)
		goto err;

	for (i = 0; i < current->nr_threads; i++) {
		tpid = current->threads[i].ns[0].virt;

		if (tpid == pid)
			cores[i] = leader_core;
		else if (open_core(tpid, &cores[i]))
			goto err;
	}

	current->core = cores;

	/*
	 * Walk over all threads and if one of them is having
	 * active seccomp mode we will suspend filtering
	 * on the whole group until restore complete.
	 *
	 * Otherwise any criu code which might use same syscall
	 * if present inside a filter chain would take filter
	 * action and might break restore procedure.
	 */
	for (i = 0; i < current->nr_threads; i++) {
		ThreadCoreEntry *thread_core = cores[i]->thread_core;
		if (thread_core->seccomp_mode != SECCOMP_MODE_DISABLED) {
			rsti(current)->has_seccomp = true;
			break;
		}
	}

	return 0;
err:
	xfree(cores);
	return -1;
}

static int prepare_oom_score_adj(int value)
{
	int fd, ret = 0;
	char buf[11];

	fd = open_proc_rw(PROC_SELF, "oom_score_adj");
	if (fd < 0)
		return -1;

	snprintf(buf, 11, "%d", value);

	if (write(fd, buf, 11) < 0) {
		pr_perror("Write %s to /proc/self/oom_score_adj failed", buf);
		ret = -1;
	}

	close(fd);
	return ret;
}

static int prepare_proc_misc(pid_t pid, TaskCoreEntry *tc, struct task_restore_args *args)
{
	int ret;

	if (tc->has_child_subreaper)
		args->child_subreaper = tc->child_subreaper;

	/* loginuid value is critical to restore */
	if (kdat.luid == LUID_FULL && tc->has_loginuid &&
	    tc->loginuid != INVALID_UID) {
		ret = prepare_loginuid(tc->loginuid);
		if (ret < 0) {
			pr_err("Setting loginuid for %d task failed\n", pid);
			return ret;
		}
	}

	/* oom_score_adj is not critical: only log errors */
	if (tc->has_oom_score_adj && tc->oom_score_adj != 0)
		prepare_oom_score_adj(tc->oom_score_adj);

	return 0;
}

static int prepare_itimers(int pid, struct task_restore_args *args, CoreEntry *core);
static int prepare_mm(pid_t pid, struct task_restore_args *args);

static int restore_one_alive_task(int pid, CoreEntry *core)
{
	unsigned args_len;
	struct task_restore_args *ta;
	pr_info("Restoring resources\n");

	rst_mem_switch_to_private();

	args_len = round_up(sizeof(*ta) + sizeof(struct thread_restore_args) *
			    current->nr_threads, page_size());
	ta = mmap(NULL, args_len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
	if (!ta)
		return -1;

	memzero(ta, args_len);

	if (prepare_fds(current))
		return -1;

	if (prepare_file_locks(pid))
		return -1;

	if (open_vmas(current))
		return -1;

	if (prepare_aios(current, ta))
		return -1;

	if (fixup_sysv_shmems())
		return -1;

	if (open_cores(pid, core))
		return -1;

	if (prepare_signals(pid, ta, core))
		return -1;

	if (prepare_posix_timers(pid, ta, core))
		return -1;

	if (prepare_rlimits(pid, ta, core) < 0)
		return -1;

	if (collect_helper_pids(ta) < 0)
		return -1;

	if (collect_zombie_pids(ta) < 0)
		return -1;

	if (collect_inotify_fds(ta) < 0)
		return -1;

	if (prepare_proc_misc(pid, core->tc, ta))
		return -1;

	/*
	 * Get all the tcp sockets fds into rst memory -- restorer
	 * will turn repair off before going sigreturn
	 */
	if (prepare_tcp_socks(ta))
		return -1;

	/*
	 * Copy timerfd params for restorer args, we need to proceed
	 * timer setting at the very late.
	 */
	if (prepare_timerfds(ta))
		return -1;

	if (seccomp_prepare_threads(current, ta) < 0)
		return -1;

	if (prepare_itimers(pid, ta, core) < 0)
		return -1;

	if (prepare_mm(pid, ta))
		return -1;

	if (prepare_vmas(current, ta))
		return -1;

	/*
	 * Sockets have to be restored in their network namespaces,
	 * so a task namespace has to be restored after sockets.
	 */
	if (restore_task_net_ns(current))
		return -1;

	if (setup_uffd(pid, ta))
		return -1;

	return sigreturn_restore(pid, ta, args_len, core);
}
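
/*
 * Reset all handlers to SIG_DFL and unblock every signal, so that the
 * fatal signal re-sent by restore_one_zombie() actually kills the task.
 */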
static void zombie_prepare_signals(void)
{
	sigset_t blockmask;
	int sig;
	struct sigaction act;

	sigfillset(&blockmask);
	sigprocmask(SIG_UNBLOCK, &blockmask, NULL);

	memset(&act, 0, sizeof(act));
	act.sa_handler = SIG_DFL;

	for (sig = 1; sig <= SIGMAX; sig++)
		sigaction(sig, &act, NULL);
}
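
/*
 * Signals whose default action terminates a task; only these are re-sent
 * when recreating a signal-death zombie, anything else falls back to
 * SIGABRT (see restore_one_zombie()).
 */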
#define SIG_FATAL_MASK	(	\
		(1 << SIGHUP)	|\
		(1 << SIGINT)	|\
		(1 << SIGQUIT)	|\
		(1 << SIGILL)	|\
		(1 << SIGTRAP)	|\
		(1 << SIGABRT)	|\
		(1 << SIGIOT)	|\
		(1 << SIGBUS)	|\
		(1 << SIGFPE)	|\
		(1 << SIGKILL)	|\
		(1 << SIGUSR1)	|\
		(1 << SIGSEGV)	|\
		(1 << SIGUSR2)	|\
		(1 << SIGPIPE)	|\
		(1 << SIGALRM)	|\
		(1 << SIGTERM)	|\
		(1 << SIGXCPU)	|\
		(1 << SIGXFSZ)	|\
		(1 << SIGVTALRM)|\
		(1 << SIGPROF)	|\
		(1 << SIGPOLL)	|\
		(1 << SIGIO)	|\
		(1 << SIGSYS)	|\
		(1 << SIGSTKFLT)|\
		(1 << SIGPWR)	 \
	)

static inline int sig_fatal(int sig)
{
	return (sig > 0) && (sig < SIGMAX) && (SIG_FATAL_MASK & (1UL << sig));
}

struct task_entries *task_entries;
static unsigned long task_entries_pos;

static int wait_on_helpers_zombies(void)
{
	struct pstree_item *pi;

	list_for_each_entry(pi, &current->children, sibling) {
		pid_t pid = vpid(pi);
		int status;

		switch (pi->pid->state) {
		case TASK_DEAD:
			if (waitid(P_PID, pid, NULL, WNOWAIT | WEXITED) < 0) {
				pr_perror("Wait on %d zombie failed", pid);
				return -1;
			}
			break;
		case TASK_HELPER:
			if (waitpid(pid, &status, 0) != pid) {
				pr_perror("waitpid for helper %d failed", pid);
				return -1;
			}
			break;
		}
	}

	return 0;
}

static int wait_exiting_children(void);

static int restore_one_zombie(CoreEntry *core)
{
	int exit_code = core->tc->exit_code;

	pr_info("Restoring zombie with %d code\n", exit_code);

	if (prepare_fds(current))
		return -1;

	if (lazy_pages_setup_zombie(vpid(current)))
		return -1;

	prctl(PR_SET_NAME, (long)(void *)core->tc->comm, 0, 0, 0);

	if (task_entries != NULL) {
		wait_exiting_children();
		zombie_prepare_signals();
	}

	if (exit_code & 0x7f) {
		int signr;

		/* prevent generating core files */
		if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0))
			pr_perror("Can't drop the dumpable flag");

		signr = exit_code & 0x7F;
		if (!sig_fatal(signr)) {
			pr_warn("Exit with non fatal signal ignored\n");
			signr = SIGABRT;
		}

		if (kill(vpid(current), signr) < 0)
			pr_perror("Can't kill myself, will just exit");

		exit_code = 0;
	}

	exit((exit_code >> 8) & 0x7f);

	/* never reached */
	BUG_ON(1);
	return -1;
}
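
/*
 * Prepare service fds for a freshly created child and close descriptors
 * it must not keep from its parent (or from criu itself for root_item).
 */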
static int setup_newborn_fds(struct pstree_item *me)
{
	if (clone_service_fd(me))
		return -1;

	if (!me->parent ||
	    (rsti(me->parent)->fdt && !(rsti(me)->clone_flags & CLONE_FILES))) {
		/*
		 * When our parent has shared fd table, some of the table owners
		 * may be already created. Files, they open, will be inherited
		 * by current process, and here we close them. Also, service fds
		 * of parent are closed here. And root_item closes the files,
		 * that were inherited from criu process.
		 */
		if (close_old_fds())
			return -1;
	}

	return 0;
}
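
/*
 * Sanity-check a freshly read CoreEntry and convert old images that kept
 * seccomp state in per-task fields into the per-thread representation.
 */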
static int check_core(CoreEntry *core, struct pstree_item *me)
{
	int ret = -1;

	if (core->mtype != CORE_ENTRY__MARCH) {
		pr_err("Core march mismatch %d\n", (int)core->mtype);
		goto out;
	}

	if (!core->tc) {
		pr_err("Core task state data missed\n");
		goto out;
	}

	if (core->tc->task_state != TASK_DEAD) {
		if (!core->ids && !me->ids) {
			pr_err("Core IDS data missed for non-zombie\n");
			goto out;
		}

		if (!CORE_THREAD_ARCH_INFO(core)) {
			pr_err("Core info data missed for non-zombie\n");
			goto out;
		}

		/*
		 * Seccomp are moved to per-thread origin,
		 * so for old images we need to move per-task
		 * data into proper place.
		 */
		if (core->tc->has_old_seccomp_mode) {
			core->thread_core->has_seccomp_mode = core->tc->has_old_seccomp_mode;
			core->thread_core->seccomp_mode = core->tc->old_seccomp_mode;
		}
		if (core->tc->has_old_seccomp_filter) {
			core->thread_core->has_seccomp_filter = core->tc->has_old_seccomp_filter;
			core->thread_core->seccomp_filter = core->tc->old_seccomp_filter;
			rsti(me)->has_old_seccomp_filter = true;
		}
	}

	ret = 0;
out:
	return ret;
}

/*
 * Find if there are children which are zombies or helpers - processes
 * which are expected to die during the restore.
 */
static bool child_death_expected(void)
{
	struct pstree_item *pi;

	list_for_each_entry(pi, &current->children, sibling) {
		switch (pi->pid->state) {
		case TASK_DEAD:
		case TASK_HELPER:
			return true;
		}
	}

	return false;
}
|
|
|
|
|
2017-12-28 13:50:54 +03:00
|
|
|
static int wait_exiting_children(void)
|
restore: Fix deadlock when helper's child dies
Since commit ced9c529f687 ("restore: fix race with helpers' kids dying
too early"), we block SIGCHLD in helper tasks before CR_STATE_RESTORE.
This way we avoided default criu sighandler as it doesn't expect that
childs may die.
This is very racy as we wait on futex for another stage to be started,
but the next stage may start only when all the tasks complete previous
stage. If some children of helper dies, the helper may already have
blocked SIGCHLD and have started sleeping on the futex. Then the next
stage never comes and no one reads a pending SIGCHLD for helper.
A customer met this situation on the node, where the following
(non-related) problem has occured:
Unable to send a fin packet: libnet_write_raw_ipv6(): -1 bytes written (Network is unreachable)
Then child criu of the helper has exited with error-code and the
lockup has happened.
While we could fix it by aborting futex in the end of
restore_task_with_children() for each (non-root also) tasks,
that would be not completely correct:
1. All futex-waiting tasks will wake up after that and they
may not expect that some tasks are on the previous stage,
so they will spam into logs with unrelated errors and may
also die painfully.
2. Child may die and miss aborting of the futex due to:
o segfault
o OOM killer
o User-sended SIGKILL
o Other error-path we forgot to cover with abort futex
To fix this deadlock in TASK_HELPER, as suggested-by Kirill,
let's check if there are children deaths expected - if there
isn't any, don't block SIGCHLD, otherwise wait() and check if
death was on expected stage of restore (not CR_STATE_RESTORE).
Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
Conflicts:
criu/cr-restore.c
2017-07-20 18:21:21 +03:00
|
|
|
{
|
|
|
|
siginfo_t info;
|
|
|
|
|
|
|
|
if (!child_death_expected()) {
|
|
|
|
/*
|
|
|
|
* Restoree has no children that should die, during restore,
|
|
|
|
* wait for the next stage on futex.
|
|
|
|
* The default SIGCHLD handler will handle an unexpected
|
|
|
|
* child's death and abort the restore if someone dies.
|
|
|
|
*/
|
|
|
|
restore_finish_stage(task_entries, CR_STATE_RESTORE);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The restoree has children which will die - decrement itself from
|
|
|
|
* nr. of tasks processing the stage and wait for anyone to die.
|
|
|
|
* Tasks may die only when they're on the following stage.
|
|
|
|
* If one dies earlier - that's unexpected - treat it as an error
|
|
|
|
* and abort the restore.
|
|
|
|
*/
|
|
|
|
if (block_sigmask(NULL, SIGCHLD))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
/* Finish CR_STATE_RESTORE, but do not wait for the next stage. */
|
|
|
|
futex_dec_and_wake(&task_entries->nr_in_progress);
|
|
|
|
|
|
|
|
if (waitid(P_ALL, 0, &info, WEXITED | WNOWAIT)) {
|
2021-04-22 13:15:25 -07:00
|
|
|
pr_perror("Failed to wait");
|
2017-07-20 18:21:21 +03:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (futex_get(&task_entries->start) == CR_STATE_RESTORE) {
|
|
|
|
pr_err("Child %d died too early\n", info.si_pid);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (wait_on_helpers_zombies()) {
|
|
|
|
pr_err("Failed to wait on helpers and zombies\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-12-28 13:50:54 +03:00
|
|
|
/*
|
|
|
|
* Restore a helper process - artificially created by criu
|
|
|
|
* to restore attributes of process tree.
|
|
|
|
* - sessions for each leaders are dead
|
|
|
|
* - process groups with dead leaders
|
|
|
|
* - dead tasks for which /proc/<pid>/... is opened by restoring task
|
|
|
|
* - whatnot
|
|
|
|
*/
|
|
|
|
static int restore_one_helper(void)
|
|
|
|
{
|
2018-04-20 13:10:22 +03:00
|
|
|
int i;
|
|
|
|
|
2017-12-28 13:50:54 +03:00
|
|
|
if (prepare_fds(current))
|
|
|
|
return -1;
|
|
|
|
|
2018-04-20 13:10:22 +03:00
|
|
|
if (wait_exiting_children())
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
sfds_protected = false;
|
|
|
|
close_image_dir();
|
|
|
|
close_proc();
|
|
|
|
for (i = SERVICE_FD_MIN + 1; i < SERVICE_FD_MAX; i++)
|
|
|
|
close_service_fd(i);
|
|
|
|
|
|
|
|
return 0;
|
2017-12-28 13:50:54 +03:00
|
|
|
}
|
|
|
|
|
2013-03-25 23:39:52 +04:00
|
|
|
static int restore_one_task(int pid, CoreEntry *core)
|
2012-01-22 20:28:30 +04:00
|
|
|
{
|
2018-04-20 13:10:22 +03:00
|
|
|
int ret;
|
2012-07-19 13:23:01 +04:00
|
|
|
|
2013-11-03 17:37:10 +04:00
|
|
|
/* No more fork()-s => no more per-pid logs */
|
|
|
|
|
2014-08-07 13:31:39 +04:00
|
|
|
if (task_alive(current))
|
2012-07-19 13:23:01 +04:00
|
|
|
ret = restore_one_alive_task(pid, core);
|
2017-01-25 18:29:04 +03:00
|
|
|
else if (current->pid->state == TASK_DEAD)
|
2015-06-25 23:42:43 +03:00
|
|
|
ret = restore_one_zombie(core);
|
2017-01-25 18:29:04 +03:00
|
|
|
else if (current->pid->state == TASK_HELPER) {
|
2017-07-20 18:21:21 +03:00
|
|
|
ret = restore_one_helper();
|
2014-09-17 15:32:16 -05:00
|
|
|
} else {
|
2012-07-19 13:23:01 +04:00
|
|
|
pr_err("Unknown state in code %d\n", (int)core->tc->task_state);
|
|
|
|
ret = -1;
|
2012-01-22 20:28:30 +04:00
|
|
|
}
|
2012-07-19 13:23:01 +04:00
|
|
|
|
2014-08-05 12:56:04 +04:00
|
|
|
if (core)
|
|
|
|
core_entry__free_unpacked(core, NULL);
|
2012-07-19 13:23:01 +04:00
|
|
|
return ret;
|
2012-01-22 20:28:30 +04:00
|
|
|
}
|
|
|
|
|
2012-08-28 23:19:28 +04:00
|
|
|
/* All arguments should be above stack, because it grows down */
|
2012-01-26 15:26:00 +04:00
|
|
|
struct cr_clone_arg {
|
2012-05-31 14:50:00 +04:00
|
|
|
struct pstree_item *item;
|
2012-01-26 15:27:00 +04:00
|
|
|
unsigned long clone_flags;
|
2013-03-25 23:39:52 +04:00
|
|
|
|
|
|
|
CoreEntry *core;
|
2012-01-26 15:26:00 +04:00
|
|
|
};
|
|
|
|
|
2014-08-14 19:34:30 +04:00
|
|
|
static void maybe_clone_parent(struct pstree_item *item,
|
|
|
|
struct cr_clone_arg *ca)
|
|
|
|
{
|
2014-09-10 01:13:00 +04:00
|
|
|
/*
|
|
|
|
* zdtm runs in kernel 3.11, which has the problem described below. We
|
|
|
|
* avoid this by including the pdeath_sig test. Once users/zdtm migrate
|
|
|
|
* off of 3.11, this condition can be simplified to just test the
|
|
|
|
* options and not have the pdeath_sig test.
|
|
|
|
*/
|
2014-09-10 15:46:06 +04:00
|
|
|
if (opts.restore_sibling) {
|
2014-08-14 19:34:30 +04:00
|
|
|
/*
|
|
|
|
* This means we're called from lib's criu_restore_child().
|
|
|
|
* In that case create the root task as the child one to
|
|
|
|
* the caller. This is the only way to correctly restore the
|
|
|
|
* pdeath_sig of the root task. But also looks nice.
|
|
|
|
*
|
|
|
|
* Alternatively, if we are --restore-detached, a similar trick is
|
|
|
|
* needed to correctly restore pdeath_sig and prevent processes from
|
|
|
|
* dying once restored.
|
|
|
|
*
|
|
|
|
* There was a problem in kernel 3.11 -- CLONE_PARENT can't be
|
|
|
|
* set together with CLONE_NEWPID, which has been solved in further
|
|
|
|
* versions of the kernels, but we treat 3.11 as a base, so at
|
|
|
|
* least warn a user about potential problems.
|
|
|
|
*/
|
2014-09-29 22:04:39 +04:00
|
|
|
rsti(item)->clone_flags |= CLONE_PARENT;
|
|
|
|
if (rsti(item)->clone_flags & CLONE_NEWPID)
|
2014-08-14 19:34:30 +04:00
|
|
|
pr_warn("Set CLONE_PARENT | CLONE_NEWPID but it might cause restore problem,"
|
|
|
|
"because not all kernels support such clone flags combinations!\n");
|
2014-09-10 15:46:06 +04:00
|
|
|
} else if (opts.restore_detach) {
|
|
|
|
if (ca->core->thread_core->pdeath_sig)
|
|
|
|
pr_warn("Root task has pdeath_sig configured, so it will receive one _right_"
|
|
|
|
"after restore on CRIU exit\n");
|
2014-08-14 19:34:30 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-07 14:29:32 +03:00
|
|
|
static bool needs_prep_creds(struct pstree_item *item)
|
|
|
|
{
|
2018-07-11 00:36:11 +03:00
|
|
|
/*
|
|
|
|
* Before the 4.13 kernel, it was impossible to set
|
|
|
|
* an exe_file if uid or gid isn't zero.
|
|
|
|
*/
|
|
|
|
return (!item->parent && ((root_ns_mask & CLONE_NEWUSER) || getuid()));
|
2017-06-07 14:29:32 +03:00
|
|
|
}
|
|
|
|
|
2020-05-05 14:53:08 +00:00
|
|
|
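/*
 * Helper run in this (or a child) process: write (pid - 1) into the
 * kernel's LAST_PID_PATH interface so that the next fork()/clone()
 * in the pid namespace gets exactly the requested pid. Only needed
 * on kernels without clone3() with set_tid.
 */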
static int set_next_pid(void *arg)
|
2020-05-05 15:45:25 +00:00
|
|
|
{
|
|
|
|
char buf[32];
|
2020-05-05 14:53:08 +00:00
|
|
|
pid_t *pid = arg;
|
2020-05-05 15:45:25 +00:00
|
|
|
int len;
|
|
|
|
int fd;
|
|
|
|
|
|
|
|
fd = open_proc_rw(PROC_GEN, LAST_PID_PATH);
|
|
|
|
if (fd < 0)
|
|
|
|
return -1;
|
|
|
|
|
2020-05-05 14:53:08 +00:00
|
|
|
len = snprintf(buf, sizeof(buf), "%d", *pid - 1);
|
2020-05-05 15:45:25 +00:00
|
|
|
if (write(fd, buf, len) != len) {
|
2020-05-05 14:53:08 +00:00
|
|
|
pr_perror("Failed to write %s to /proc/%s",
|
|
|
|
buf, LAST_PID_PATH);
|
2020-05-05 15:45:25 +00:00
|
|
|
close(fd);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
close(fd);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-01-19 01:16:19 +04:00
|
|
|
static inline int fork_with_pid(struct pstree_item *item)
|
2011-09-23 12:00:45 +04:00
|
|
|
{
|
2012-01-26 15:26:00 +04:00
|
|
|
struct cr_clone_arg ca;
|
2020-05-05 15:45:25 +00:00
|
|
|
struct ns_id *pid_ns = NULL;
|
2020-10-28 13:24:36 +03:00
|
|
|
bool external_pidns = false;
|
2014-09-29 12:48:53 +04:00
|
|
|
int ret = -1;
|
2017-02-10 14:28:57 +03:00
|
|
|
pid_t pid = vpid(item);
|
2011-12-01 18:21:17 +04:00
|
|
|
|
2017-01-25 18:29:04 +03:00
|
|
|
if (item->pid->state != TASK_HELPER) {
|
2016-05-23 22:13:00 +03:00
|
|
|
if (open_core(pid, &ca.core))
|
2013-03-25 23:39:52 +04:00
|
|
|
return -1;
|
|
|
|
|
2013-08-12 09:04:56 +04:00
|
|
|
if (check_core(ca.core, item))
|
|
|
|
return -1;
|
|
|
|
|
2017-01-25 18:29:04 +03:00
|
|
|
item->pid->state = ca.core->tc->task_state;
|
2014-09-29 22:04:39 +04:00
|
|
|
rsti(item)->cg_set = ca.core->tc->cg_set;
|
2015-11-16 22:17:45 -07:00
|
|
|
|
2017-01-25 18:29:04 +03:00
|
|
|
if (item->pid->state != TASK_DEAD && !task_alive(item)) {
|
|
|
|
pr_err("Unknown task state %d\n", item->pid->state);
|
2013-10-01 11:21:34 +04:00
|
|
|
return -1;
|
|
|
|
}
|
2014-08-14 19:34:30 +04:00
|
|
|
|
2018-05-07 11:42:48 +03:00
|
|
|
/*
|
|
|
|
* By default we assume that seccomp is not
|
|
|
|
* used at all (especially on dead task). Later
|
|
|
|
* we will walk over all threads and check in
|
|
|
|
* details if filter is present setting up
|
|
|
|
* this flag as appropriate.
|
|
|
|
*/
|
|
|
|
rsti(item)->has_seccomp = false;
|
2018-05-07 11:42:45 +03:00
|
|
|
|
2014-08-14 19:34:30 +04:00
|
|
|
if (unlikely(item == root_item))
|
|
|
|
maybe_clone_parent(item, &ca);
|
2014-05-08 16:55:53 +04:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Helper entry will not get moved around and thus
|
|
|
|
* will live in the parent's cgset.
|
|
|
|
*/
|
2014-09-29 22:04:39 +04:00
|
|
|
rsti(item)->cg_set = rsti(item->parent)->cg_set;
|
2013-03-25 23:39:52 +04:00
|
|
|
ca.core = NULL;
|
2014-05-08 16:55:53 +04:00
|
|
|
}
|
2011-12-01 18:21:17 +04:00
|
|
|
|
2020-10-27 18:26:35 +03:00
|
|
|
if (item->ids)
|
|
|
|
pid_ns = lookup_ns_by_id(item->ids->pid_ns_id, &pid_ns_desc);
|
2014-09-15 22:05:00 +04:00
|
|
|
|
2020-10-28 13:24:36 +03:00
|
|
|
if (!current && pid_ns && pid_ns->ext_key)
|
|
|
|
external_pidns = true;
|
|
|
|
|
|
|
|
if (external_pidns) {
|
2020-05-05 15:45:25 +00:00
|
|
|
int fd;
|
|
|
|
|
|
|
|
/* Not possible to restore into an empty PID namespace. */
|
2020-05-05 14:53:08 +00:00
|
|
|
if (pid == INIT_PID) {
|
|
|
|
pr_err("Unable to restore into an empty PID namespace\n");
|
|
|
|
return -1;
|
|
|
|
}
|
2020-05-05 15:45:25 +00:00
|
|
|
|
|
|
|
fd = inherit_fd_lookup_id(pid_ns->ext_key);
|
|
|
|
if (fd < 0) {
|
|
|
|
pr_err("Unable to find an external pidns: %s\n", pid_ns->ext_key);
|
|
|
|
return -1;
|
2019-12-16 10:42:13 +00:00
|
|
|
}
|
2012-08-14 14:09:20 +04:00
|
|
|
|
2020-05-05 15:45:25 +00:00
|
|
|
ret = switch_ns_by_fd(fd, &pid_ns_desc, NULL);
|
|
|
|
close(fd);
|
2020-05-05 14:53:08 +00:00
|
|
|
if (ret) {
|
|
|
|
pr_err("Unable to enter existing PID namespace\n");
|
|
|
|
return -1;
|
|
|
|
}
|
2020-10-28 13:24:36 +03:00
|
|
|
|
|
|
|
pr_info("Inheriting external pidns %s for %d\n", pid_ns->ext_key, pid);
|
2020-05-05 15:45:25 +00:00
|
|
|
}
|
|
|
|
|
2020-10-27 18:26:35 +03:00
|
|
|
ca.item = item;
|
|
|
|
ca.clone_flags = rsti(item)->clone_flags;
|
|
|
|
|
|
|
|
BUG_ON(ca.clone_flags & CLONE_VM);
|
|
|
|
|
|
|
|
pr_info("Forking task with %d pid (flags 0x%lx)\n", pid, ca.clone_flags);
|
|
|
|
|
|
|
|
if (!(ca.clone_flags & CLONE_NEWPID)) {
|
2017-05-16 19:27:07 +03:00
|
|
|
lock_last_pid();
|
2012-08-14 14:09:20 +04:00
|
|
|
|
2019-12-16 10:42:13 +00:00
|
|
|
if (!kdat.has_clone3_set_tid) {
|
2020-10-28 13:24:36 +03:00
|
|
|
if (external_pidns) {
|
2020-05-05 15:45:25 +00:00
|
|
|
/*
|
|
|
|
* Restoring into another namespace requires a helper
|
|
|
|
* to write to LAST_PID_PATH. Using clone3() this is
|
|
|
|
* so much easier and simpler. As long as CRIU supports
|
|
|
|
* clone() this is needed.
|
|
|
|
*/
|
2020-05-05 14:53:08 +00:00
|
|
|
ret = call_in_child_process(set_next_pid, (void *)&pid);
|
|
|
|
} else {
|
|
|
|
ret = set_next_pid((void *)&pid);
|
2020-05-05 15:45:25 +00:00
|
|
|
}
|
2020-05-05 14:53:08 +00:00
|
|
|
if (ret != 0) {
|
2021-04-22 17:57:49 -07:00
|
|
|
pr_err("Setting PID failed\n");
|
2020-05-05 14:53:08 +00:00
|
|
|
goto err_unlock;
|
2019-12-16 10:42:13 +00:00
|
|
|
}
|
2015-09-28 20:20:00 +03:00
|
|
|
}
|
2012-08-14 14:09:20 +04:00
|
|
|
} else {
|
2020-10-28 13:24:36 +03:00
|
|
|
if (!external_pidns) {
|
2020-05-05 14:53:08 +00:00
|
|
|
if (pid != INIT_PID) {
|
|
|
|
pr_err("First PID in a PID namespace needs to be %d and not %d\n",
|
|
|
|
pid, INIT_PID);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
2012-08-14 14:09:20 +04:00
|
|
|
}
|
2011-12-01 18:21:17 +04:00
|
|
|
|
2019-12-16 10:42:13 +00:00
|
|
|
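/*
 * Two ways to fork with the desired pid: clone3() with set_tid when
 * the kernel supports it, or the legacy clone() path that relies on
 * the ns_last_pid value written above. Roughly, the clone3() variant
 * boils down to something like this simplified sketch:
 *
 *	struct clone_args cargs = {
 *		.flags		= clone_flags,
 *		.exit_signal	= SIGCHLD,
 *		.set_tid	= (__u64)(uintptr_t)&pid,
 *		.set_tid_size	= 1,
 *	};
 *	child = syscall(__NR_clone3, &cargs, sizeof(cargs));
 */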
if (kdat.has_clone3_set_tid) {
|
|
|
|
ret = clone3_with_pid_noasan(restore_task_with_children,
|
2020-10-27 18:26:35 +03:00
|
|
|
&ca, (ca.clone_flags &
|
2019-08-14 07:40:40 +03:00
|
|
|
~(CLONE_NEWNET | CLONE_NEWCGROUP | CLONE_NEWTIME)),
|
2019-12-16 10:42:13 +00:00
|
|
|
SIGCHLD, pid);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Some kernel modules, such as network packet generator
|
|
|
|
* run kernel thread upon net-namespace creation taking
|
|
|
|
* the @pid we've been requesting via LAST_PID_PATH interface
|
|
|
|
* so that we can't restore a task with the pid we need.
|
|
|
|
*
|
|
|
|
* Here is an idea -- unshare net namespace in callee instead.
|
|
|
|
*/
|
|
|
|
/*
|
|
|
|
* The cgroup namespace is also unshared explicitly in the
|
|
|
|
* move_in_cgroup(), so drop this flag here as well.
|
|
|
|
*/
|
|
|
|
close_pid_proc();
|
|
|
|
ret = clone_noasan(restore_task_with_children,
|
2020-10-27 18:26:35 +03:00
|
|
|
(ca.clone_flags &
|
2019-08-14 07:40:40 +03:00
|
|
|
~(CLONE_NEWNET | CLONE_NEWCGROUP | CLONE_NEWTIME)) | SIGCHLD,
|
2019-12-16 10:42:13 +00:00
|
|
|
&ca);
|
|
|
|
}
|
|
|
|
|
2014-08-06 22:06:00 +04:00
|
|
|
if (ret < 0) {
|
2012-01-31 15:13:05 +04:00
|
|
|
pr_perror("Can't fork for %d", pid);
|
2020-04-23 09:11:48 +00:00
|
|
|
if (errno == EEXIST)
|
|
|
|
set_cr_errno(EEXIST);
|
2014-08-06 22:06:00 +04:00
|
|
|
goto err_unlock;
|
|
|
|
}
|
|
|
|
|
2011-12-01 18:21:17 +04:00
|
|
|
|
2015-04-23 14:35:00 +03:00
|
|
|
if (item == root_item) {
|
2017-01-25 18:29:04 +03:00
|
|
|
item->pid->real = ret;
|
2015-04-23 14:35:00 +03:00
|
|
|
pr_debug("PID: real %d virt %d\n",
|
2017-02-10 14:28:57 +03:00
|
|
|
item->pid->real, vpid(item));
|
2015-04-23 14:35:00 +03:00
|
|
|
}
|
2012-08-14 12:54:00 +04:00
|
|
|
|
2011-12-01 18:21:17 +04:00
|
|
|
err_unlock:
|
2020-10-27 18:26:35 +03:00
|
|
|
if (!(ca.clone_flags & CLONE_NEWPID))
|
2017-05-16 19:27:07 +03:00
|
|
|
unlock_last_pid();
|
2020-05-05 15:45:25 +00:00
|
|
|
|
2013-03-25 23:39:52 +04:00
|
|
|
if (ca.core)
|
|
|
|
core_entry__free_unpacked(ca.core, NULL);
|
2011-09-23 12:00:45 +04:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-11-11 01:24:48 +00:00
|
|
|
/* Returns 0 if restore can be continued */
|
|
|
|
static int sigchld_process(int status, pid_t pid)
|
2012-01-19 01:33:19 +03:00
|
|
|
{
|
2020-11-11 01:24:48 +00:00
|
|
|
int sig;
|
2013-05-31 19:01:31 +04:00
|
|
|
|
2020-11-11 01:24:48 +00:00
|
|
|
if (WIFEXITED(status)) {
|
|
|
|
pr_err("%d exited, status=%d\n", pid, WEXITSTATUS(status));
|
|
|
|
return -1;
|
|
|
|
} else if (WIFSIGNALED(status)) {
|
|
|
|
sig = WTERMSIG(status);
|
|
|
|
pr_err("%d killed by signal %d: %s\n", pid, sig, strsignal(sig));
|
|
|
|
return -1;
|
|
|
|
} else if (WIFSTOPPED(status)) {
|
|
|
|
sig = WSTOPSIG(status);
|
|
|
|
/* The root task is ptraced. Allow it to handle SIGCHLD */
|
|
|
|
if (sig == SIGCHLD && !current) {
|
|
|
|
if (ptrace(PTRACE_CONT, pid, 0, SIGCHLD)) {
|
|
|
|
pr_perror("Unable to resume %d", pid);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
pr_err("%d stopped by signal %d: %s\n", pid, sig, strsignal(sig));
|
|
|
|
return -1;
|
|
|
|
} else if (WIFCONTINUED(status)) {
|
|
|
|
pr_err("%d unexpectedly continued\n", pid);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
pr_err("wait for %d resulted in %x status\n", pid, status);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
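/*
 * Reap every pending child; any unexpected death makes sigchld_process()
 * fail and the restore is aborted by waking the nr_in_progress futex
 * with an error value.
 */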
static void sigchld_handler(int signal, siginfo_t *siginfo, void *data)
|
|
|
|
{
|
2017-05-13 03:11:19 +03:00
|
|
|
while (1) {
|
2020-11-11 01:24:48 +00:00
|
|
|
int status;
|
|
|
|
pid_t pid;
|
|
|
|
|
2017-05-13 03:11:19 +03:00
|
|
|
pid = waitpid(-1, &status, WNOHANG);
|
2013-05-31 19:01:31 +04:00
|
|
|
if (pid <= 0)
|
|
|
|
return;
|
2014-08-14 16:52:00 +04:00
|
|
|
|
2020-11-11 01:24:48 +00:00
|
|
|
if (sigchld_process(status, pid) < 0)
|
|
|
|
goto err_abort;
|
2012-06-22 00:38:00 +04:00
|
|
|
}
|
|
|
|
|
2020-11-11 01:24:48 +00:00
|
|
|
err_abort:
|
2012-04-03 00:52:00 +04:00
|
|
|
futex_abort_and_wake(&task_entries->nr_in_progress);
|
2012-01-19 01:33:19 +03:00
|
|
|
}
|
|
|
|
|
2014-08-06 16:25:08 +04:00
|
|
|
static int criu_signals_setup(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct sigaction act;
|
2014-08-06 16:25:25 +04:00
|
|
|
sigset_t blockmask;
|
2014-08-06 16:25:08 +04:00
|
|
|
|
|
|
|
ret = sigaction(SIGCHLD, NULL, &act);
|
|
|
|
if (ret < 0) {
|
|
|
|
pr_perror("sigaction() failed");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
act.sa_flags |= SA_NOCLDSTOP | SA_SIGINFO | SA_RESTART;
|
|
|
|
act.sa_sigaction = sigchld_handler;
|
|
|
|
sigemptyset(&act.sa_mask);
|
|
|
|
sigaddset(&act.sa_mask, SIGCHLD);
|
|
|
|
|
|
|
|
ret = sigaction(SIGCHLD, &act, NULL);
|
|
|
|
if (ret < 0) {
|
|
|
|
pr_perror("sigaction() failed");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2014-08-06 16:25:25 +04:00
|
|
|
/*
|
|
|
|
* The block mask will be restored in sigreturn.
|
|
|
|
*
|
|
|
|
* TODO: This code should be removed, when a freezer will be added.
|
|
|
|
*/
|
|
|
|
sigfillset(&blockmask);
|
|
|
|
sigdelset(&blockmask, SIGCHLD);
|
2015-06-05 16:48:00 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Here we use SIG_SETMASK instead of SIG_BLOCK to avoid the case where
|
|
|
|
* we've been forked from a parent who had blocked SIGCHLD. If SIGCHLD
|
|
|
|
* is blocked when a task dies (e.g. if the task fails to restore
|
|
|
|
* somehow), we hang because our SIGCHLD handler is never run. Since we
|
|
|
|
* depend on SIGCHLD being unblocked, let's set the mask explicitly.
|
|
|
|
*/
|
|
|
|
ret = sigprocmask(SIG_SETMASK, &blockmask, NULL);
|
2014-08-06 16:25:25 +04:00
|
|
|
if (ret < 0) {
|
|
|
|
pr_perror("Can't block signals");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2014-08-06 16:25:08 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-04-11 22:11:41 +04:00
|
|
|
static void restore_sid(void)
|
|
|
|
{
|
|
|
|
pid_t sid;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SID can only be reset to pid or inherited from parent.
|
|
|
|
* Thus we restore it right here to let our kids inherit
|
|
|
|
* one in case they need it.
|
|
|
|
*
|
|
|
|
* PGIDs are restored late when all tasks are forked and
|
|
|
|
* we can call setpgid() on custom values.
|
|
|
|
*/
|
|
|
|
|
2017-02-10 14:28:57 +03:00
|
|
|
if (vpid(current) == current->sid) {
|
|
|
|
pr_info("Restoring %d to %d sid\n", vpid(current), current->sid);
|
2012-04-11 22:11:41 +04:00
|
|
|
sid = setsid();
|
2012-09-05 19:52:55 +04:00
|
|
|
if (sid != current->sid) {
|
2012-04-11 22:11:41 +04:00
|
|
|
pr_perror("Can't restore sid (%d)", sid);
|
2012-12-26 18:15:03 +03:00
|
|
|
exit(1);
|
2012-04-11 22:11:41 +04:00
|
|
|
}
|
|
|
|
} else {
|
2019-01-17 20:33:04 +00:00
|
|
|
sid = getsid(0);
|
2012-09-05 19:52:55 +04:00
|
|
|
if (sid != current->sid) {
|
2012-06-22 00:39:00 +04:00
|
|
|
/* Skip the root task if it's not init */
|
2017-02-10 14:28:57 +03:00
|
|
|
if (current == root_item && vpid(root_item) != INIT_PID)
|
2012-06-22 00:39:00 +04:00
|
|
|
return;
|
2012-04-11 22:11:41 +04:00
|
|
|
pr_err("Requested sid %d doesn't match inherited %d\n",
|
2012-09-05 19:52:55 +04:00
|
|
|
current->sid, sid);
|
2012-12-26 18:15:03 +03:00
|
|
|
exit(1);
|
2012-04-11 22:11:41 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void restore_pgid(void)
|
|
|
|
{
|
2013-09-27 04:38:00 +04:00
|
|
|
/*
|
|
|
|
* Unlike sessions, process groups (a.k.a. pgids) can be joined
|
|
|
|
* by any task, provided the task with pid == pgid (group leader)
|
|
|
|
* exists. Thus, in order to restore pgid we must make sure that
|
|
|
|
* group leader was born and created the group, then join one.
|
|
|
|
*
|
|
|
|
* We do this _before_ finishing the forking stage to make sure
|
|
|
|
* helpers are still with us.
|
|
|
|
*/
|
|
|
|
|
|
|
|
pid_t pgid, my_pgid = current->pgid;
|
2012-04-11 22:11:41 +04:00
|
|
|
|
2017-02-10 14:28:57 +03:00
|
|
|
pr_info("Restoring %d to %d pgid\n", vpid(current), my_pgid);
|
2012-04-11 22:11:41 +04:00
|
|
|
|
|
|
|
pgid = getpgrp();
|
2013-09-27 04:38:00 +04:00
|
|
|
if (my_pgid == pgid)
|
2012-04-11 22:11:41 +04:00
|
|
|
return;
|
|
|
|
|
2017-02-10 14:28:57 +03:00
|
|
|
if (my_pgid != vpid(current)) {
|
2013-09-27 04:38:00 +04:00
|
|
|
struct pstree_item *leader;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wait for leader to become such.
|
|
|
|
* Missing leader means we're going to crtools
|
|
|
|
* group (-j option).
|
|
|
|
*/
|
|
|
|
|
2014-09-29 22:04:39 +04:00
|
|
|
leader = rsti(current)->pgrp_leader;
|
2013-09-27 04:38:00 +04:00
|
|
|
if (leader) {
|
2017-02-10 14:28:57 +03:00
|
|
|
BUG_ON(my_pgid != vpid(leader));
|
2014-09-29 22:04:39 +04:00
|
|
|
futex_wait_until(&rsti(leader)->pgrp_set, 1);
|
2013-09-27 04:38:00 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-04-11 22:11:41 +04:00
|
|
|
pr_info("\twill call setpgid, mine pgid is %d\n", pgid);
|
2013-09-27 04:38:00 +04:00
|
|
|
if (setpgid(0, my_pgid) != 0) {
|
2017-02-10 14:28:57 +03:00
|
|
|
pr_perror("Can't restore pgid (%d/%d->%d)", vpid(current), pgid, current->pgid);
|
2012-12-26 18:15:03 +03:00
|
|
|
exit(1);
|
2012-04-11 22:11:41 +04:00
|
|
|
}
|
2013-09-27 04:38:00 +04:00
|
|
|
|
2017-02-10 14:28:57 +03:00
|
|
|
if (my_pgid == vpid(current))
|
2014-09-29 22:04:39 +04:00
|
|
|
futex_set_and_wake(&rsti(current)->pgrp_set, 1);
|
2012-04-11 22:11:41 +04:00
|
|
|
}
|
|
|
|
|
2019-12-21 18:13:06 +00:00
|
|
|
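/*
 * Mount a private procfs instance in a temporary directory and return
 * a detached fd to it. Used as a fallback when the fsopen()-based
 * mount_detached_fs() is not available (see mount_proc() below).
 */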
static int __legacy_mount_proc(void)
|
2019-11-21 01:26:38 +03:00
|
|
|
{
|
|
|
|
char proc_mountpoint[] = "/tmp/crtools-proc.XXXXXX";
|
|
|
|
int fd;
|
|
|
|
|
|
|
|
if (mkdtemp(proc_mountpoint) == NULL) {
|
|
|
|
pr_perror("mkdtemp failed %s", proc_mountpoint);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
pr_info("Mount procfs in %s\n", proc_mountpoint);
|
|
|
|
if (mount("proc", proc_mountpoint, "proc", MS_MGC_VAL | MS_NOSUID | MS_NOEXEC | MS_NODEV, NULL)) {
|
|
|
|
pr_perror("mount failed");
|
|
|
|
if (rmdir(proc_mountpoint))
|
|
|
|
pr_perror("Unable to remove %s", proc_mountpoint);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
fd = open_detach_mount(proc_mountpoint);
|
|
|
|
return fd;
|
|
|
|
}
|
|
|
|
|
2012-08-06 18:36:59 +04:00
|
|
|
static int mount_proc(void)
|
2012-06-27 20:57:40 +04:00
|
|
|
{
|
2013-08-11 20:00:28 +04:00
|
|
|
int fd, ret;
|
2012-08-01 15:01:13 +04:00
|
|
|
|
2017-04-13 18:29:13 +03:00
|
|
|
if (root_ns_mask == 0)
|
|
|
|
fd = ret = open("/proc", O_DIRECTORY);
|
|
|
|
else {
|
2019-11-21 01:26:38 +03:00
|
|
|
if (kdat.has_fsopen)
|
|
|
|
fd = ret = mount_detached_fs("proc");
|
|
|
|
else
|
|
|
|
fd = ret = __legacy_mount_proc();
|
2012-08-01 15:01:13 +04:00
|
|
|
}
|
2012-08-06 18:36:59 +04:00
|
|
|
|
2013-08-11 20:00:28 +04:00
|
|
|
if (fd >= 0) {
|
|
|
|
ret = set_proc_fd(fd);
|
|
|
|
close(fd);
|
2012-08-01 15:01:13 +04:00
|
|
|
}
|
2012-08-06 18:36:59 +04:00
|
|
|
|
|
|
|
return ret;
|
2012-06-27 20:57:40 +04:00
|
|
|
}
|
|
|
|
|
2013-08-11 20:15:43 +04:00
|
|
|
/*
|
|
|
|
* Tasks cannot change sid (session id) arbitrary, but can either
|
|
|
|
* inherit one from ancestor, or create a new one with id equal to
|
|
|
|
* their pid. Thus sid-s restore is tied with children creation.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int create_children_and_session(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct pstree_item *child;
|
|
|
|
|
|
|
|
pr_info("Restoring children in alien sessions:\n");
|
|
|
|
list_for_each_entry(child, ¤t->children, sibling) {
|
|
|
|
if (!restore_before_setsid(child))
|
|
|
|
continue;
|
|
|
|
|
2019-01-17 20:33:04 +00:00
|
|
|
BUG_ON(child->born_sid != -1 && getsid(0) != child->born_sid);
|
2013-08-11 20:15:43 +04:00
|
|
|
|
|
|
|
ret = fork_with_pid(child);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-01-27 15:04:12 +03:00
|
|
|
if (current->parent)
|
|
|
|
restore_sid();
|
2013-08-11 20:15:43 +04:00
|
|
|
|
|
|
|
pr_info("Restoring children in our session:\n");
|
|
|
|
list_for_each_entry(child, ¤t->children, sibling) {
|
|
|
|
if (restore_before_setsid(child))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ret = fork_with_pid(child);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-01-26 15:26:00 +04:00
|
|
|
static int restore_task_with_children(void *_arg)
|
2011-09-23 12:00:45 +04:00
|
|
|
{
|
2012-01-26 15:26:00 +04:00
|
|
|
struct cr_clone_arg *ca = _arg;
|
2012-04-05 15:34:31 +04:00
|
|
|
pid_t pid;
|
2012-05-31 14:50:00 +04:00
|
|
|
int ret;
|
2012-01-26 15:26:00 +04:00
|
|
|
|
2012-09-05 19:52:55 +04:00
|
|
|
current = ca->item;
|
2012-05-31 14:50:00 +04:00
|
|
|
|
2013-09-23 14:33:25 +04:00
|
|
|
if (current != root_item) {
|
2014-09-12 16:43:00 +04:00
|
|
|
char buf[12];
|
2013-09-23 14:33:25 +04:00
|
|
|
int fd;
|
|
|
|
|
|
|
|
/* Determine PID in CRIU's namespace */
|
|
|
|
fd = get_service_fd(CR_PROC_FD_OFF);
|
|
|
|
if (fd < 0)
|
2014-08-28 01:05:00 +04:00
|
|
|
goto err;
|
2013-09-23 14:33:25 +04:00
|
|
|
|
|
|
|
ret = readlinkat(fd, "self", buf, sizeof(buf) - 1);
|
|
|
|
if (ret < 0) {
|
|
|
|
pr_perror("Unable to read the /proc/self link");
|
2014-08-28 01:05:00 +04:00
|
|
|
goto err;
|
2013-09-23 14:33:25 +04:00
|
|
|
}
|
|
|
|
buf[ret] = '\0';
|
|
|
|
|
2017-01-25 18:29:04 +03:00
|
|
|
current->pid->real = atoi(buf);
|
2013-09-23 14:33:25 +04:00
|
|
|
pr_debug("PID: real %d virt %d\n",
|
2017-02-10 14:28:57 +03:00
|
|
|
current->pid->real, vpid(current));
|
2013-09-23 14:33:25 +04:00
|
|
|
}
|
|
|
|
|
2012-04-05 15:34:31 +04:00
|
|
|
pid = getpid();
|
2017-02-10 14:28:57 +03:00
|
|
|
if (vpid(current) != pid) {
|
|
|
|
pr_err("Pid %d do not match expected %d\n", pid, vpid(current));
|
2014-12-11 22:55:12 +02:00
|
|
|
set_task_cr_err(EEXIST);
|
2014-08-28 01:05:00 +04:00
|
|
|
goto err;
|
2012-01-26 15:26:00 +04:00
|
|
|
}
|
2011-12-02 16:06:00 +04:00
|
|
|
|
2018-01-10 17:02:44 +03:00
|
|
|
if (log_init_by_pid(vpid(current)))
|
|
|
|
return -1;
|
|
|
|
|
2017-05-04 23:27:46 +03:00
|
|
|
if (current->parent == NULL) {
|
|
|
|
/*
|
|
|
|
* The root task has to be in its namespaces before executing
|
|
|
|
* ACT_SETUP_NS scripts, so the root netns has to be created here
|
|
|
|
*/
|
|
|
|
if (root_ns_mask & CLONE_NEWNET) {
|
2018-07-24 06:14:02 +03:00
|
|
|
struct ns_id *ns = net_get_root_ns();
|
|
|
|
if (ns->ext_key)
|
|
|
|
ret = net_set_ext(ns);
|
|
|
|
else
|
|
|
|
ret = unshare(CLONE_NEWNET);
|
2017-05-04 23:27:46 +03:00
|
|
|
if (ret) {
|
|
|
|
pr_perror("Can't unshare net-namespace");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-14 07:40:40 +03:00
|
|
|
if (root_ns_mask & CLONE_NEWTIME) {
|
|
|
|
if (prepare_timens(current->ids->time_ns_id))
|
|
|
|
goto err;
|
2020-03-23 03:26:00 +03:00
|
|
|
} else if (kdat.has_timens) {
|
|
|
|
if (prepare_timens(0))
|
|
|
|
goto err;
|
2019-08-14 07:40:40 +03:00
|
|
|
}
|
|
|
|
|
2017-05-04 23:27:46 +03:00
|
|
|
/* Wait prepare_userns */
|
|
|
|
if (restore_finish_ns_stage(CR_STATE_ROOT_TASK, CR_STATE_PREPARE_NAMESPACES) < 0)
|
|
|
|
goto err;
|
|
|
|
}
|
2016-04-21 18:34:00 +03:00
|
|
|
|
2017-06-07 14:29:32 +03:00
|
|
|
if (needs_prep_creds(current) && (prepare_userns_creds()))
|
|
|
|
goto err;
|
|
|
|
|
restore: correctly restore cgroup mounts inside a container
Before the nsroot= mount option, we were just getting lucky because the
cgroup superblocks "matched" when inspecting them from userspace, so we
were actually getting a bind mount from the host when migrating from within
cgroup namespaces.
Instead, let's actually do a new mount (i.e. not a bind mount) for cgroup
namespaces. For this, we need two things:
1. to prepare the cgroup namespace (and thus the cgroups) before the mount
ns, so when the mount() occurs it is relative to the right cgroup path.
2. to not reject cgroup filesystems with no root. A cgroup ns mount looks
like:
223 222 0:22 /lxc/unpriv /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd,nsroot=/lxc/unpriv
i.e. it has /lxc/unpriv as its root, and thus doesn't look rooted to CRIU.
We use the fstype->parse hook to rewrite this root to /, since it
is handled by the cgroup ns infrastructure.
v2: add new fstype->munge hook, allowing fstypes to munge their parsed
mountinfo entries if they want to. this allows us to get rid of the
ugly hacks with FSTYPE__CGROUP everywhere in the patch.
v3: s/fstype->munge/fstype->parse for FSTYPE__CGROUP
Signed-off-by: Tycho Andersen <tycho.andersen@canonical.com>
Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
2016-03-28 17:46:00 +03:00
|
|
|
/*
|
|
|
|
* Call this _before_ forking to optimize cgroups
|
|
|
|
* restore -- if all tasks live in one set of cgroups
|
|
|
|
* we will only move the root one there, others will
|
|
|
|
* just have it inherited.
|
|
|
|
*/
|
|
|
|
if (prepare_task_cgroup(current) < 0)
|
|
|
|
goto err;
|
|
|
|
|
2012-08-06 18:37:13 +04:00
|
|
|
/* Restore root task */
|
2012-09-05 19:52:55 +04:00
|
|
|
if (current->parent == NULL) {
|
2016-04-05 11:04:56 +08:00
|
|
|
if (join_namespaces()) {
|
|
|
|
pr_perror("Join namespaces failed");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2016-01-27 15:04:12 +03:00
|
|
|
pr_info("Calling restore_sid() for init\n");
|
|
|
|
restore_sid();
|
|
|
|
|
2012-08-01 15:01:13 +04:00
|
|
|
/*
|
|
|
|
* We need a non-/proc proc mount for restoring pid and mount
|
|
|
|
* namespaces and do not care for the rest of the cases.
|
|
|
|
* Thus -- mount proc at custom location for any new namespace
|
|
|
|
*/
|
2012-08-06 18:36:59 +04:00
|
|
|
if (mount_proc())
|
2015-11-24 15:04:00 +03:00
|
|
|
goto err;
|
2012-01-26 15:27:00 +04:00
|
|
|
|
2017-07-19 14:20:48 +03:00
|
|
|
if (!files_collected() && collect_image(&tty_cinfo))
|
|
|
|
goto err;
|
2017-05-04 21:42:36 +03:00
|
|
|
if (collect_images(before_ns_cinfos, ARRAY_SIZE(before_ns_cinfos)))
|
|
|
|
goto err;
|
2017-02-07 11:43:29 +03:00
|
|
|
|
2015-11-15 09:07:03 +03:00
|
|
|
if (prepare_namespace(current, ca->clone_flags))
|
2015-11-24 15:04:00 +03:00
|
|
|
goto err;
|
2015-11-15 09:07:03 +03:00
|
|
|
|
2017-04-28 15:29:52 +03:00
|
|
|
if (restore_finish_ns_stage(CR_STATE_PREPARE_NAMESPACES, CR_STATE_FORKING) < 0)
|
2017-02-22 09:39:45 +03:00
|
|
|
goto err;
|
|
|
|
|
2012-09-17 20:06:06 +04:00
|
|
|
if (root_prepare_shared())
|
2015-11-24 15:04:00 +03:00
|
|
|
goto err;
|
2018-01-10 17:01:33 +03:00
|
|
|
|
|
|
|
if (populate_root_fd_off())
|
|
|
|
goto err;
|
2012-08-06 18:37:13 +04:00
|
|
|
}
|
2012-08-02 16:08:06 +04:00
|
|
|
|
2018-01-10 17:02:44 +03:00
|
|
|
if (setup_newborn_fds(current))
|
|
|
|
goto err;
|
|
|
|
|
2015-11-13 20:55:00 +03:00
|
|
|
if (restore_task_mnt_ns(current))
|
2015-11-24 15:04:00 +03:00
|
|
|
goto err;
|
2012-11-20 20:39:09 +04:00
|
|
|
|
2016-05-20 20:30:00 +03:00
|
|
|
if (prepare_mappings(current))
|
2015-11-24 15:04:00 +03:00
|
|
|
goto err;
|
2014-04-21 14:48:05 +04:00
|
|
|
|
2017-04-19 16:51:01 +03:00
|
|
|
if (prepare_sigactions(ca->core) < 0)
|
2015-11-24 15:04:00 +03:00
|
|
|
goto err;
|
2014-08-06 16:25:38 +04:00
|
|
|
|
2015-10-13 18:06:30 +03:00
|
|
|
if (fault_injected(FI_RESTORE_ROOT_ONLY)) {
|
|
|
|
pr_info("fault: Restore root task failure!\n");
|
2017-10-12 21:44:54 +03:00
|
|
|
kill(getpid(), SIGKILL);
|
2015-10-13 18:06:30 +03:00
|
|
|
}
|
|
|
|
|
2017-05-05 19:13:23 +03:00
|
|
|
if (open_transport_socket())
|
|
|
|
goto err;
|
|
|
|
|
2017-04-28 15:29:26 +03:00
|
|
|
timing_start(TIME_FORK);
|
|
|
|
|
2013-08-11 20:15:43 +04:00
|
|
|
if (create_children_and_session())
|
2015-11-24 15:04:00 +03:00
|
|
|
goto err;
|
2012-06-22 00:39:00 +04:00
|
|
|
|
2017-04-28 15:29:26 +03:00
|
|
|
timing_stop(TIME_FORK);
|
2014-04-21 18:23:20 +04:00
|
|
|
|
2018-01-10 17:01:51 +03:00
|
|
|
if (populate_pid_proc())
|
|
|
|
goto err;
|
|
|
|
|
2018-01-10 17:02:00 +03:00
|
|
|
sfds_protected = true;
|
|
|
|
|
2016-05-20 20:30:00 +03:00
|
|
|
if (unmap_guard_pages(current))
|
2015-11-24 15:04:00 +03:00
|
|
|
goto err;
|
2013-08-11 20:23:18 +04:00
|
|
|
|
2013-09-27 04:38:00 +04:00
|
|
|
restore_pgid();
|
2012-07-02 15:25:00 +04:00
|
|
|
|
2015-11-15 09:07:03 +03:00
|
|
|
if (current->parent == NULL) {
|
2016-07-05 23:59:00 +03:00
|
|
|
/*
|
|
|
|
* Wait when all tasks passed the CR_STATE_FORKING stage.
|
2017-04-28 15:29:26 +03:00
|
|
|
* The stage was started by criu, but now it waits for
|
|
|
|
* the CR_STATE_RESTORE to finish. See comment near the
|
|
|
|
* CR_STATE_FORKING macro for details.
|
|
|
|
*
|
2016-07-05 23:59:00 +03:00
|
|
|
* It means that all tasks entered into their namespaces.
|
|
|
|
*/
|
2017-06-20 23:49:03 +03:00
|
|
|
if (restore_wait_other_tasks())
|
|
|
|
goto err;
|
2015-11-15 09:07:03 +03:00
|
|
|
fini_restore_mntns();
|
2017-04-28 15:29:26 +03:00
|
|
|
__restore_switch_stage(CR_STATE_RESTORE);
|
|
|
|
} else {
|
|
|
|
if (restore_finish_stage(task_entries, CR_STATE_FORKING) < 0)
|
|
|
|
goto err;
|
2015-11-15 09:07:03 +03:00
|
|
|
}
|
2012-07-02 15:25:00 +04:00
|
|
|
|
2017-02-10 14:28:57 +03:00
|
|
|
if (restore_one_task(vpid(current), ca->core))
|
2014-08-29 17:58:00 +04:00
|
|
|
goto err;
|
|
|
|
|
|
|
|
return 0;
|
2014-08-28 01:05:00 +04:00
|
|
|
|
|
|
|
err:
|
|
|
|
if (current->parent == NULL)
|
|
|
|
futex_abort_and_wake(&task_entries->nr_in_progress);
|
2014-04-21 18:23:19 +04:00
|
|
|
exit(1);
|
2011-09-23 12:00:45 +04:00
|
|
|
}
|
|
|
|
|
2015-12-16 09:15:07 -07:00
|
|
|
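/*
 * Seize and interrupt every restored thread with ptrace so that criu
 * regains control for the final restore steps (suspending seccomp,
 * catching the last rt_sigreturn) after the network has been unlocked.
 */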
static int attach_to_tasks(bool root_seized)
|
2013-09-23 14:33:34 +04:00
|
|
|
{
|
|
|
|
struct pstree_item *item;
|
|
|
|
|
|
|
|
for_each_pstree_item(item) {
|
2015-12-16 09:15:07 -07:00
|
|
|
int status, i;
|
2013-09-23 14:33:34 +04:00
|
|
|
|
2014-08-07 13:30:54 +04:00
|
|
|
if (!task_alive(item))
|
2013-09-23 14:33:34 +04:00
|
|
|
continue;
|
|
|
|
|
2017-04-20 14:13:44 +03:00
|
|
|
if (item->nr_threads == 1) {
|
|
|
|
item->threads[0].real = item->pid->real;
|
|
|
|
} else {
|
|
|
|
if (parse_threads(item->pid->real, &item->threads, &item->nr_threads))
|
|
|
|
return -1;
|
|
|
|
}
|
2013-09-23 14:33:34 +04:00
|
|
|
|
|
|
|
for (i = 0; i < item->nr_threads; i++) {
|
2016-07-15 22:28:43 +03:00
|
|
|
pid_t pid = item->threads[i].real;
|
2013-09-23 14:33:34 +04:00
|
|
|
|
2015-04-23 14:23:00 +03:00
|
|
|
if (item != root_item || !root_seized || i != 0) {
|
2015-12-16 09:15:07 -07:00
|
|
|
if (ptrace(PTRACE_SEIZE, pid, 0, 0)) {
|
2014-06-30 20:30:44 +04:00
|
|
|
pr_perror("Can't attach to %d", pid);
|
|
|
|
return -1;
|
|
|
|
}
|
2013-09-23 14:33:34 +04:00
|
|
|
}
|
2015-12-16 09:15:07 -07:00
|
|
|
if (ptrace(PTRACE_INTERRUPT, pid, 0, 0)) {
|
|
|
|
pr_perror("Can't interrupt the %d task", pid);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2013-09-23 14:33:34 +04:00
|
|
|
|
|
|
|
if (wait4(pid, &status, __WALL, NULL) != pid) {
|
2015-04-23 14:35:00 +03:00
|
|
|
pr_perror("waitpid(%d) failed", pid);
|
2013-09-23 14:33:34 +04:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2015-11-03 23:04:52 -07:00
|
|
|
/*
|
|
|
|
* Suspend seccomp if necessary. We need to do this because
|
|
|
|
* although seccomp is restored at the very end of the
|
|
|
|
* restorer blob (and the final sigreturn is ok), here we're
|
|
|
|
* doing an munmap in the process, which may be blocked by
|
|
|
|
* seccomp and cause the task to be killed.
|
|
|
|
*/
|
2017-02-08 06:36:10 -08:00
|
|
|
if (rsti(item)->has_seccomp && ptrace_suspend_seccomp(pid) < 0)
|
2015-11-03 23:04:52 -07:00
|
|
|
pr_err("failed to suspend seccomp, restore will probably fail...\n");
|
|
|
|
|
2015-12-16 09:15:07 -07:00
|
|
|
if (ptrace(PTRACE_CONT, pid, NULL, NULL)) {
|
|
|
|
pr_perror("Unable to resume %d", pid);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
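/*
 * Stop every restored thread right before its final rt_sigreturn,
 * either with a breakpoint planted at rsti(item)->breakpoint or via
 * syscall tracing (see compel_stop_pie()).
 */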
static int catch_tasks(bool root_seized, enum trace_flags *flag)
|
|
|
|
{
|
|
|
|
struct pstree_item *item;
|
|
|
|
|
|
|
|
for_each_pstree_item(item) {
|
|
|
|
int status, i, ret;
|
|
|
|
|
|
|
|
if (!task_alive(item))
|
|
|
|
continue;
|
|
|
|
|
2017-04-20 14:13:44 +03:00
|
|
|
if (item->nr_threads == 1) {
|
|
|
|
item->threads[0].real = item->pid->real;
|
|
|
|
} else {
|
|
|
|
if (parse_threads(item->pid->real, &item->threads, &item->nr_threads))
|
|
|
|
return -1;
|
|
|
|
}
|
2015-12-16 09:15:07 -07:00
|
|
|
|
|
|
|
for (i = 0; i < item->nr_threads; i++) {
|
2016-07-15 22:28:43 +03:00
|
|
|
pid_t pid = item->threads[i].real;
|
2015-12-16 09:15:07 -07:00
|
|
|
|
|
|
|
if (ptrace(PTRACE_INTERRUPT, pid, 0, 0)) {
|
|
|
|
pr_perror("Can't interrupt the %d task", pid);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (wait4(pid, &status, __WALL, NULL) != pid) {
|
|
|
|
pr_perror("waitpid(%d) failed", pid);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2016-09-29 16:43:29 +03:00
|
|
|
ret = compel_stop_pie(pid, rsti(item)->breakpoint,
|
|
|
|
flag, fault_injected(FI_NO_BREAKPOINTS));
|
2014-09-17 19:12:04 +04:00
|
|
|
if (ret < 0)
|
|
|
|
return -1;
|
2013-09-23 14:33:34 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-12-21 18:13:06 +00:00
|
|
|
static int clear_breakpoints(void)
|
2014-09-19 23:48:00 +04:00
|
|
|
{
|
|
|
|
struct pstree_item *item;
|
|
|
|
int ret = 0, i;
|
|
|
|
|
2016-04-27 15:03:00 +03:00
|
|
|
if (fault_injected(FI_NO_BREAKPOINTS))
|
|
|
|
return 0;
|
|
|
|
|
2014-10-11 01:14:00 +04:00
|
|
|
for_each_pstree_item(item) {
|
|
|
|
if (!task_alive(item))
|
|
|
|
continue;
|
2014-09-19 23:48:00 +04:00
|
|
|
for (i = 0; i < item->nr_threads; i++)
|
|
|
|
ret |= ptrace_flush_breakpoints(item->threads[i].real);
|
2014-10-11 01:14:00 +04:00
|
|
|
}
|
2014-09-19 23:48:00 +04:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-12-15 13:51:27 +03:00
|
|
|
static void finalize_restore(void)
|
2013-09-23 14:33:34 +04:00
|
|
|
{
|
|
|
|
struct pstree_item *item;
|
|
|
|
|
|
|
|
for_each_pstree_item(item) {
|
2017-01-25 18:29:04 +03:00
|
|
|
pid_t pid = item->pid->real;
|
2013-09-23 14:33:35 +04:00
|
|
|
struct parasite_ctl *ctl;
|
2019-11-09 22:20:41 +00:00
|
|
|
unsigned long restorer_addr;
|
2013-09-23 14:33:34 +04:00
|
|
|
|
2014-08-07 13:30:54 +04:00
|
|
|
if (!task_alive(item))
|
2013-09-23 14:33:34 +04:00
|
|
|
continue;
|
|
|
|
|
2013-09-23 14:33:35 +04:00
|
|
|
/* Unmap the restorer blob */
|
2016-11-21 21:26:13 +03:00
|
|
|
ctl = compel_prepare_noctx(pid);
|
2013-09-23 14:33:35 +04:00
|
|
|
if (ctl == NULL)
|
2015-12-15 13:51:27 +03:00
|
|
|
continue;
|
2013-09-23 14:33:35 +04:00
|
|
|
|
2019-11-09 22:20:41 +00:00
|
|
|
restorer_addr = (unsigned long)rsti(item)->munmap_restorer;
|
|
|
|
if (compel_unmap(ctl, restorer_addr))
|
|
|
|
pr_err("Failed to unmap restorer from %d\n", pid);
|
2013-09-23 14:33:35 +04:00
|
|
|
|
|
|
|
xfree(ctl);
|
|
|
|
|
2017-01-25 18:29:04 +03:00
|
|
|
if ((item->pid->state == TASK_STOPPED) ||
|
2016-05-02 18:46:05 +03:00
|
|
|
(opts.final_state == TASK_STOPPED))
|
2017-01-25 18:29:04 +03:00
|
|
|
kill(item->pid->real, SIGSTOP);
|
2015-12-15 13:51:27 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-09 22:20:44 +00:00
|
|
|
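/*
 * Set the final register state for each thread and detach from it;
 * after PTRACE_DETACH the tasks resume running through sigreturn on
 * their own.
 */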
static int finalize_restore_detach(void)
|
2015-12-15 13:51:27 +03:00
|
|
|
{
|
|
|
|
struct pstree_item *item;
|
|
|
|
|
|
|
|
for_each_pstree_item(item) {
|
|
|
|
pid_t pid;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!task_alive(item))
|
|
|
|
continue;
|
|
|
|
|
2013-09-23 14:33:34 +04:00
|
|
|
for (i = 0; i < item->nr_threads; i++) {
|
|
|
|
pid = item->threads[i].real;
|
|
|
|
if (pid < 0) {
|
2019-11-09 22:20:44 +00:00
|
|
|
pr_err("pstree item has unvalid pid %d\n", pid);
|
|
|
|
continue;
|
2013-09-23 14:33:34 +04:00
|
|
|
}
|
|
|
|
|
2019-11-09 22:20:44 +00:00
|
|
|
if (arch_set_thread_regs_nosigrt(&item->threads[i])) {
|
2017-10-05 15:50:39 +02:00
|
|
|
pr_perror("Restoring regs for %d failed", pid);
|
2019-11-09 22:20:44 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if (ptrace(PTRACE_DETACH, pid, NULL, 0)) {
|
|
|
|
pr_perror("Unable to detach %d", pid);
|
|
|
|
return -1;
|
|
|
|
}
|
2013-09-23 14:33:34 +04:00
|
|
|
}
|
|
|
|
}
|
2019-11-09 22:20:44 +00:00
|
|
|
return 0;
|
2013-09-23 14:33:34 +04:00
|
|
|
}
|
|
|
|
|
2014-08-06 16:24:52 +04:00
|
|
|
static void ignore_kids(void)
|
|
|
|
{
|
|
|
|
struct sigaction sa = { .sa_handler = SIG_DFL };
|
|
|
|
|
|
|
|
if (sigaction(SIGCHLD, &sa, NULL) < 0)
|
|
|
|
pr_perror("Restoring CHLD sigaction failed");
|
|
|
|
}
|
|
|
|
|
2015-12-14 12:19:16 +03:00
|
|
|
static unsigned int saved_loginuid;
|
|
|
|
|
|
|
|
static int prepare_userns_hook(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2017-05-04 16:31:44 +03:00
|
|
|
if (kdat.luid != LUID_FULL)
|
2015-12-24 14:43:44 +03:00
|
|
|
return 0;
|
2015-12-14 12:19:16 +03:00
|
|
|
/*
|
|
|
|
* Save old loginuid and set it to INVALID_UID:
|
|
|
|
* this value means that loginuid is unset and it will be inherited.
|
|
|
|
* After you set some value to /proc/<>/loginuid it can't be changed
|
|
|
|
* inside container due to permissions.
|
|
|
|
* But you still can set this value if it was unset.
|
|
|
|
*/
|
2016-02-08 15:32:00 +03:00
|
|
|
saved_loginuid = parse_pid_loginuid(getpid(), &ret, false);
|
2015-12-14 12:19:16 +03:00
|
|
|
if (ret < 0)
|
|
|
|
return -1;
|
|
|
|
|
2020-07-31 14:02:02 +03:00
|
|
|
if (prepare_loginuid(INVALID_UID) < 0) {
|
2017-03-23 15:02:03 -07:00
|
|
|
pr_err("Setting loginuid for CT init task failed, CAP_AUDIT_CONTROL?\n");
|
2015-12-14 12:19:16 +03:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void restore_origin_ns_hook(void)
|
|
|
|
{
|
2017-05-04 16:31:44 +03:00
|
|
|
if (kdat.luid != LUID_FULL)
|
2015-12-24 14:43:44 +03:00
|
|
|
return;
|
|
|
|
|
2015-12-14 12:19:16 +03:00
|
|
|
/* not critical: it does not affect CT in any way */
|
2020-07-31 14:02:02 +03:00
|
|
|
if (prepare_loginuid(saved_loginuid) < 0)
|
2017-03-23 15:02:03 -07:00
|
|
|
pr_err("Restore original /proc/self/loginuid failed\n");
|
2015-12-14 12:19:16 +03:00
|
|
|
}
|
|
|
|
|
2016-04-27 15:23:00 +03:00
|
|
|
static int write_restored_pid(void)
|
|
|
|
{
|
|
|
|
int pid;
|
|
|
|
|
|
|
|
if (!opts.pidfile)
|
|
|
|
return 0;
|
|
|
|
|
2017-01-25 18:29:04 +03:00
|
|
|
pid = root_item->pid->real;
|
2016-04-27 15:23:00 +03:00
|
|
|
|
|
|
|
if (write_pidfile(pid) < 0) {
|
|
|
|
pr_perror("Can't write pidfile");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-10-06 21:25:23 +01:00
|
|
|
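/*
 * Reap all children until wait() reports ECHILD. Used when criu stays
 * the parent of the restored tree, i.e. when it is neither detaching
 * nor executing a command after restore.
 */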
static void reap_zombies(void)
|
|
|
|
{
|
|
|
|
while (1) {
|
|
|
|
pid_t pid = wait(NULL);
|
|
|
|
if (pid == -1) {
|
|
|
|
if (errno != ECHILD)
|
|
|
|
pr_perror("Error while waiting for pids");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-28 21:11:13 +04:00
|
|
|
static int restore_root_task(struct pstree_item *init)
|
2011-09-23 12:00:45 +04:00
|
|
|
{
|
2014-09-19 23:08:00 +04:00
|
|
|
enum trace_flags flag = TRACE_ALL;
|
2015-09-23 17:21:16 +03:00
|
|
|
int ret, fd, mnt_ns_fd = -1;
|
2016-12-14 02:58:14 +03:00
|
|
|
int root_seized = 0;
|
2016-06-23 15:13:23 +00:00
|
|
|
struct pstree_item *item;
|
2011-09-23 12:00:45 +04:00
|
|
|
|
2015-10-08 16:21:00 +03:00
|
|
|
ret = run_scripts(ACT_PRE_RESTORE);
|
|
|
|
if (ret != 0) {
|
|
|
|
pr_err("Aborting restore due to pre-restore script ret code %d\n", ret);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2013-09-23 14:33:25 +04:00
|
|
|
fd = open("/proc", O_DIRECTORY | O_RDONLY);
|
|
|
|
if (fd < 0) {
|
|
|
|
pr_perror("Unable to open /proc");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = install_service_fd(CR_PROC_FD_OFF, fd);
|
|
|
|
if (ret < 0)
|
|
|
|
return -1;
|
|
|
|
|
2012-01-26 15:27:00 +04:00
|
|
|
/*
|
|
|
|
* FIXME -- currently we assume that all the tasks live
|
|
|
|
* in the same set of namespaces. This is done to debug
|
|
|
|
* the ns contents dumping/restoring. Need to revisit
|
|
|
|
* this later.
|
|
|
|
*/
|
|
|
|
|
2020-05-05 14:53:08 +00:00
|
|
|
if (prepare_userns_hook())
|
|
|
|
return -1;
|
|
|
|
|
2020-05-05 15:45:25 +00:00
|
|
|
if (prepare_namespace_before_tasks())
|
|
|
|
return -1;
|
|
|
|
|
2017-02-10 14:28:57 +03:00
|
|
|
if (vpid(init) == INIT_PID) {
|
2014-04-21 18:23:22 +04:00
|
|
|
if (!(root_ns_mask & CLONE_NEWPID)) {
|
2012-12-06 10:38:46 +03:00
|
|
|
pr_err("This process tree can only be restored "
|
|
|
|
"in a new pid namespace.\n"
|
2013-05-09 10:58:03 -07:00
|
|
|
"criu should be re-executed with the "
|
2012-12-06 10:38:46 +03:00
|
|
|
"\"--namespace pid\" option.\n");
|
2012-06-22 00:38:00 +04:00
|
|
|
return -1;
|
|
|
|
}
|
2020-10-28 11:45:55 +03:00
|
|
|
} else if (root_ns_mask & CLONE_NEWPID) {
|
|
|
|
struct ns_id *ns;
|
|
|
|
/*
|
|
|
|
* Restoring into an existing PID namespace. This disables
|
|
|
|
* the check to require a PID 1 when restoring a process
|
|
|
|
* which used to be in a PID namespace.
|
|
|
|
*/
|
|
|
|
ns = lookup_ns_by_id(init->ids->pid_ns_id, &pid_ns_desc);
|
|
|
|
if (!ns || !ns->ext_key) {
|
|
|
|
pr_err("Can't restore pid namespace without the process init\n");
|
|
|
|
return -1;
|
|
|
|
}
|
2012-06-19 15:53:00 +04:00
|
|
|
}
|
|
|
|
|
2017-04-28 15:28:25 +03:00
|
|
|
__restore_switch_stage_nw(CR_STATE_ROOT_TASK);
|
2012-06-22 00:38:00 +04:00
|
|
|
|
2013-01-19 01:16:19 +04:00
|
|
|
ret = fork_with_pid(init);
|
2011-09-23 12:00:45 +04:00
|
|
|
if (ret < 0)
|
2015-11-18 14:04:26 +03:00
|
|
|
goto out;
|
2011-09-23 12:00:45 +04:00
|
|
|
|
2015-12-14 12:19:16 +03:00
|
|
|
restore_origin_ns_hook();
|
|
|
|
|
2016-05-23 22:12:00 +03:00
|
|
|
if (rsti(init)->clone_flags & CLONE_PARENT) {
|
2014-09-10 01:13:00 +04:00
|
|
|
struct sigaction act;
|
2016-05-23 22:12:00 +03:00
|
|
|
|
|
|
|
root_seized = 1;
|
2014-09-10 01:13:00 +04:00
|
|
|
/*
|
|
|
|
* Root task will be our sibling. This means that
|
|
|
|
* we will not notice when (if) it dies in SIGCHLD
|
|
|
|
* handler, but we should. To do this -- attach to
|
|
|
|
* the guy with ptrace (below) and (!) make the kernel
|
|
|
|
* deliver us the signal when it gets stopped.
|
|
|
|
* It will, e.g. in the case of a segfault, before handling
|
|
|
|
* the signal.
|
|
|
|
*/
|
|
|
|
sigaction(SIGCHLD, NULL, &act);
|
|
|
|
act.sa_flags &= ~SA_NOCLDSTOP;
|
|
|
|
sigaction(SIGCHLD, &act, NULL);
|
|
|
|
|
2017-01-25 18:29:04 +03:00
|
|
|
if (ptrace(PTRACE_SEIZE, init->pid->real, 0, 0)) {
|
2014-06-30 20:30:44 +04:00
|
|
|
pr_perror("Can't attach to init");
|
2015-11-18 14:04:26 +03:00
|
|
|
goto out_kill;
|
2014-06-30 20:30:44 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-28 15:29:52 +03:00
|
|
|
if (!root_ns_mask)
|
|
|
|
goto skip_ns_bouncing;
|
|
|
|
|
2014-11-07 15:09:19 +03:00
|
|
|
/*
|
|
|
|
* uid_map and gid_map must be filled from a parent user namespace.
|
|
|
|
* prepare_userns_creds() must be called after filling mappings.
|
|
|
|
*/
|
|
|
|
if ((root_ns_mask & CLONE_NEWUSER) && prepare_userns(init))
|
2015-11-18 14:04:26 +03:00
|
|
|
goto out_kill;
|
2014-11-07 15:09:19 +03:00
|
|
|
|
2013-05-31 19:01:31 +04:00
|
|
|
pr_info("Wait until namespaces are created\n");
|
|
|
|
ret = restore_wait_inprogress_tasks();
|
|
|
|
if (ret)
|
2015-11-18 14:04:26 +03:00
|
|
|
goto out_kill;
|
2013-05-31 19:01:31 +04:00
|
|
|
|
2017-02-22 09:39:45 +03:00
|
|
|
ret = run_scripts(ACT_SETUP_NS);
|
|
|
|
if (ret)
|
|
|
|
goto out_kill;
|
|
|
|
|
2017-04-28 15:28:41 +03:00
|
|
|
ret = restore_switch_stage(CR_STATE_PREPARE_NAMESPACES);
|
2017-02-22 09:39:45 +03:00
|
|
|
if (ret)
|
|
|
|
goto out_kill;
|
|
|
|
|
2015-09-23 17:21:16 +03:00
|
|
|
if (root_ns_mask & CLONE_NEWNS) {
|
2017-01-25 18:29:04 +03:00
|
|
|
mnt_ns_fd = open_proc(init->pid->real, "ns/mnt");
|
2016-11-07 13:37:47 -08:00
|
|
|
if (mnt_ns_fd < 0)
|
2015-11-18 14:04:26 +03:00
|
|
|
goto out_kill;
|
2015-09-23 17:21:16 +03:00
|
|
|
}
|
|
|
|
|
2018-11-29 09:24:01 +00:00
|
|
|
if (root_ns_mask & opts.empty_ns & CLONE_NEWNET) {
|
2016-11-15 17:08:00 +03:00
|
|
|
/*
|
|
|
|
* Local TCP connections were locked by network_lock_internal()
|
|
|
|
* on dump and normally should have been C/R-ed by respectively
|
|
|
|
* dump_iptables() and restore_iptables() in net.c. However in
|
|
|
|
* the '--empty-ns net' mode no iptables C/R is done and we
|
|
|
|
* need to return these rules by hands.
|
|
|
|
*/
|
|
|
|
ret = network_lock_internal();
|
|
|
|
if (ret)
|
|
|
|
goto out_kill;
|
|
|
|
}
|
|
|
|
|
2016-01-19 15:10:00 +03:00
|
|
|
ret = run_scripts(ACT_POST_SETUP_NS);
|
|
|
|
if (ret)
|
|
|
|
goto out_kill;
|
2013-08-11 21:22:40 +04:00
|
|
|
|
2017-04-28 15:29:52 +03:00
|
|
|
__restore_switch_stage(CR_STATE_FORKING);
|
|
|
|
|
|
|
|
skip_ns_bouncing:
|
|
|
|
|
|
|
|
ret = restore_wait_inprogress_tasks();
|
2013-08-12 06:17:04 +04:00
|
|
|
if (ret < 0)
|
2015-11-18 14:04:26 +03:00
|
|
|
goto out_kill;
|
2012-04-11 22:06:36 +04:00
|
|
|
|
2019-12-18 23:32:32 +00:00
|
|
|
ret = apply_memfd_seals();
|
|
|
|
if (ret < 0)
|
|
|
|
goto out_kill;
|
|
|
|
|
2017-04-28 15:29:26 +03:00
|
|
|
/*
|
|
|
|
* Zombies die after CR_STATE_RESTORE which is switched
|
|
|
|
* by root task, not by us. See comment before CR_STATE_FORKING
|
|
|
|
* in the header for details.
|
|
|
|
*/
|
2016-06-23 15:13:23 +00:00
|
|
|
for_each_pstree_item(item) {
|
2017-01-25 18:29:04 +03:00
|
|
|
if (item->pid->state == TASK_DEAD)
|
2016-06-23 15:13:23 +00:00
|
|
|
task_entries->nr_threads--;
|
|
|
|
}
|
|
|
|
|
2013-08-12 06:17:04 +04:00
|
|
|
ret = restore_switch_stage(CR_STATE_RESTORE_SIGCHLD);
|
2013-04-19 15:58:50 +04:00
|
|
|
if (ret < 0)
|
2013-08-20 15:17:30 +04:00
|
|
|
goto out_kill;
|
2013-04-19 15:58:50 +04:00
|
|
|
|
2015-02-13 16:05:24 +04:00
|
|
|
ret = stop_usernsd();
|
|
|
|
if (ret < 0)
|
|
|
|
goto out_kill;
|
|
|
|
|
2015-01-12 14:54:18 +03:00
|
|
|
ret = move_veth_to_bridge();
|
|
|
|
if (ret < 0)
|
|
|
|
goto out_kill;
|
|
|
|
|
2015-03-19 14:50:55 +00:00
|
|
|
ret = prepare_cgroup_properties();
|
|
|
|
if (ret < 0)
|
|
|
|
goto out_kill;
|
|
|
|
|
2016-12-14 02:58:13 +03:00
|
|
|
if (fault_injected(FI_POST_RESTORE))
|
|
|
|
goto out_kill;
|
|
|
|
|
2014-09-03 23:43:46 +04:00
|
|
|
ret = run_scripts(ACT_POST_RESTORE);
|
2014-03-31 22:00:00 +04:00
|
|
|
if (ret != 0) {
|
2015-10-08 16:21:00 +03:00
|
|
|
pr_err("Aborting restore due to post-restore script ret code %d\n", ret);
|
2014-03-31 22:00:00 +04:00
|
|
|
timing_stop(TIME_RESTORE);
|
|
|
|
write_stats(RESTORE_STATS);
|
|
|
|
goto out_kill;
|
|
|
|
}
|
|
|
|
|
2016-12-14 02:58:14 +03:00
|
|
|
/*
|
|
|
|
* There is no need to call try_clean_remaps() after this point,
|
|
|
|
* as restore went OK and all ghosts were removed by the openers.
|
|
|
|
*/
|
|
|
|
if (depopulate_roots_yard(mnt_ns_fd, false))
|
|
|
|
goto out_kill;
|
|
|
|
|
|
|
|
close_safe(&mnt_ns_fd);
|
|
|
|
|
2016-04-27 15:23:00 +03:00
|
|
|
if (write_restored_pid())
|
|
|
|
goto out_kill;
|
|
|
|
|
2013-07-15 18:14:12 +04:00
|
|
|
/* Unlock network before disabling repair mode on sockets */
|
2012-09-17 20:06:14 +04:00
|
|
|
network_unlock();
|
2013-07-15 18:14:12 +04:00
|
|
|
|
2014-08-06 16:24:52 +04:00
|
|
|
/*
|
|
|
|
* Stop getting sigchld, after we resume the tasks they
|
|
|
|
* may start to exit poking criu in vain.
|
|
|
|
*/
|
|
|
|
ignore_kids();
|
|
|
|
|
2013-07-15 18:14:12 +04:00
|
|
|
/*
|
|
|
|
* -------------------------------------------------------------
|
2019-11-09 22:20:44 +00:00
|
|
|
* Network is unlocked. If something fails below - we lose data
|
|
|
|
* or a connection.
|
2013-07-15 18:14:12 +04:00
|
|
|
*/
|
2016-05-23 22:12:00 +03:00
|
|
|
attach_to_tasks(root_seized);
|
2013-07-15 18:14:12 +04:00
|
|
|
|
2019-11-09 22:20:44 +00:00
|
|
|
if (restore_switch_stage(CR_STATE_RESTORE_CREDS))
|
|
|
|
goto out_kill_network_unlocked;
|
2012-01-19 01:33:16 +03:00
|
|
|
|
2013-08-11 21:25:42 +04:00
|
|
|
timing_stop(TIME_RESTORE);
|
|
|
|
|
2019-11-09 22:20:44 +00:00
|
|
|
if (catch_tasks(root_seized, &flag)) {
|
|
|
|
pr_err("Can't catch all tasks\n");
|
|
|
|
goto out_kill_network_unlocked;
|
|
|
|
}
|
2013-09-23 14:33:34 +04:00
|
|
|
|
2017-11-22 21:37:12 +02:00
|
|
|
if (lazy_pages_finish_restore())
|
2019-11-09 22:20:44 +00:00
|
|
|
goto out_kill_network_unlocked;
|
2017-11-22 21:37:12 +02:00
|
|
|
|
2017-04-28 15:27:42 +03:00
|
|
|
__restore_switch_stage(CR_STATE_COMPLETE);
|
2012-01-16 23:52:15 +03:00
|
|
|
|
2019-11-09 22:20:44 +00:00
|
|
|
ret = compel_stop_on_syscall(task_entries->nr_threads,
|
|
|
|
__NR(rt_sigreturn, 0), __NR(rt_sigreturn, 1), flag);
|
|
|
|
if (ret) {
|
|
|
|
pr_err("Can't stop all tasks on rt_sigreturn\n");
|
|
|
|
goto out_kill_network_unlocked;
|
|
|
|
}
|
2013-09-23 14:33:34 +04:00
|
|
|
|
2014-09-19 23:48:00 +04:00
|
|
|
if (clear_breakpoints())
|
|
|
|
pr_err("Unable to flush breakpoints\n");
|
|
|
|
|
2019-11-09 22:20:44 +00:00
|
|
|
finalize_restore();
|
2015-12-15 13:51:27 +03:00
|
|
|
|
2017-01-18 12:02:04 +03:00
|
|
|
ret = run_scripts(ACT_PRE_RESUME);
|
|
|
|
if (ret)
|
|
|
|
pr_err("Pre-resume script ret code %d\n", ret);
|
|
|
|
|
2015-12-15 13:51:28 +03:00
|
|
|
if (restore_freezer_state())
|
|
|
|
pr_err("Unable to restore freezer state\n");
|
|
|
|
|
2015-12-15 13:51:27 +03:00
|
|
|
/* Detaches from processes and they continue run through sigreturn. */
|
2019-11-09 22:20:44 +00:00
|
|
|
if (finalize_restore_detach())
|
|
|
|
goto out_kill_network_unlocked;
|
2013-09-23 14:33:34 +04:00
|
|
|
|
2019-11-09 22:20:44 +00:00
|
|
|
pr_info("Restore finished successfully. Tasks resumed.\n");
|
2013-08-11 13:00:45 +04:00
|
|
|
write_stats(RESTORE_STATS);
|
|
|
|
|
2020-05-21 17:40:17 +00:00
|
|
|
/* This has the effect of dismissing the image streamer */
|
|
|
|
close_image_dir();
|
|
|
|
|
2016-04-07 12:59:29 +03:00
|
|
|
ret = run_scripts(ACT_POST_RESUME);
|
|
|
|
if (ret != 0)
|
|
|
|
pr_err("Post-resume script ret code %d\n", ret);
|
|
|
|
|
2020-10-06 21:25:23 +01:00
|
|
|
if (!opts.restore_detach && !opts.exec_cmd) {
|
|
|
|
reap_zombies();
|
|
|
|
}
|
2013-08-12 06:33:21 +04:00
|
|
|
|
2011-09-23 12:00:45 +04:00
|
|
|
return 0;
|
2013-08-12 06:33:21 +04:00
|
|
|
|
2019-11-09 22:20:44 +00:00
|
|
|
out_kill_network_unlocked:
|
|
|
|
pr_err("Killing processes because of failure on restore.\nThe Network was unlocked so some data or a connection may have been lost.\n");
|
2013-08-20 15:17:30 +04:00
|
|
|
out_kill:
|
|
|
|
/*
|
|
|
|
* The processes can be killed only when all of them have been created,
|
2018-08-22 20:54:30 +01:00
|
|
|
* otherwise external processes can be killed.
|
2013-08-20 15:17:30 +04:00
|
|
|
*/
|
2020-10-28 11:45:55 +03:00
|
|
|
if (vpid(root_item) == INIT_PID) {
|
2015-10-09 17:41:00 +03:00
|
|
|
int status;
|
|
|
|
|
2013-08-12 06:33:21 +04:00
|
|
|
/* Kill init */
|
2017-01-25 18:29:04 +03:00
|
|
|
if (root_item->pid->real > 0)
|
|
|
|
kill(root_item->pid->real, SIGKILL);
|
2015-10-09 17:41:00 +03:00
|
|
|
|
2017-01-25 18:29:04 +03:00
|
|
|
if (waitpid(root_item->pid->real, &status, 0) < 0)
|
2018-09-11 11:09:02 +03:00
|
|
|
pr_warn("Unable to wait %d: %s\n",
|
2017-01-25 18:29:04 +03:00
|
|
|
root_item->pid->real, strerror(errno));
|
2013-08-12 06:33:21 +04:00
|
|
|
} else {
|
|
|
|
struct pstree_item *pi;
|
|
|
|
|
|
|
|
for_each_pstree_item(pi)
|
2018-05-31 09:00:02 +00:00
|
|
|
if (pi->pid->real > 0)
|
|
|
|
kill(pi->pid->real, SIGKILL);
|
2013-08-12 06:33:21 +04:00
|
|
|
}
|
|
|
|
|
2013-08-20 15:17:30 +04:00
|
|
|
out:
|
2016-12-14 02:58:14 +03:00
|
|
|
depopulate_roots_yard(mnt_ns_fd, true);
|
2015-02-13 16:05:24 +04:00
|
|
|
stop_usernsd();
|
2013-08-20 15:17:30 +04:00
|
|
|
__restore_switch_stage(CR_STATE_FAIL);
|
2013-08-12 06:33:21 +04:00
|
|
|
pr_err("Restoring FAILED.\n");
|
2014-09-12 12:53:00 +04:00
|
|
|
return -1;
|
2011-09-23 12:00:45 +04:00
|
|
|
}
|
|
|
|
|
2016-03-15 13:21:11 +00:00
|
|
|
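/* Allocate the shared task_entries area used to synchronize all restored tasks. */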
int prepare_task_entries(void)
|
2012-06-26 14:51:00 +04:00
|
|
|
{
|
2016-02-11 01:42:00 +03:00
|
|
|
task_entries_pos = rst_mem_align_cpos(RM_SHREMAP);
|
2014-08-18 19:47:20 +04:00
|
|
|
task_entries = rst_mem_alloc(sizeof(*task_entries), RM_SHREMAP);
|
|
|
|
if (!task_entries) {
|
2012-06-26 14:51:00 +04:00
|
|
|
pr_perror("Can't map shmem");
|
|
|
|
return -1;
|
|
|
|
}
|
2014-08-18 19:47:20 +04:00
|
|
|
|
2012-12-04 17:22:45 +03:00
|
|
|
task_entries->nr_threads = 0;
|
2012-06-26 14:51:00 +04:00
|
|
|
task_entries->nr_tasks = 0;
|
2012-07-02 15:25:00 +04:00
|
|
|
task_entries->nr_helpers = 0;
|
2017-04-28 15:27:42 +03:00
|
|
|
futex_set(&task_entries->start, CR_STATE_FAIL);
|
2015-02-13 16:05:24 +04:00
|
|
|
mutex_init(&task_entries->userns_sync_lock);
|
2017-05-16 19:26:59 +03:00
|
|
|
mutex_init(&task_entries->last_pid_mutex);
|
2012-06-26 14:51:00 +04:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-27 12:52:54 +02:00
|
|
|
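/* Read only the task state from the core image. */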
int prepare_dummy_task_state(struct pstree_item *pi)
|
|
|
|
{
|
|
|
|
CoreEntry *core;
|
|
|
|
|
|
|
|
if (open_core(vpid(pi), &core))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
pi->pid->state = core->tc->task_state;
|
|
|
|
core_entry__free_unpacked(core, NULL);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-05-28 21:11:13 +04:00
|
|
|
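/* Top-level restore entry point: set up services and images, then restore the process tree. */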
int cr_restore_tasks(void)
|
2011-09-23 12:00:45 +04:00
|
|
|
{
|
2013-12-19 21:35:00 +04:00
|
|
|
int ret = -1;
|
|
|
|
|
2019-04-16 02:03:18 +03:00
|
|
|
if (init_service_fd())
|
|
|
|
return 1;
|
|
|
|
|
2014-02-27 20:58:23 +04:00
|
|
|
if (cr_plugin_init(CR_PLUGIN_STAGE__RESTORE))
|
2012-07-19 17:37:25 +04:00
|
|
|
return -1;
|
|
|
|
|
2020-09-28 09:53:43 +03:00
|
|
|
if (check_img_inventory(/* restore = */ true) < 0)
|
2013-12-19 21:35:00 +04:00
|
|
|
goto err;
|
|
|
|
|
2013-08-11 13:00:45 +04:00
|
|
|
if (init_stats(RESTORE_STATS))
|
2013-12-19 21:35:00 +04:00
|
|
|
goto err;
|
2013-08-11 13:00:45 +04:00
|
|
|
|
2017-06-26 16:20:07 +03:00
|
|
|
if (lsm_check_opts())
|
|
|
|
goto err;
|
|
|
|
|
2013-08-11 21:25:42 +04:00
|
|
|
timing_start(TIME_RESTORE);
|
|
|
|
|
2012-12-21 17:35:36 +04:00
|
|
|
if (cpu_init() < 0)
|
2013-12-19 21:35:00 +04:00
|
|
|
goto err;
|
2012-12-21 17:35:36 +04:00
|
|
|
|
2017-06-15 19:36:14 +03:00
|
|
|
if (vdso_init_restore())
|
2013-12-19 21:35:00 +04:00
|
|
|
goto err;
|
2013-05-24 01:42:13 +04:00
|
|
|
|
2019-06-25 15:16:26 +03:00
|
|
|
if (tty_init_restore())
|
|
|
|
goto err;
|
|
|
|
|
2018-08-30 14:00:21 +03:00
|
|
|
if (opts.cpu_cap & CPU_CAP_IMAGE) {
|
cpuinfo: x86 -- Add dump and validation of cpuinfo image, v2
On Wed, Oct 01, 2014 at 04:57:40PM +0400, Pavel Emelyanov wrote:
> On 10/01/2014 01:07 AM, Cyrill Gorcunov wrote:
> > On Tue, Sep 30, 2014 at 09:18:53PM +0400, Cyrill Gorcunov wrote:
> >> If a user requested criu to dump cpuinfo image then we
> >> write one on dump and verify on restore. At the moment
> >> we require all cpu feature bits to match the destination
> >> cpu in a sake of simplicity, but in future we need deps
> >> engine which would filer out bits and test if cpu we're
> >> restoring on is more capable than one we were dumping at
> >> allowing to proceed restore procedure.
> >>
> >> Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
> >
> > Updated to new img format
Something like attached?
>From 59272a9514311e6736cddee08d5f88aa95d49189 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov <gorcunov@openvz.org>
Date: Thu, 25 Sep 2014 16:04:10 +0400
Subject: [PATCH] cpuinfo: x86 -- Add dump and validation of cpuinfo image
If a user requested criu to dump cpuinfo image then we
write one on dump and verify on restore. At the moment
we require all cpu feature bits to match the destination
cpu in a sake of simplicity, but in future we need deps
engine which would filer out bits and test if cpu we're
restoring on is more capable than one we were dumping at
allowing to proceed restore procedure.
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2014-10-01 17:53:50 +04:00
|
|
|
if (cpu_validate_cpuinfo())
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2012-06-26 14:51:00 +04:00
|
|
|
if (prepare_task_entries() < 0)
|
2013-12-19 21:35:00 +04:00
|
|
|
goto err;
|
2012-06-26 14:51:00 +04:00
|
|
|
|
2012-04-05 15:34:31 +04:00
|
|
|
if (prepare_pstree() < 0)
|
2013-12-19 21:35:00 +04:00
|
|
|
goto err;
|
2011-09-23 12:00:45 +04:00
|
|
|
|
2018-01-10 17:00:32 +03:00
|
|
|
if (fdstore_init())
|
|
|
|
goto err;
|
|
|
|
|
2018-09-27 10:02:26 +03:00
|
|
|
if (inherit_fd_move_to_fdstore())
|
|
|
|
goto err;
|
|
|
|
|
2013-05-28 21:11:13 +04:00
|
|
|
if (crtools_prepare_shared() < 0)
|
2013-12-19 21:35:00 +04:00
|
|
|
goto err;
|
2012-09-17 20:06:06 +04:00
|
|
|
|
2021-07-02 17:31:39 +08:00
|
|
|
if (prepare_cgroup())
|
|
|
|
goto clean_cgroup;
|
|
|
|
|
2014-08-06 16:25:08 +04:00
|
|
|
if (criu_signals_setup() < 0)
|
2021-07-02 17:31:39 +08:00
|
|
|
goto clean_cgroup;
|
2014-08-06 16:25:08 +04:00
|
|
|
|
2016-08-15 10:21:50 +03:00
|
|
|
if (prepare_lazy_pages_socket() < 0)
|
2021-07-02 17:31:39 +08:00
|
|
|
goto clean_cgroup;
|
2016-08-15 10:21:50 +03:00
|
|
|
|
2015-03-19 14:50:55 +00:00
|
|
|
ret = restore_root_task(root_item);
|
2021-07-02 17:31:39 +08:00
|
|
|
clean_cgroup:
|
|
|
|
fini_cgroup();
|
2013-12-19 21:35:00 +04:00
|
|
|
err:
|
2014-02-27 20:58:23 +04:00
|
|
|
cr_plugin_fini(CR_PLUGIN_STAGE__RESTORE, ret);
|
2013-12-19 21:35:00 +04:00
|
|
|
return ret;
|
2011-09-23 12:00:45 +04:00
|
|
|
}
|
|
|
|
|
2015-06-25 23:42:57 +03:00
|
|
|
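/* Find the lowest range of vma_len bytes that is unused in both the current and the target task's address spaces. */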
static long restorer_get_vma_hint(struct list_head *tgt_vma_list,
|
2012-04-05 14:08:11 +04:00
|
|
|
struct list_head *self_vma_list, long vma_len)
|
2011-11-06 01:49:57 +04:00
|
|
|
{
|
2012-04-07 11:09:00 +04:00
|
|
|
struct vma_area *t_vma, *s_vma;
|
2012-04-05 14:08:11 +04:00
|
|
|
long prev_vma_end = 0;
|
2012-04-07 11:09:00 +04:00
|
|
|
struct vma_area end_vma;
|
2014-02-04 00:08:16 +04:00
|
|
|
VmaEntry end_e;
|
2012-04-07 11:09:00 +04:00
|
|
|
|
2014-02-04 00:08:16 +04:00
|
|
|
end_vma.e = &end_e;
|
2015-07-31 10:36:27 -04:00
|
|
|
end_e.start = end_e.end = kdat.task_size;
|
2016-12-23 12:15:48 +03:00
|
|
|
prev_vma_end = kdat.mmap_min_addr;
|
2011-11-06 01:49:57 +04:00
|
|
|
|
2012-04-07 11:09:00 +04:00
|
|
|
s_vma = list_first_entry(self_vma_list, struct vma_area, list);
|
|
|
|
t_vma = list_first_entry(tgt_vma_list, struct vma_area, list);
|
2012-03-02 19:28:13 +04:00
|
|
|
|
2012-04-07 11:09:00 +04:00
|
|
|
while (1) {
|
2014-02-04 00:08:16 +04:00
|
|
|
if (prev_vma_end + vma_len > s_vma->e->start) {
|
2012-04-07 11:09:00 +04:00
|
|
|
if (s_vma->list.next == self_vma_list) {
|
|
|
|
s_vma = &end_vma;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (s_vma == &end_vma)
|
|
|
|
break;
|
2014-02-04 00:08:16 +04:00
|
|
|
if (prev_vma_end < s_vma->e->end)
|
|
|
|
prev_vma_end = s_vma->e->end;
|
2017-05-11 12:10:42 +03:00
|
|
|
s_vma = vma_next(s_vma);
|
2012-04-07 11:09:00 +04:00
|
|
|
continue;
|
|
|
|
}
|
2012-03-02 19:28:13 +04:00
|
|
|
|
2014-02-04 00:08:16 +04:00
|
|
|
if (prev_vma_end + vma_len > t_vma->e->start) {
|
2012-04-07 11:09:00 +04:00
|
|
|
if (t_vma->list.next == tgt_vma_list) {
|
|
|
|
t_vma = &end_vma;
|
|
|
|
continue;
|
2011-11-06 01:49:57 +04:00
|
|
|
}
|
2012-04-07 11:09:00 +04:00
|
|
|
if (t_vma == &end_vma)
|
|
|
|
break;
|
2014-02-04 00:08:16 +04:00
|
|
|
if (prev_vma_end < t_vma->e->end)
|
|
|
|
prev_vma_end = t_vma->e->end;
|
2017-05-11 12:10:42 +03:00
|
|
|
t_vma = vma_next(t_vma);
|
2012-04-07 11:09:00 +04:00
|
|
|
continue;
|
2012-03-02 19:28:13 +04:00
|
|
|
}
|
|
|
|
|
2012-04-07 11:09:00 +04:00
|
|
|
return prev_vma_end;
|
2011-11-06 01:49:57 +04:00
|
|
|
}
|
2012-04-05 14:08:11 +04:00
|
|
|
|
|
|
|
return -1;
|
2011-11-06 01:49:57 +04:00
|
|
|
}
|
|
|
|
|
2012-01-24 16:45:19 +04:00
|
|
|
static inline int timeval_valid(struct timeval *tv)
|
|
|
|
{
|
|
|
|
return (tv->tv_sec >= 0) && ((unsigned long)tv->tv_usec < USEC_PER_SEC);
|
|
|
|
}
|
|
|
|
|
2014-04-15 21:58:49 +04:00
|
|
|
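/* Convert an ItimerEntry from the image into a validated struct itimerval. */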
static inline int decode_itimer(char *n, ItimerEntry *ie, struct itimerval *val)
|
2012-01-24 16:45:19 +04:00
|
|
|
{
|
|
|
|
if (ie->isec == 0 && ie->iusec == 0) {
|
|
|
|
memzero_p(val);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
val->it_interval.tv_sec = ie->isec;
|
|
|
|
val->it_interval.tv_usec = ie->iusec;
|
|
|
|
|
|
|
|
if (!timeval_valid(&val->it_interval)) {
|
|
|
|
pr_err("Invalid timer interval\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ie->vsec == 0 && ie->vusec == 0) {
|
|
|
|
/*
|
|
|
|
* Remaining time was too short. Set it to
|
|
|
|
 * the interval so the timer stays armed and keeps working.
|
|
|
|
*/
|
|
|
|
val->it_value.tv_sec = ie->isec;
|
|
|
|
val->it_value.tv_usec = ie->iusec;
|
|
|
|
} else {
|
|
|
|
val->it_value.tv_sec = ie->vsec;
|
|
|
|
val->it_value.tv_usec = ie->vusec;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!timeval_valid(&val->it_value)) {
|
|
|
|
pr_err("Invalid timer value\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
pr_info("Restored %s timer to %ld.%ld -> %ld.%ld\n", n,
|
|
|
|
val->it_value.tv_sec, val->it_value.tv_usec,
|
|
|
|
val->it_interval.tv_sec, val->it_interval.tv_usec);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-04-15 21:59:55 +04:00
|
|
|
/*
|
|
|
|
* Legacy itimers restore from CR_FD_ITIMERS
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int prepare_itimers_from_fd(int pid, struct task_restore_args *args)
|
2012-01-24 16:45:19 +04:00
|
|
|
{
|
2014-09-29 12:48:53 +04:00
|
|
|
int ret = -1;
|
|
|
|
struct cr_img *img;
|
2012-07-18 16:27:01 +04:00
|
|
|
ItimerEntry *ie;
|
2012-01-24 16:45:19 +04:00
|
|
|
|
2016-08-12 14:09:52 +03:00
|
|
|
if (!deprecated_ok("Itimers"))
|
|
|
|
return -1;
|
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
img = open_image(CR_FD_ITIMERS, O_RSTR, pid);
|
|
|
|
if (!img)
|
2014-09-29 12:48:10 +04:00
|
|
|
return -1;
|
2012-01-24 16:45:19 +04:00
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
ret = pb_read_one(img, &ie, PB_ITIMER);
|
2012-07-18 16:27:01 +04:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2014-04-15 21:58:49 +04:00
|
|
|
ret = decode_itimer("real", ie, &args->itimers[0]);
|
2012-07-18 16:27:01 +04:00
|
|
|
itimer_entry__free_unpacked(ie, NULL);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2012-01-24 16:45:19 +04:00
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
ret = pb_read_one(img, &ie, PB_ITIMER);
|
2012-07-18 16:27:01 +04:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2014-04-15 21:58:49 +04:00
|
|
|
ret = decode_itimer("virt", ie, &args->itimers[1]);
|
2012-07-18 16:27:01 +04:00
|
|
|
itimer_entry__free_unpacked(ie, NULL);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
ret = pb_read_one(img, &ie, PB_ITIMER);
|
2012-07-18 16:27:01 +04:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2014-04-15 21:58:49 +04:00
|
|
|
ret = decode_itimer("prof", ie, &args->itimers[2]);
|
2012-07-18 16:27:01 +04:00
|
|
|
itimer_entry__free_unpacked(ie, NULL);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
out:
|
2014-09-29 12:48:53 +04:00
|
|
|
close_image(img);
|
2012-01-24 16:45:19 +04:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-05-24 14:37:01 +03:00
|
|
|
static int prepare_itimers(int pid, struct task_restore_args *args, CoreEntry *core)
|
2014-04-15 21:59:55 +04:00
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
TaskTimersEntry *tte = core->tc->timers;
|
|
|
|
|
|
|
|
if (!tte)
|
|
|
|
return prepare_itimers_from_fd(pid, args);
|
|
|
|
|
|
|
|
ret |= decode_itimer("real", tte->real, &args->itimers[0]);
|
|
|
|
ret |= decode_itimer("virt", tte->virt, &args->itimers[1]);
|
|
|
|
ret |= decode_itimer("prof", tte->prof, &args->itimers[2]);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-06-27 23:32:24 +04:00
|
|
|
static inline int timespec_valid(struct timespec *ts)
|
|
|
|
{
|
|
|
|
return (ts->tv_sec >= 0) && ((unsigned long)ts->tv_nsec < NSEC_PER_SEC);
|
|
|
|
}
|
|
|
|
|
2014-04-15 21:59:05 +04:00
|
|
|
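/* Convert a PosixTimerEntry from the image into the restorer's restore_posix_timer. */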
static inline int decode_posix_timer(PosixTimerEntry *pte,
|
2013-06-27 23:32:24 +04:00
|
|
|
struct restore_posix_timer *pt)
|
|
|
|
{
|
|
|
|
pt->val.it_interval.tv_sec = pte->isec;
|
|
|
|
pt->val.it_interval.tv_nsec = pte->insec;
|
|
|
|
|
|
|
|
if (!timespec_valid(&pt->val.it_interval)) {
|
|
|
|
pr_err("Invalid timer interval(posix)\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pte->vsec == 0 && pte->vnsec == 0) {
|
2018-02-07 02:40:09 +00:00
|
|
|
/*
|
|
|
|
* Remaining time was too short. Set it to
|
|
|
|
 * the interval so the timer stays armed and keeps working.
|
|
|
|
*/
|
2013-06-27 23:32:24 +04:00
|
|
|
pt->val.it_value.tv_sec = pte->isec;
|
|
|
|
pt->val.it_value.tv_nsec = pte->insec;
|
|
|
|
} else {
|
|
|
|
pt->val.it_value.tv_sec = pte->vsec;
|
|
|
|
pt->val.it_value.tv_nsec = pte->vnsec;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!timespec_valid(&pt->val.it_value)) {
|
|
|
|
pr_err("Invalid timer value(posix)\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
pt->spt.it_id = pte->it_id;
|
|
|
|
pt->spt.clock_id = pte->clock_id;
|
|
|
|
pt->spt.si_signo = pte->si_signo;
|
|
|
|
pt->spt.it_sigev_notify = pte->it_sigev_notify;
|
|
|
|
pt->spt.sival_ptr = decode_pointer(pte->sival_ptr);
|
2021-06-29 02:02:51 +00:00
|
|
|
pt->spt.notify_thread_id = pte->notify_thread_id;
|
2013-06-27 23:32:24 +04:00
|
|
|
pt->overrun = pte->overrun;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-06-27 23:32:25 +04:00
|
|
|
static int cmp_posix_timer_proc_id(const void *p1, const void *p2)
|
|
|
|
{
|
|
|
|
return ((struct restore_posix_timer *)p1)->spt.it_id - ((struct restore_posix_timer *)p2)->spt.it_id;
|
|
|
|
}
|
|
|
|
|
2016-05-24 14:35:35 +03:00
|
|
|
static void sort_posix_timers(struct task_restore_args *ta)
|
2013-06-27 23:32:25 +04:00
|
|
|
{
|
2016-05-24 14:35:35 +03:00
|
|
|
void *tmem;
|
|
|
|
|
2014-04-15 21:59:55 +04:00
|
|
|
/*
|
|
|
|
 * This is required by the restorer's create_posix_timers():
|
|
|
|
 * it probes the timers one by one until the desired ID comes up,
|
|
|
|
 * since the kernel doesn't provide an API for creating a timer
|
|
|
|
 * with a given ID.
|
|
|
|
*/
|
2014-04-15 21:59:05 +04:00
|
|
|
|
2016-05-24 14:35:35 +03:00
|
|
|
if (ta->posix_timers_n > 0) {
|
|
|
|
tmem = rst_mem_remap_ptr((unsigned long)ta->posix_timers, RM_PRIVATE);
|
|
|
|
qsort(tmem, ta->posix_timers_n,
|
2014-04-15 21:59:55 +04:00
|
|
|
sizeof(struct restore_posix_timer),
|
|
|
|
cmp_posix_timer_proc_id);
|
2016-05-24 14:35:35 +03:00
|
|
|
}
|
2014-04-15 21:59:55 +04:00
|
|
|
}
|
2014-04-15 21:59:05 +04:00
|
|
|
|
2014-04-15 21:59:55 +04:00
|
|
|
/*
|
|
|
|
* Legacy posix timers restoration from CR_FD_POSIX_TIMERS
|
|
|
|
*/
|
2014-04-15 21:59:05 +04:00
|
|
|
|
2016-05-24 14:35:35 +03:00
|
|
|
static int prepare_posix_timers_from_fd(int pid, struct task_restore_args *ta)
|
2014-04-15 21:59:55 +04:00
|
|
|
{
|
2014-09-29 12:48:53 +04:00
|
|
|
struct cr_img *img;
|
2014-04-15 21:59:55 +04:00
|
|
|
int ret = -1;
|
|
|
|
struct restore_posix_timer *t;
|
2014-04-15 21:59:05 +04:00
|
|
|
|
2016-08-12 14:09:52 +03:00
|
|
|
if (!deprecated_ok("Posix timers"))
|
|
|
|
return -1;
|
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
img = open_image(CR_FD_POSIX_TIMERS, O_RSTR, pid);
|
2015-03-06 18:02:43 +03:00
|
|
|
if (!img)
|
|
|
|
return -1;
|
2013-06-27 23:32:25 +04:00
|
|
|
|
2016-05-24 14:35:35 +03:00
|
|
|
ta->posix_timers_n = 0;
|
2013-06-27 23:32:25 +04:00
|
|
|
while (1) {
|
|
|
|
PosixTimerEntry *pte;
|
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
ret = pb_read_one_eof(img, &pte, PB_POSIX_TIMER);
|
2014-04-15 21:59:38 +04:00
|
|
|
if (ret <= 0)
|
|
|
|
break;
|
2013-06-27 23:32:25 +04:00
|
|
|
|
2016-02-11 01:42:00 +03:00
|
|
|
t = rst_mem_alloc(sizeof(struct restore_posix_timer), RM_PRIVATE);
|
2013-07-05 15:00:08 +04:00
|
|
|
if (!t)
|
2014-04-15 21:59:38 +04:00
|
|
|
break;
|
2013-06-27 23:32:25 +04:00
|
|
|
|
2014-04-15 21:59:05 +04:00
|
|
|
ret = decode_posix_timer(pte, t);
|
2013-06-27 23:32:25 +04:00
|
|
|
if (ret < 0)
|
2014-04-15 21:59:38 +04:00
|
|
|
break;
|
2013-06-27 23:32:25 +04:00
|
|
|
|
|
|
|
posix_timer_entry__free_unpacked(pte, NULL);
|
2016-05-24 14:35:35 +03:00
|
|
|
ta->posix_timers_n++;
|
2013-06-27 23:32:25 +04:00
|
|
|
}
|
2014-04-15 21:59:38 +04:00
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
close_image(img);
|
2014-04-15 21:59:55 +04:00
|
|
|
if (!ret)
|
2016-05-24 14:35:35 +03:00
|
|
|
sort_posix_timers(ta);
|
2014-04-15 21:59:55 +04:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-05-24 14:35:35 +03:00
|
|
|
static int prepare_posix_timers(int pid, struct task_restore_args *ta, CoreEntry *core)
|
2014-04-15 21:59:55 +04:00
|
|
|
{
|
|
|
|
int i, ret = -1;
|
|
|
|
TaskTimersEntry *tte = core->tc->timers;
|
|
|
|
struct restore_posix_timer *t;
|
2013-07-05 15:00:08 +04:00
|
|
|
|
2016-05-24 14:35:35 +03:00
|
|
|
ta->posix_timers = (struct restore_posix_timer *)rst_mem_align_cpos(RM_PRIVATE);
|
2014-04-15 21:59:55 +04:00
|
|
|
|
|
|
|
if (!tte)
|
2016-05-24 14:35:35 +03:00
|
|
|
return prepare_posix_timers_from_fd(pid, ta);
|
2014-04-15 21:59:55 +04:00
|
|
|
|
2016-05-24 14:35:35 +03:00
|
|
|
ta->posix_timers_n = tte->n_posix;
|
|
|
|
for (i = 0; i < ta->posix_timers_n; i++) {
|
2016-02-11 01:42:00 +03:00
|
|
|
t = rst_mem_alloc(sizeof(struct restore_posix_timer), RM_PRIVATE);
|
2014-04-15 21:59:55 +04:00
|
|
|
if (!t)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (decode_posix_timer(tte->posix[i], t))
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
2016-05-24 14:35:35 +03:00
|
|
|
sort_posix_timers(ta);
|
2014-04-15 21:59:55 +04:00
|
|
|
out:
|
2013-06-27 23:32:25 +04:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-07-19 12:35:25 +04:00
|
|
|
static inline int verify_cap_size(CredsEntry *ce)
|
|
|
|
{
|
|
|
|
return ((ce->n_cap_inh == CR_CAP_SIZE) && (ce->n_cap_eff == CR_CAP_SIZE) &&
|
|
|
|
(ce->n_cap_prm == CR_CAP_SIZE) && (ce->n_cap_bnd == CR_CAP_SIZE));
|
|
|
|
}
|
|
|
|
|
2013-11-08 17:32:07 +04:00
|
|
|
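/* Pass mm details (saved auxv, exe file descriptor, THP flag) into the restorer arguments. */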
static int prepare_mm(pid_t pid, struct task_restore_args *args)
|
2012-04-09 14:51:37 +04:00
|
|
|
{
|
2014-02-04 00:08:44 +04:00
|
|
|
int exe_fd, i, ret = -1;
|
2014-09-29 22:04:39 +04:00
|
|
|
MmEntry *mm = rsti(current)->mm;
|
2012-04-09 14:51:37 +04:00
|
|
|
|
2012-07-18 20:54:00 +04:00
|
|
|
args->mm = *mm;
|
|
|
|
args->mm.n_mm_saved_auxv = 0;
|
|
|
|
args->mm.mm_saved_auxv = NULL;
|
|
|
|
|
2012-10-29 19:54:12 +04:00
|
|
|
if (mm->n_mm_saved_auxv > AT_VECTOR_SIZE) {
|
2012-07-18 20:54:00 +04:00
|
|
|
pr_err("Image corrupted on pid %d\n", pid);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2013-01-09 17:40:33 +04:00
|
|
|
args->mm_saved_auxv_size = mm->n_mm_saved_auxv*sizeof(auxv_t);
|
|
|
|
for (i = 0; i < mm->n_mm_saved_auxv; ++i) {
|
|
|
|
args->mm_saved_auxv[i] = (auxv_t)mm->mm_saved_auxv[i];
|
|
|
|
}
|
2012-07-18 20:54:00 +04:00
|
|
|
|
2014-02-04 00:08:44 +04:00
|
|
|
exe_fd = open_reg_by_id(mm->exe_file_id);
|
2012-04-09 15:52:00 +04:00
|
|
|
if (exe_fd < 0)
|
2012-07-18 20:54:00 +04:00
|
|
|
goto out;
|
2012-04-09 15:52:00 +04:00
|
|
|
|
|
|
|
args->fd_exe_link = exe_fd;
|
2017-06-28 09:43:27 +03:00
|
|
|
|
|
|
|
args->has_thp_enabled = rsti(current)->has_thp_enabled;
|
|
|
|
|
2012-07-18 20:54:00 +04:00
|
|
|
ret = 0;
|
|
|
|
out:
|
|
|
|
return ret;
|
2012-04-09 14:51:37 +04:00
|
|
|
}
|
|
|
|
|
2012-09-14 14:51:40 +04:00
|
|
|
static void *restorer;
|
|
|
|
static unsigned long restorer_len;
|
|
|
|
|
|
|
|
static int prepare_restorer_blob(void)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
 * We map an anonymous mapping rather than mremap-ing the restorer itself later.
|
2013-05-09 10:58:04 -07:00
|
|
|
 * Otherwise the restorer vma would stay backed by the criu binary, which
|
2012-09-14 14:51:40 +04:00
|
|
|
 * in turn would make the set-exe-file prctl fail with EBUSY.
|
|
|
|
*/
|
|
|
|
|
2020-07-17 02:13:35 +00:00
|
|
|
struct parasite_blob_desc pbd;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We pass native=true, which is then used to set the value of
|
|
|
|
* pbd.parasite_ip_off. We don't use parasite_ip_off, so the value we
|
|
|
|
* pass as native argument is not relevant.
|
|
|
|
*/
|
|
|
|
restorer_setup_c_header_desc(&pbd, true);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* args_off is the offset where the binary blob with its GOT table
|
|
|
|
* ends. As we don't do RPC, parasite sections after args_off can be
|
|
|
|
* ignored. See compel_infect() for a description of the parasite
|
|
|
|
* memory layout.
|
|
|
|
*/
|
|
|
|
restorer_len = round_up(pbd.hdr.args_off, page_size());
|
|
|
|
|
2012-09-14 14:51:40 +04:00
|
|
|
restorer = mmap(NULL, restorer_len,
|
|
|
|
PROT_READ | PROT_WRITE | PROT_EXEC,
|
2018-11-09 21:51:23 +00:00
|
|
|
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
|
2012-09-14 14:51:40 +04:00
|
|
|
if (restorer == MAP_FAILED) {
|
2012-11-23 16:43:33 +04:00
|
|
|
pr_perror("Can't map restorer code");
|
2012-09-14 14:51:40 +04:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-07-17 02:13:35 +00:00
|
|
|
memcpy(restorer, pbd.hdr.mem, pbd.hdr.bsize);
|
|
|
|
|
2012-09-14 14:51:40 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
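/* Move the prepared restorer code to its final address and apply relocations there. */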
static int remap_restorer_blob(void *addr)
|
|
|
|
{
|
2020-07-17 00:01:41 +00:00
|
|
|
struct parasite_blob_desc pbd;
|
2012-09-14 14:51:40 +04:00
|
|
|
void *mem;
|
|
|
|
|
|
|
|
mem = mremap(restorer, restorer_len, restorer_len,
|
|
|
|
MREMAP_FIXED | MREMAP_MAYMOVE, addr);
|
|
|
|
if (mem != addr) {
|
|
|
|
pr_perror("Can't remap restorer blob");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-07-17 00:01:41 +00:00
|
|
|
/*
|
|
|
|
* Pass native=true, which is then used to set the value of
|
|
|
|
* pbd.parasite_ip_off. parasite_ip_off is unused in restorer
|
|
|
|
* as compat (ia32) tasks are restored from native (x86_64)
|
|
|
|
* mode, so the value we pass as native argument is not relevant.
|
|
|
|
*/
|
|
|
|
restorer_setup_c_header_desc(&pbd, true);
|
|
|
|
compel_relocs_apply(addr, addr, &pbd);
|
compel hgen: use for ARM, kill gen-offsets.sh
I am not quite sure how that happened, but compel hgen was not used for
ARM/ARM64, instead there's a simple version of it, called
gen-offsets.sh. The main difference is, shell script doesn't handle ELF
relocations, which apparently is not (currently?) needed for ARM.
It's bad to maintain two tools for the same functionality, so this
patch kills gen-offsets.sh and related stuff, making compel hgen
working on ARM. ELF relocations are still not handled, this code
is #ifdef-ed out for now and can be fixed to work on ARM later.
This patch also kills some macros and defines that seem obsoleted
now. For example, compel_relocs_apply() is now called unconditionally,
as it handles the trivial case of 0 relocs just fine.
Now, I checked that the blob headers generated by compel hgen and
gen-offsets.h are similar (i.e. generated blob code and values defined
are the same), but haven't done much above that.
Signed-off-by: Kir Kolyshkin <kir@openvz.org>
Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
2016-12-08 01:44:29 -08:00
|
|
|
|
2012-09-14 14:51:40 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-17 00:23:25 +04:00
|
|
|
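/* Return non-zero if the policy/nice/prio combination is consistent. */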
static int validate_sched_parm(struct rst_sched_param *sp)
|
|
|
|
{
|
|
|
|
if ((sp->nice < -20) || (sp->nice > 19))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
switch (sp->policy) {
|
|
|
|
case SCHED_RR:
|
|
|
|
case SCHED_FIFO:
|
|
|
|
return ((sp->prio > 0) && (sp->prio < 100));
|
|
|
|
case SCHED_IDLE:
|
|
|
|
case SCHED_OTHER:
|
|
|
|
case SCHED_BATCH:
|
|
|
|
return sp->prio == 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int prep_sched_info(struct rst_sched_param *sp, ThreadCoreEntry *tc)
|
|
|
|
{
|
|
|
|
if (!tc->has_sched_policy) {
|
|
|
|
sp->policy = SCHED_OTHER;
|
|
|
|
sp->nice = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
sp->policy = tc->sched_policy;
|
|
|
|
sp->nice = tc->sched_nice;
|
|
|
|
sp->prio = tc->sched_prio;
|
|
|
|
|
|
|
|
if (!validate_sched_parm(sp)) {
|
|
|
|
pr_err("Inconsistent sched params received (%d.%d.%d)\n",
|
|
|
|
sp->policy, sp->nice, sp->prio);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-03 16:39:40 -07:00
|
|
|
static rlim_t decode_rlim(rlim_t ival)
|
2013-01-10 20:08:38 +04:00
|
|
|
{
|
|
|
|
return ival == -1 ? RLIM_INFINITY : ival;
|
|
|
|
}
|
|
|
|
|
2014-04-15 22:00:28 +04:00
|
|
|
/*
|
|
|
|
* Legacy rlimits restore from CR_FD_RLIMIT
|
|
|
|
*/
|
|
|
|
|
2016-05-24 14:36:02 +03:00
|
|
|
static int prepare_rlimits_from_fd(int pid, struct task_restore_args *ta)
|
2013-01-10 20:08:38 +04:00
|
|
|
{
|
2013-07-09 00:21:31 +04:00
|
|
|
struct rlimit *r;
|
2014-09-29 12:48:53 +04:00
|
|
|
int ret;
|
|
|
|
struct cr_img *img;
|
2013-01-10 20:08:38 +04:00
|
|
|
|
2016-08-12 14:09:52 +03:00
|
|
|
if (!deprecated_ok("Rlimits"))
|
|
|
|
return -1;
|
|
|
|
|
2014-03-13 14:30:48 +04:00
|
|
|
/*
|
|
|
|
* Old image -- read from the file.
|
|
|
|
*/
|
2015-03-06 18:01:54 +03:00
|
|
|
img = open_image(CR_FD_RLIMIT, O_RSTR, pid);
|
2015-03-06 18:02:43 +03:00
|
|
|
if (!img)
|
2013-01-10 20:08:38 +04:00
|
|
|
return -1;
|
|
|
|
|
2016-05-24 14:36:02 +03:00
|
|
|
ta->rlims_n = 0;
|
2013-01-10 20:08:38 +04:00
|
|
|
while (1) {
|
|
|
|
RlimitEntry *re;
|
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
ret = pb_read_one_eof(img, &re, PB_RLIMIT);
|
2013-01-10 20:08:38 +04:00
|
|
|
if (ret <= 0)
|
|
|
|
break;
|
|
|
|
|
2016-02-11 01:42:00 +03:00
|
|
|
r = rst_mem_alloc(sizeof(*r), RM_PRIVATE);
|
2013-07-09 00:21:31 +04:00
|
|
|
if (!r) {
|
|
|
|
pr_err("Can't allocate memory for resource %d\n",
|
2016-05-24 14:36:02 +03:00
|
|
|
ta->rlims_n);
|
2013-07-09 00:21:31 +04:00
|
|
|
return -1;
|
2013-01-10 20:08:38 +04:00
|
|
|
}
|
|
|
|
|
2013-07-09 00:21:31 +04:00
|
|
|
r->rlim_cur = decode_rlim(re->cur);
|
|
|
|
r->rlim_max = decode_rlim(re->max);
|
|
|
|
if (r->rlim_cur > r->rlim_max) {
|
2013-11-03 23:40:12 +04:00
|
|
|
pr_err("Can't restore cur > max for %d.%d\n",
|
2016-05-24 14:36:02 +03:00
|
|
|
pid, ta->rlims_n);
|
2013-07-09 00:21:31 +04:00
|
|
|
r->rlim_cur = r->rlim_max;
|
2013-01-10 20:08:38 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
rlimit_entry__free_unpacked(re, NULL);
|
|
|
|
|
2016-05-24 14:36:02 +03:00
|
|
|
ta->rlims_n++;
|
2013-01-10 20:08:38 +04:00
|
|
|
}
|
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
close_image(img);
|
2013-11-03 23:40:12 +04:00
|
|
|
|
|
|
|
return 0;
|
2013-01-10 20:08:38 +04:00
|
|
|
}
|
|
|
|
|
2016-05-24 14:36:02 +03:00
|
|
|
static int prepare_rlimits(int pid, struct task_restore_args *ta, CoreEntry *core)
|
2014-04-15 22:00:28 +04:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
TaskRlimitsEntry *rls = core->tc->rlimits;
|
2016-04-02 22:21:00 +03:00
|
|
|
struct rlimit64 *r;
|
2014-04-15 22:00:28 +04:00
|
|
|
|
2016-05-24 14:36:02 +03:00
|
|
|
ta->rlims = (struct rlimit64 *)rst_mem_align_cpos(RM_PRIVATE);
|
2014-04-15 22:00:28 +04:00
|
|
|
|
|
|
|
if (!rls)
|
2016-05-24 14:36:02 +03:00
|
|
|
return prepare_rlimits_from_fd(pid, ta);
|
2014-04-15 22:00:28 +04:00
|
|
|
|
|
|
|
for (i = 0; i < rls->n_rlimits; i++) {
|
2016-02-11 01:42:00 +03:00
|
|
|
r = rst_mem_alloc(sizeof(*r), RM_PRIVATE);
|
2014-04-15 22:00:28 +04:00
|
|
|
if (!r) {
|
|
|
|
pr_err("Can't allocate memory for resource %d\n", i);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
r->rlim_cur = decode_rlim(rls->rlimits[i]->cur);
|
|
|
|
r->rlim_max = decode_rlim(rls->rlimits[i]->max);
|
|
|
|
|
|
|
|
if (r->rlim_cur > r->rlim_max) {
|
|
|
|
pr_warn("Can't restore cur > max for %d.%d\n", pid, i);
|
|
|
|
r->rlim_cur = r->rlim_max;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-24 14:36:02 +03:00
|
|
|
ta->rlims_n = rls->n_rlimits;
|
2014-04-15 22:00:28 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-08-15 16:02:15 +03:00
|
|
|
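/* Copy one queued siginfo from the image into the restorer's private memory. */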
static int signal_to_mem(SiginfoEntry *sie)
|
|
|
|
{
|
|
|
|
siginfo_t *info, *t;
|
|
|
|
|
|
|
|
info = (siginfo_t *) sie->siginfo.data;
|
2016-02-11 01:42:00 +03:00
|
|
|
t = rst_mem_alloc(sizeof(siginfo_t), RM_PRIVATE);
|
2014-08-15 16:02:15 +03:00
|
|
|
if (!t)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
memcpy(t, info, sizeof(*info));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-11-03 23:47:51 +04:00
|
|
|
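/* Legacy path: read queued signals from a separate signal image file. */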
static int open_signal_image(int type, pid_t pid, unsigned int *nr)
|
2013-03-25 23:39:49 +04:00
|
|
|
{
|
2014-09-29 12:48:53 +04:00
|
|
|
int ret;
|
|
|
|
struct cr_img *img;
|
2013-03-25 23:39:49 +04:00
|
|
|
|
2015-03-06 18:01:54 +03:00
|
|
|
img = open_image(type, O_RSTR, pid);
|
2015-03-06 18:02:43 +03:00
|
|
|
if (!img)
|
|
|
|
return -1;
|
2013-03-25 23:39:49 +04:00
|
|
|
|
2013-07-05 15:02:46 +04:00
|
|
|
*nr = 0;
|
2013-03-25 23:39:49 +04:00
|
|
|
while (1) {
|
|
|
|
SiginfoEntry *sie;
|
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
ret = pb_read_one_eof(img, &sie, PB_SIGINFO);
|
2013-03-25 23:39:49 +04:00
|
|
|
if (ret <= 0)
|
|
|
|
break;
|
|
|
|
if (sie->siginfo.len != sizeof(siginfo_t)) {
|
2015-07-15 12:11:00 +03:00
|
|
|
pr_err("Unknown image format\n");
|
2013-03-25 23:39:49 +04:00
|
|
|
ret = -1;
|
|
|
|
break;
|
|
|
|
}
|
2014-08-15 16:02:15 +03:00
|
|
|
|
|
|
|
ret = signal_to_mem(sie);
|
|
|
|
if (ret)
|
2013-07-05 15:02:46 +04:00
|
|
|
break;
|
2013-03-25 23:39:49 +04:00
|
|
|
|
|
|
|
(*nr)++;
|
|
|
|
|
|
|
|
siginfo_entry__free_unpacked(sie, NULL);
|
|
|
|
}
|
|
|
|
|
2014-09-29 12:48:53 +04:00
|
|
|
close_image(img);
|
2013-03-25 23:39:49 +04:00
|
|
|
|
2013-07-05 15:02:46 +04:00
|
|
|
return ret ? : 0;
|
2013-03-25 23:39:49 +04:00
|
|
|
}
|
|
|
|
|
2014-08-15 16:02:15 +03:00
|
|
|
static int prepare_one_signal_queue(SignalQueueEntry *sqe, unsigned int *nr)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < sqe->n_signals; i++)
|
|
|
|
if (signal_to_mem(sqe->signals[i]))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
*nr = sqe->n_signals;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-24 14:35:48 +03:00
|
|
|
static unsigned int *siginfo_priv_nr; /* FIXME -- put directly on thread_args */
|
2013-11-03 23:47:51 +04:00
|
|
|
|
2016-05-24 14:35:48 +03:00
|
|
|
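/* Collect shared and per-thread pending signals into restorer memory. */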
static int prepare_signals(int pid, struct task_restore_args *ta, CoreEntry *leader_core)
|
2013-11-03 23:47:51 +04:00
|
|
|
{
|
|
|
|
int ret = -1, i;
|
|
|
|
|
2016-05-24 14:35:48 +03:00
|
|
|
ta->siginfo = (siginfo_t *)rst_mem_align_cpos(RM_PRIVATE);
|
2013-11-03 23:47:51 +04:00
|
|
|
siginfo_priv_nr = xmalloc(sizeof(int) * current->nr_threads);
|
|
|
|
if (siginfo_priv_nr == NULL)
|
|
|
|
goto out;
|
|
|
|
|
2014-08-15 16:02:15 +03:00
|
|
|
/* Prepare shared signals */
|
|
|
|
if (!leader_core->tc->signals_s)/*backward compatibility*/
|
2016-05-24 14:35:48 +03:00
|
|
|
ret = open_signal_image(CR_FD_SIGNAL, pid, &ta->siginfo_n);
|
2014-08-15 16:02:15 +03:00
|
|
|
else
|
2016-05-24 14:35:48 +03:00
|
|
|
ret = prepare_one_signal_queue(leader_core->tc->signals_s, &ta->siginfo_n);
|
2014-08-15 16:02:15 +03:00
|
|
|
|
2013-11-03 23:47:51 +04:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
for (i = 0; i < current->nr_threads; i++) {
|
2014-08-15 16:02:15 +03:00
|
|
|
if (!current->core[i]->thread_core->signals_p)/*backward compatibility*/
|
|
|
|
ret = open_signal_image(CR_FD_PSIGNAL,
|
2017-01-25 18:29:04 +03:00
|
|
|
current->threads[i].ns[0].virt, &siginfo_priv_nr[i]);
|
2014-08-15 16:02:15 +03:00
|
|
|
else
|
|
|
|
ret = prepare_one_signal_queue(current->core[i]->thread_core->signals_p,
|
|
|
|
&siginfo_priv_nr[i]);
|
2013-11-03 23:47:51 +04:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-12-25 22:43:14 +04:00
|
|
|
extern void __gcov_flush(void) __attribute__((weak));
|
|
|
|
void __gcov_flush(void) {}
|
|
|
|
|
2015-12-21 14:08:00 +03:00
|
|
|
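/* Fix up a thread's creds_args pointers after the rst memory has been remapped. */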
static void rst_reloc_creds(struct thread_restore_args *thread_args,
|
|
|
|
unsigned long *creds_pos_next)
|
|
|
|
{
|
|
|
|
struct thread_creds_args *args;
|
|
|
|
|
|
|
|
if (unlikely(!*creds_pos_next))
|
|
|
|
return;
|
|
|
|
|
|
|
|
args = rst_mem_remap_ptr(*creds_pos_next, RM_PRIVATE);
|
|
|
|
|
|
|
|
if (args->lsm_profile)
|
|
|
|
args->lsm_profile = rst_mem_remap_ptr(args->mem_lsm_profile_pos, RM_PRIVATE);
|
2019-05-02 13:41:46 +00:00
|
|
|
if (args->lsm_sockcreate)
|
|
|
|
args->lsm_sockcreate = rst_mem_remap_ptr(args->mem_lsm_sockcreate_pos, RM_PRIVATE);
|
2015-12-21 14:08:00 +03:00
|
|
|
if (args->groups)
|
|
|
|
args->groups = rst_mem_remap_ptr(args->mem_groups_pos, RM_PRIVATE);
|
|
|
|
|
|
|
|
*creds_pos_next = args->mem_pos_next;
|
|
|
|
thread_args->creds_args = args;
|
|
|
|
}
|
|
|
|
|
2020-06-12 10:20:28 -06:00
|
|
|
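/* Check whether the current supplementary groups already match the ones from the image. */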
static bool groups_match(gid_t* groups, int n_groups)
|
|
|
|
{
|
|
|
|
int n, len;
|
|
|
|
bool ret;
|
|
|
|
gid_t* gids;
|
|
|
|
|
|
|
|
n = getgroups(0, NULL);
|
|
|
|
if (n == -1) {
|
|
|
|
pr_perror("Failed to get number of supplementary groups");
|
2020-10-13 17:11:52 +00:00
|
|
|
return false;
|
2020-06-12 10:20:28 -06:00
|
|
|
}
|
|
|
|
if (n != n_groups)
|
|
|
|
return false;
|
|
|
|
if (n == 0)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
len = n * sizeof(gid_t);
|
|
|
|
gids = xmalloc(len);
|
|
|
|
if (gids == NULL)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
n = getgroups(n, gids);
|
|
|
|
if (n == -1) {
|
|
|
|
pr_perror("Failed to get supplementary groups");
|
|
|
|
ret = false;
|
|
|
|
} else {
|
|
|
|
/* getgroups sorts gids, so it is safe to memcmp gid arrays */
|
|
|
|
ret = !memcmp(gids, groups, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
xfree(gids);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-12-21 14:08:00 +03:00
|
|
|
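/* Build a thread_creds_args blob in restorer memory from a CredsEntry, including LSM profiles and supplementary groups. */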
static struct thread_creds_args *
|
2015-12-24 22:42:00 +03:00
|
|
|
rst_prep_creds_args(CredsEntry *ce, unsigned long *prev_pos)
|
2015-12-21 14:08:00 +03:00
|
|
|
{
|
2016-02-11 01:42:00 +03:00
|
|
|
unsigned long this_pos;
|
2015-12-21 14:08:00 +03:00
|
|
|
struct thread_creds_args *args;
|
|
|
|
|
|
|
|
if (!verify_cap_size(ce)) {
|
|
|
|
pr_err("Caps size mismatch %d %d %d %d\n",
|
|
|
|
(int)ce->n_cap_inh, (int)ce->n_cap_eff,
|
|
|
|
(int)ce->n_cap_prm, (int)ce->n_cap_bnd);
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
|
2016-02-11 01:42:00 +03:00
|
|
|
this_pos = rst_mem_align_cpos(RM_PRIVATE);
|
2015-12-21 14:08:00 +03:00
|
|
|
|
2016-02-11 01:42:00 +03:00
|
|
|
args = rst_mem_alloc(sizeof(*args), RM_PRIVATE);
|
2015-12-21 14:08:00 +03:00
|
|
|
if (!args)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
args->cap_last_cap = kdat.last_cap;
|
|
|
|
memcpy(&args->creds, ce, sizeof(args->creds));
|
|
|
|
|
|
|
|
if (ce->lsm_profile || opts.lsm_supplied) {
|
2016-02-19 18:49:00 +03:00
|
|
|
char *rendered = NULL, *profile;
|
2015-12-21 14:08:00 +03:00
|
|
|
|
|
|
|
profile = ce->lsm_profile;
|
|
|
|
if (opts.lsm_supplied)
|
|
|
|
profile = opts.lsm_profile;
|
|
|
|
|
|
|
|
if (validate_lsm(profile) < 0)
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
2016-02-08 08:57:40 -07:00
|
|
|
if (profile && render_lsm_profile(profile, &rendered)) {
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rendered) {
|
2015-12-21 14:08:00 +03:00
|
|
|
size_t lsm_profile_len;
|
2015-12-24 22:42:00 +03:00
|
|
|
char *lsm_profile;
|
2015-12-21 14:08:00 +03:00
|
|
|
|
2016-02-11 01:42:00 +03:00
|
|
|
args->mem_lsm_profile_pos = rst_mem_align_cpos(RM_PRIVATE);
|
2015-12-21 14:08:00 +03:00
|
|
|
lsm_profile_len = strlen(rendered);
|
2015-12-24 22:42:00 +03:00
|
|
|
lsm_profile = rst_mem_alloc(lsm_profile_len + 1, RM_PRIVATE);
|
|
|
|
if (!lsm_profile) {
|
2015-12-21 14:08:00 +03:00
|
|
|
xfree(rendered);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
2015-12-24 22:42:00 +03:00
|
|
|
args = rst_mem_remap_ptr(this_pos, RM_PRIVATE);
|
|
|
|
args->lsm_profile = lsm_profile;
|
2020-02-06 18:01:00 +00:00
|
|
|
strlcpy(args->lsm_profile, rendered, lsm_profile_len + 1);
|
2015-12-21 14:08:00 +03:00
|
|
|
xfree(rendered);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
args->lsm_profile = NULL;
|
|
|
|
args->mem_lsm_profile_pos = 0;
|
|
|
|
}
|
|
|
|
|
2019-05-02 13:41:46 +00:00
|
|
|
if (ce->lsm_sockcreate) {
|
|
|
|
char *rendered = NULL;
|
|
|
|
char *profile;
|
|
|
|
|
|
|
|
profile = ce->lsm_sockcreate;
|
|
|
|
|
|
|
|
if (validate_lsm(profile) < 0)
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
if (profile && render_lsm_profile(profile, &rendered)) {
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
if (rendered) {
|
|
|
|
size_t lsm_sockcreate_len;
|
|
|
|
char *lsm_sockcreate;
|
|
|
|
|
|
|
|
args->mem_lsm_sockcreate_pos = rst_mem_align_cpos(RM_PRIVATE);
|
|
|
|
lsm_sockcreate_len = strlen(rendered);
|
|
|
|
lsm_sockcreate = rst_mem_alloc(lsm_sockcreate_len + 1, RM_PRIVATE);
|
|
|
|
if (!lsm_sockcreate) {
|
|
|
|
xfree(rendered);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
|
|
|
args = rst_mem_remap_ptr(this_pos, RM_PRIVATE);
|
|
|
|
args->lsm_sockcreate = lsm_sockcreate;
|
2020-02-06 18:01:00 +00:00
|
|
|
strlcpy(args->lsm_sockcreate, rendered, lsm_sockcreate_len + 1);
|
2019-05-02 13:41:46 +00:00
|
|
|
xfree(rendered);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
args->lsm_sockcreate = NULL;
|
|
|
|
args->mem_lsm_sockcreate_pos = 0;
|
|
|
|
}
|
|
|
|
|
2015-12-21 14:08:00 +03:00
|
|
|
/*
|
2016-08-04 14:54:56 -07:00
|
|
|
* Zap fields which we can't use.
|
2015-12-21 14:08:00 +03:00
|
|
|
*/
|
|
|
|
args->creds.cap_inh = NULL;
|
|
|
|
args->creds.cap_eff = NULL;
|
|
|
|
args->creds.cap_prm = NULL;
|
|
|
|
args->creds.cap_bnd = NULL;
|
|
|
|
args->creds.groups = NULL;
|
|
|
|
args->creds.lsm_profile = NULL;
|
|
|
|
|
|
|
|
memcpy(args->cap_inh, ce->cap_inh, sizeof(args->cap_inh));
|
|
|
|
memcpy(args->cap_eff, ce->cap_eff, sizeof(args->cap_eff));
|
|
|
|
memcpy(args->cap_prm, ce->cap_prm, sizeof(args->cap_prm));
|
|
|
|
memcpy(args->cap_bnd, ce->cap_bnd, sizeof(args->cap_bnd));
|
|
|
|
|
2020-06-12 10:20:28 -06:00
|
|
|
if (ce->n_groups && !groups_match(ce->groups, ce->n_groups)) {
|
2015-12-24 22:42:00 +03:00
|
|
|
unsigned int *groups;
|
|
|
|
|
2016-02-11 01:42:00 +03:00
|
|
|
args->mem_groups_pos = rst_mem_align_cpos(RM_PRIVATE);
|
2015-12-24 22:42:00 +03:00
|
|
|
groups = rst_mem_alloc(ce->n_groups * sizeof(u32), RM_PRIVATE);
|
|
|
|
if (!groups)
|
2015-12-21 14:08:00 +03:00
|
|
|
return ERR_PTR(-ENOMEM);
|
2015-12-24 22:42:00 +03:00
|
|
|
args = rst_mem_remap_ptr(this_pos, RM_PRIVATE);
|
|
|
|
args->groups = groups;
|
2015-12-21 14:08:00 +03:00
|
|
|
memcpy(args->groups, ce->groups, ce->n_groups * sizeof(u32));
|
|
|
|
} else {
|
|
|
|
args->groups = NULL;
|
|
|
|
args->mem_groups_pos = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
args->mem_pos_next = 0;
|
|
|
|
|
2015-12-24 22:42:00 +03:00
|
|
|
if (prev_pos) {
|
|
|
|
if (*prev_pos) {
|
|
|
|
struct thread_creds_args *prev;
|
|
|
|
|
|
|
|
prev = rst_mem_remap_ptr(*prev_pos, RM_PRIVATE);
|
|
|
|
prev->mem_pos_next = this_pos;
|
|
|
|
}
|
|
|
|
*prev_pos = this_pos;
|
|
|
|
}
|
2015-12-21 14:08:00 +03:00
|
|
|
return args;
|
|
|
|
}
|
|
|
|
|
|
|
|
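/* Old format: creds live in a separate CR_FD_CREDS image. */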
static int rst_prep_creds_from_img(pid_t pid)
|
|
|
|
{
|
|
|
|
CredsEntry *ce = NULL;
|
|
|
|
struct cr_img *img;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
img = open_image(CR_FD_CREDS, O_RSTR, pid);
|
|
|
|
if (!img)
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
ret = pb_read_one(img, &ce, PB_CREDS);
|
|
|
|
close_image(img);
|
|
|
|
|
|
|
|
if (ret > 0) {
|
|
|
|
struct thread_creds_args *args;
|
|
|
|
|
2015-12-24 22:42:00 +03:00
|
|
|
args = rst_prep_creds_args(ce, NULL);
|
2015-12-21 14:08:00 +03:00
|
|
|
if (IS_ERR(args))
|
|
|
|
ret = PTR_ERR(args);
|
|
|
|
else
|
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
creds_entry__free_unpacked(ce, NULL);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int rst_prep_creds(pid_t pid, CoreEntry *core, unsigned long *creds_pos)
|
|
|
|
{
|
|
|
|
struct thread_creds_args *args = NULL;
|
2015-12-24 22:42:00 +03:00
|
|
|
unsigned long this_pos = 0;
|
2015-12-21 14:08:00 +03:00
|
|
|
size_t i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * This is a _really_ old image
|
|
|
|
 * format where @thread_core was not
|
|
|
|
* present. It means we don't have
|
|
|
|
 * creds either; just ignore it and exit
|
|
|
|
* early.
|
|
|
|
*/
|
|
|
|
if (unlikely(!core->thread_core)) {
|
|
|
|
*creds_pos = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-02-11 01:42:00 +03:00
|
|
|
*creds_pos = rst_mem_align_cpos(RM_PRIVATE);
|
2015-12-21 14:08:00 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Old format: one Creds per task carried in own image file.
|
|
|
|
*/
|
|
|
|
if (!core->thread_core->creds)
|
|
|
|
return rst_prep_creds_from_img(pid);
|
|
|
|
|
|
|
|
for (i = 0; i < current->nr_threads; i++) {
|
|
|
|
CredsEntry *ce = current->core[i]->thread_core->creds;
|
|
|
|
|
2015-12-24 22:42:00 +03:00
|
|
|
args = rst_prep_creds_args(ce, &this_pos);
|
2015-12-21 14:08:00 +03:00
|
|
|
if (IS_ERR(args))
|
|
|
|
return PTR_ERR(args);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-28 21:30:46 +03:00
|
|
|
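/* Pick the native or compat unmap helper exported by the restorer blob. */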
static void *restorer_munmap_addr(CoreEntry *core, void *restorer_blob)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
if (core_is_compat(core))
|
|
|
|
return restorer_sym(restorer_blob, arch_export_unmap_compat);
|
|
|
|
#endif
|
|
|
|
return restorer_sym(restorer_blob, arch_export_unmap);
|
|
|
|
}
|
|
|
|
|
2016-05-25 16:28:00 +03:00
|
|
|
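/* Build the restorer bootstrap area (code, memzone, arguments) for the final sigreturn-based restore of this task. */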
static int sigreturn_restore(pid_t pid, struct task_restore_args *task_args, unsigned long alen, CoreEntry *core)
|
2011-10-24 22:23:06 +04:00
|
|
|
{
|
2012-03-02 19:29:35 +04:00
|
|
|
void *mem = MAP_FAILED;
|
2011-11-12 19:26:40 +04:00
|
|
|
void *restore_task_exec_start;
|
2011-11-16 18:19:24 +04:00
|
|
|
|
2016-05-25 16:28:00 +03:00
|
|
|
long new_sp;
|
2011-10-25 21:25:42 +04:00
|
|
|
long ret;
|
2013-09-23 14:33:35 +04:00
|
|
|
|
2015-06-25 17:06:00 +03:00
|
|
|
long rst_mem_size;
|
2016-05-24 14:33:57 +03:00
|
|
|
long memzone_size;
|
2011-10-24 22:23:06 +04:00
|
|
|
|
2011-11-16 18:19:24 +04:00
|
|
|
struct thread_restore_args *thread_args;
|
2016-05-24 14:33:57 +03:00
|
|
|
struct restore_mem_zone *mz;
|
2013-07-05 15:02:46 +04:00
|
|
|
|
2017-06-15 19:36:12 +03:00
|
|
|
struct vdso_maps vdso_maps_rt;
|
2013-05-24 01:42:14 +04:00
|
|
|
unsigned long vdso_rt_size = 0;
|
2015-11-16 22:17:45 -07:00
|
|
|
|
2013-03-01 20:11:51 +04:00
|
|
|
struct vm_area_list self_vmas;
|
2014-09-29 22:04:39 +04:00
|
|
|
struct vm_area_list *vmas = &rsti(current)->vmas;
|
2016-05-24 14:35:48 +03:00
|
|
|
int i, siginfo_n;
|
2011-11-12 19:26:40 +04:00
|
|
|
|
2015-12-21 14:08:00 +03:00
|
|
|
unsigned long creds_pos = 0;
|
|
|
|
unsigned long creds_pos_next;
|
2015-07-15 19:35:00 +03:00
|
|
|
|
2016-09-16 12:18:39 +03:00
|
|
|
sigset_t blockmask;
|
|
|
|
|
2012-05-02 14:42:00 +04:00
|
|
|
pr_info("Restore via sigreturn\n");
|
2012-01-01 13:12:37 +04:00
|
|
|
|
2011-11-16 18:19:24 +04:00
|
|
|
/* pr_info_vma_list(&self_vma_list); */
|
2011-10-27 18:59:21 +04:00
|
|
|
|
2013-11-08 17:32:07 +04:00
|
|
|
BUILD_BUG_ON(sizeof(struct task_restore_args) & 1);
|
2011-11-12 19:26:40 +04:00
|
|
|
BUILD_BUG_ON(sizeof(struct thread_restore_args) & 1);
|
|
|
|
|
2015-12-21 14:08:00 +03:00
|
|
|
/*
|
|
|
|
* Read creds info for every thread and allocate memory
|
|
|
|
 * needed so we can use this data inside the restorer.
|
|
|
|
*/
|
|
|
|
if (rst_prep_creds(pid, core, &creds_pos))
|
|
|
|
goto err_nv;
|
|
|
|
|
2017-02-15 02:59:27 +03:00
|
|
|
if (current->parent == NULL) {
|
|
|
|
/* Wait until all tasks have restored their files */
|
2017-10-21 00:01:13 +03:00
|
|
|
if (restore_wait_other_tasks())
|
|
|
|
goto err_nv;
|
mount: remount ro mounts writable before ghost-file restore
We can have ghost-files on readonly mounts, for them we will need to
recreate the file on restore, and we can't do that if mount is readonly,
so the idea is to remount the mount we want to operate on to be writable,
and later after all ghost-files restored return mounts to their proper
state if needed.
There are three exceptions, where we don't remount:
a) Overmounted mounts can't be easily remounted writable, as their
mountpoints are invisible for us.
b) If the mount has readonly superblock - there can be no ghost-files on
such a mount.
c) When we are in host mntns, we should not remount mounts in it, else
if we face errors in between we'll forget to remount back.
We have 3 places where we need to add these remount:
1) create_ghost()
2) clean_one_remap()
3) rfi_remap()
For (1) and (2) we can just remount the mount writable without
remounting it back as they are called in service mntns (the one we save
in mnt_ns_fd), which will be destroyed with all it's mounts at the end.
We mark such mounts as remounted in service mntns - REMOUNTED_RW_SERVICE.
For (3) we need to remount these mounts back to readonly so we mark them
with REMOUNTED_RW and later in remount_readonly_mounts all such mounts
are re-remounted back.
For (3) we also need to enter proper mntns of tmi before remounting.
These solution v3 is better than v2 as for v2 we added additional
remount for all bind-readonly mounts, now we do remounts only for
those having ghost-files restore operations on them. These should be
quiet a rare thing, so ~3 remounts added for each suitable mount is a
relatively small price.
note: Also I thought and tried to implement the complete remove of the
step of remounting back to readonly, but it requires quiet a tricky
playing with usernsd and only removes one remount (of ~3) for already a
rare case so I don't thing it worth the effort.
v2: minor commit message cleanup and remove warn
v4: don't delay, only remount the mounts we explicitly want to write to
just before operating, rename patch accordingly, reuse
do_restore_task_mnt_ns, optimize inefficient ns_remount_readonly_mounts,
and also add another exception.
v5: simplify child status check, fix log messages and brackets, do not
drop all flags but only the readonly flag
Signed-off-by: Pavel Tikhomirov <ptikhomirov@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@gmail.com>
2019-01-14 15:59:17 +03:00
|
|
|
if (root_ns_mask & CLONE_NEWNS &&
|
|
|
|
remount_readonly_mounts())
|
|
|
|
goto err_nv;
|
2017-02-15 02:59:27 +03:00
|
|
|
}
|
|
|
|
|
2013-11-03 17:23:31 +04:00
|
|
|
/*
|
|
|
|
 * We're about to search for a free VM area and inject the restorer blob
|
2016-08-04 14:54:56 -07:00
|
|
|
* into it. No irrelevant mmaps/mremaps beyond this point, otherwise
|
2013-11-03 17:23:31 +04:00
|
|
|
* this unwanted mapping might get overlapped by the restorer.
|
|
|
|
*/
|
|
|
|
|
2014-02-07 13:32:21 +04:00
|
|
|
ret = parse_self_maps_lite(&self_vmas);
|
2013-11-03 17:23:31 +04:00
|
|
|
if (ret < 0)
|
|
|
|
goto err;
|
|
|
|
|
2015-06-25 17:06:00 +03:00
|
|
|
rst_mem_size = rst_mem_lock();
|
2016-05-24 14:33:57 +03:00
|
|
|
memzone_size = round_up(sizeof(struct restore_mem_zone) * current->nr_threads, page_size());
|
2016-05-25 16:28:00 +03:00
|
|
|
task_args->bootstrap_len = restorer_len + memzone_size + alen + rst_mem_size;
|
|
|
|
BUG_ON(task_args->bootstrap_len & (PAGE_SIZE - 1));
|
2016-05-24 14:34:20 +03:00
|
|
|
pr_info("%d threads require %ldK of memory\n",
|
2016-05-25 16:28:00 +03:00
|
|
|
current->nr_threads, KBYTES(task_args->bootstrap_len));
|
2012-11-27 22:16:00 +03:00
|
|
|
|
2016-12-09 21:19:41 +03:00
|
|
|
if (core_is_compat(core))
|
2017-06-15 19:36:12 +03:00
|
|
|
vdso_maps_rt = vdso_maps_compat;
|
2016-12-09 21:19:41 +03:00
|
|
|
else
|
2017-06-15 19:36:12 +03:00
|
|
|
vdso_maps_rt = vdso_maps;
|
2013-05-24 01:42:14 +04:00
|
|
|
/*
|
2014-06-20 19:35:08 +04:00
|
|
|
 * Figure out how much memory the runtime vdso and vvar will need.
|
2020-01-22 14:00:27 +00:00
|
|
|
 * Also check whether the kernel provides vDSO/VVAR at all.
|
2013-05-24 01:42:14 +04:00
|
|
|
*/
|
2020-01-22 14:00:27 +00:00
|
|
|
if (vdso_maps_rt.sym.vdso_size != VDSO_BAD_SIZE) {
|
|
|
|
vdso_rt_size = vdso_maps_rt.sym.vdso_size;
|
|
|
|
if (vdso_maps_rt.sym.vvar_size != VVAR_BAD_SIZE)
|
2020-01-22 14:05:47 +00:00
|
|
|
vdso_rt_size += vdso_maps_rt.sym.vvar_size;
|
2020-01-22 14:00:27 +00:00
|
|
|
}
|
2016-05-25 16:28:00 +03:00
|
|
|
task_args->bootstrap_len += vdso_rt_size;
|
2013-09-23 14:33:35 +04:00
|
|
|
|
2012-11-27 22:16:00 +03:00
|
|
|
/*
|
|
|
|
* Restorer is a blob (code + args) that will get mapped in some
|
|
|
|
 * place that must _not_ intersect with either the current mappings
|
|
|
|
 * or the mappings of the task we're restoring here. The subsequent
|
|
|
|
* call finds the start address for the restorer.
|
|
|
|
*
|
|
|
|
* After the start address is found we populate it with the restorer
|
|
|
|
* parts one by one (some are remap-ed, some are mmap-ed and copied
|
|
|
|
* or inited from scratch).
|
|
|
|
*/
|
|
|
|
|
2016-05-25 16:28:00 +03:00
|
|
|
mem = (void *)restorer_get_vma_hint(&vmas->h, &self_vmas.h,
|
|
|
|
task_args->bootstrap_len);
|
|
|
|
if (mem == (void *)-1) {
|
2012-01-31 15:31:22 +04:00
|
|
|
pr_err("No suitable area for task_restore bootstrap (%ldK)\n",
|
2016-05-25 16:28:00 +03:00
|
|
|
task_args->bootstrap_len);
|
2011-11-06 01:49:57 +04:00
|
|
|
goto err;
|
2011-11-16 18:19:24 +04:00
|
|
|
}
|
2011-10-27 00:57:01 +04:00
|
|
|
|
2016-07-19 19:13:00 +03:00
|
|
|
pr_info("Found bootstrap VMA hint at: %p (needs ~%ldK)\n",
|
2016-05-25 16:28:00 +03:00
|
|
|
mem, KBYTES(task_args->bootstrap_len));
|
2012-03-02 19:28:13 +04:00
|
|
|
|
2016-05-25 16:28:00 +03:00
|
|
|
ret = remap_restorer_blob(mem);
|
2012-09-14 14:51:40 +04:00
|
|
|
if (ret < 0)
|
2011-11-06 01:49:57 +04:00
|
|
|
goto err;
|
2011-10-24 22:23:06 +04:00
|
|
|
|
2011-10-26 11:16:00 +04:00
|
|
|
/*
|
2011-11-16 18:19:24 +04:00
|
|
|
 * Prepare a memory map for the restorer. Note that a thread's space
|
|
|
|
 * might be completely unused, so it's here just for convenience.
|
2011-10-26 11:16:00 +04:00
|
|
|
*/
|
2016-05-25 16:28:00 +03:00
|
|
|
task_args->clone_restore_fn = restorer_sym(mem, arch_export_restore_thread);
|
|
|
|
restore_task_exec_start = restorer_sym(mem, arch_export_restore_task);
|
2017-04-28 21:30:46 +03:00
|
|
|
rsti(current)->munmap_restorer = restorer_munmap_addr(core, mem);
|
2012-09-13 03:01:48 +04:00
|
|
|
|
2016-05-25 16:28:00 +03:00
|
|
|
task_args->bootstrap_start = mem;
|
|
|
|
mem += restorer_len;
|
2011-10-26 11:16:00 +04:00
|
|
|
|
2016-05-25 16:28:00 +03:00
|
|
|
/* VMA we need for stacks and sigframes for threads */
|
|
|
|
if (mmap(mem, memzone_size, PROT_READ | PROT_WRITE,
|
2018-11-09 21:51:23 +00:00
|
|
|
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 0, 0) != mem) {
|
2021-04-22 17:43:47 -07:00
|
|
|
pr_perror("Can't mmap section for restore code");
|
2012-09-13 04:10:48 +04:00
|
|
|
goto err;
|
|
|
|
}
|
2011-10-26 11:16:00 +04:00
|
|
|
|
2016-05-24 14:34:20 +03:00
|
|
|
memzero(mem, memzone_size);
|
|
|
|
mz = mem;
|
|
|
|
mem += memzone_size;
|
2016-05-24 14:33:57 +03:00
|
|
|
|
2016-05-25 16:28:00 +03:00
|
|
|
/* New home for task_restore_args and thread_restore_args */
|
2016-05-25 16:28:00 +03:00
|
|
|
task_args = mremap(task_args, alen, alen, MREMAP_MAYMOVE|MREMAP_FIXED, mem);
|
|
|
|
if (task_args != mem) {
|
|
|
|
pr_perror("Can't move task args");
|
2016-05-24 14:33:57 +03:00
|
|
|
goto err;
|
2016-05-25 16:28:00 +03:00
|
|
|
}
|
2011-11-12 19:26:40 +04:00
|
|
|
|
2016-05-25 16:28:00 +03:00
|
|
|
task_args->rst_mem = mem;
|
|
|
|
task_args->rst_mem_size = rst_mem_size + alen;
|
2016-05-25 16:28:00 +03:00
|
|
|
thread_args = (struct thread_restore_args *)(task_args + 1);
|
2016-05-25 16:28:00 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* And finally -- the rest arguments referenced by task_ and
|
|
|
|
* thread_restore_args. Pointers will get remapped below.
|
|
|
|
*/
|
2016-05-25 16:28:00 +03:00
|
|
|
mem += alen;
|
|
|
|
if (rst_mem_remap(mem))
|
|
|
|
goto err;
|
2016-05-24 14:34:20 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* At this point we've found a gap in VM that fits in both -- current
|
|
|
|
* and target tasks' mappings -- and its structure is
|
|
|
|
*
|
|
|
|
* | restorer code | memzone (stacks and sigframes) | arguments |
|
|
|
|
*
|
|
|
|
 * The arguments section holds task_restore_args, the thread_restore_args-s
|
|
|
|
 * and the rest of the objects allocated with rst_mem_alloc().
|
|
|
|
 * Note that the task_args itself is inside the 3rd section and (!)
|
|
|
|
* it gets unmapped at the very end of __export_restore_task
|
|
|
|
*/
|
|
|
|
|
2015-06-15 15:50:37 -06:00
|
|
|
task_args->proc_fd = dup(get_service_fd(PROC_FD_OFF));
|
|
|
|
if (task_args->proc_fd < 0) {
|
|
|
|
pr_perror("can't dup proc fd");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2014-09-29 22:04:39 +04:00
|
|
|
task_args->breakpoint = &rsti(current)->breakpoint;
|
2017-02-16 19:20:44 +03:00
|
|
|
task_args->fault_strategy = fi_strategy;
|
2016-09-16 12:18:39 +03:00
|
|
|
|
|
|
|
sigemptyset(&blockmask);
|
|
|
|
sigaddset(&blockmask, SIGCHLD);
|
|
|
|
|
|
|
|
if (sigprocmask(SIG_BLOCK, &blockmask, NULL) == -1) {
|
|
|
|
pr_perror("Can not set mask of blocked signals");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2014-08-18 19:47:20 +04:00
|
|
|
task_args->task_entries = rst_mem_remap_ptr(task_entries_pos, RM_SHREMAP);
|
2012-03-27 16:34:00 +04:00
|
|
|
|
2014-09-29 22:04:39 +04:00
|
|
|
task_args->premmapped_addr = (unsigned long)rsti(current)->premmapped_addr;
|
|
|
|
task_args->premmapped_len = rsti(current)->premmapped_len;
|
2012-09-17 20:02:57 +04:00
|
|
|
|
2015-07-31 10:36:25 -04:00
|
|
|
task_args->task_size = kdat.task_size;
|
2018-05-10 19:14:44 +01:00
|
|
|
#ifdef ARCH_HAS_LONG_PAGES
|
|
|
|
task_args->page_size = PAGE_SIZE;
|
|
|
|
#endif
|
2015-07-31 10:36:25 -04:00
|
|
|
|
2016-05-24 14:36:47 +03:00
|
|
|
RST_MEM_FIXUP_PPTR(task_args->vmas);
|
|
|
|
RST_MEM_FIXUP_PPTR(task_args->rings);
|
|
|
|
RST_MEM_FIXUP_PPTR(task_args->tcp_socks);
|
|
|
|
RST_MEM_FIXUP_PPTR(task_args->timerfd);
|
|
|
|
RST_MEM_FIXUP_PPTR(task_args->posix_timers);
|
|
|
|
RST_MEM_FIXUP_PPTR(task_args->siginfo);
|
|
|
|
RST_MEM_FIXUP_PPTR(task_args->rlims);
|
|
|
|
RST_MEM_FIXUP_PPTR(task_args->helpers);
|
|
|
|
RST_MEM_FIXUP_PPTR(task_args->zombies);
|
2017-05-11 12:13:52 +03:00
|
|
|
RST_MEM_FIXUP_PPTR(task_args->vma_ios);
|
2019-06-26 11:55:19 +03:00
|
|
|
RST_MEM_FIXUP_PPTR(task_args->inotify_fds);
|
2014-09-17 15:32:16 -05:00
|
|
|
|
2016-06-28 22:24:12 +03:00
|
|
|
task_args->compatible_mode = core_is_compat(core);
|
2012-01-01 13:10:12 +04:00
|
|
|
/*
|
|
|
|
* Arguments for task restoration.
|
|
|
|
*/
|
2012-07-19 13:23:01 +04:00
|
|
|
|
2013-01-14 11:25:50 +04:00
|
|
|
BUG_ON(core->mtype != CORE_ENTRY__MARCH);
|
2012-07-19 13:23:01 +04:00
|
|
|
|
2012-03-01 18:52:42 +04:00
|
|
|
task_args->logfd = log_get_fd();
|
2012-09-03 14:44:09 +04:00
|
|
|
task_args->loglevel = log_get_loglevel();
|
2017-01-11 11:25:00 +03:00
|
|
|
log_get_logstart(&task_args->logstart);
|
2012-01-19 01:33:19 +03:00
|
|
|
task_args->sigchld_act = sigchld_act;
|
2011-11-18 16:09:01 +04:00
|
|
|
|
criu: fix gcc-8 warnings
criu/sk-packet.c:443:3: error: 'strncpy' output may be truncated
copying 14 bytes from a string of length 15
strncpy(addr_spkt.sa_data, req.ifr_name, sa_data_size);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
criu/img-remote.c:383:3: error: 'strncpy' specified bound 4096
equals destination size
strncpy(snapshot_id, li->snapshot_id, PATHLEN);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
criu/img-remote.c:384:3: error: 'strncpy' specified bound 4096
equals destination size
strncpy(path, li->name, PATHLEN);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
criu/files.c:288:3: error: 'strncpy' output may be truncated copying
4095 bytes from a string of length 4096
strncpy(buf, link->name, PATH_MAX - 1);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
criu/sk-unix.c:239:36: error: '/' directive output may be truncated
writing 1 byte into a region of size between 0 and 4095
snprintf(path, sizeof(path), ".%s/%s", dir, sk->name);
^
criu/sk-unix.c:239:3: note: 'snprintf' output 3 or more bytes
(assuming 4098) into a destination of size 4096
snprintf(path, sizeof(path), ".%s/%s", dir, sk->name);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
criu/mount.c:2563:3: error: 'strncpy' specified bound 4096 equals
destination size
strncpy(path, m->mountpoint, PATH_MAX);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
criu/cr-restore.c:3647:2: error: 'strncpy' specified bound 16 equals
destination size
strncpy(task_args->comm, core->tc->comm, sizeof(task_args->comm));
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
2018-02-04 08:22:59 +03:00
|
|
|
strncpy(task_args->comm, core->tc->comm, TASK_COMM_LEN - 1);
|
|
|
|
task_args->comm[TASK_COMM_LEN - 1] = 0;
|
2012-07-19 13:23:01 +04:00
|
|
|
|
2012-02-10 20:18:08 +04:00
|
|
|
/*
|
|
|
|
* Fill up per-thread data.
|
|
|
|
*/
|
2015-12-21 14:08:00 +03:00
|
|
|
creds_pos_next = creds_pos;
|
2016-05-30 17:53:00 +03:00
|
|
|
siginfo_n = task_args->siginfo_n;
|
2012-09-05 19:52:55 +04:00
|
|
|
for (i = 0; i < current->nr_threads; i++) {
|
2012-12-21 18:58:16 +04:00
|
|
|
CoreEntry *tcore;
|
2013-05-24 16:20:19 +04:00
|
|
|
struct rt_sigframe *sigframe;
|
2020-04-22 15:43:04 +08:00
|
|
|
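		/*
		 * On MIPS the blocked signal mask comes as two image fields
		 * (blk_sigset and blk_sigset_extended), so assemble a local
		 * k_rtsigset_t; other arches point blkset straight at the
		 * image field.
		 */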
#ifdef CONFIG_MIPS
|
|
|
|
k_rtsigset_t mips_blkset;
|
|
|
|
#else
|
2016-09-26 13:20:00 +03:00
|
|
|
k_rtsigset_t *blkset = NULL;
|
2012-12-21 18:58:23 +04:00
|
|
|
|
2020-04-22 15:43:04 +08:00
|
|
|
#endif
|
2017-01-25 18:29:04 +03:00
|
|
|
thread_args[i].pid = current->threads[i].ns[0].virt;
|
2015-06-05 19:13:00 +03:00
|
|
|
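		/*
		 * Each thread takes a contiguous slice of the shared siginfo
		 * array, starting where the previous thread's slice ended.
		 */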
thread_args[i].siginfo_n = siginfo_priv_nr[i];
|
2016-05-24 14:35:48 +03:00
|
|
|
thread_args[i].siginfo = task_args->siginfo;
|
|
|
|
thread_args[i].siginfo += siginfo_n;
|
|
|
|
siginfo_n += thread_args[i].siginfo_n;
|
2011-11-12 19:26:40 +04:00
|
|
|
|
2012-02-10 20:18:08 +04:00
|
|
|
/* For the leader we already have the unpacked core, reuse it */
|
2012-12-21 18:58:23 +04:00
|
|
|
if (thread_args[i].pid == pid) {
|
|
|
|
task_args->t = thread_args + i;
|
|
|
|
tcore = core;
|
2020-04-22 15:43:04 +08:00
|
|
|
#ifdef CONFIG_MIPS
|
|
|
|
mips_blkset.sig[0] = tcore->tc->blk_sigset;
|
|
|
|
mips_blkset.sig[1] = tcore->tc->blk_sigset_extended;
|
|
|
|
#else
|
2016-09-26 13:20:00 +03:00
|
|
|
blkset = (void *)&tcore->tc->blk_sigset;
|
2020-04-22 15:43:04 +08:00
|
|
|
#endif
|
2016-09-26 13:20:00 +03:00
|
|
|
} else {
|
2014-08-15 16:02:14 +03:00
|
|
|
tcore = current->core[i];
|
2020-04-22 15:43:04 +08:00
|
|
|
if (tcore->thread_core->has_blk_sigset) {
|
|
|
|
#ifdef CONFIG_MIPS
|
|
|
|
mips_blkset.sig[0] = tcore->thread_core->blk_sigset;
|
|
|
|
mips_blkset.sig[1] = tcore->thread_core->blk_sigset_extended;
|
|
|
|
#else
|
2016-09-26 13:20:00 +03:00
|
|
|
blkset = (void *)&tcore->thread_core->blk_sigset;
|
2020-04-22 15:43:04 +08:00
|
|
|
#endif
|
|
|
|
}
|
2016-09-26 13:20:00 +03:00
|
|
|
}
|
2012-07-19 13:23:01 +04:00
|
|
|
|
2012-12-21 18:58:23 +04:00
|
|
|
if ((tcore->tc || tcore->ids) && thread_args[i].pid != pid) {
|
2012-07-19 13:23:01 +04:00
|
|
|
pr_err("Thread has optional fields present %d\n",
|
|
|
|
thread_args[i].pid);
|
|
|
|
ret = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret < 0) {
|
|
|
|
pr_err("Can't read core data for thread %d\n",
|
|
|
|
thread_args[i].pid);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2012-10-30 10:04:37 +03:00
|
|
|
thread_args[i].ta = task_args;
|
2013-01-14 17:19:06 +04:00
|
|
|
thread_args[i].gpregs = *CORE_THREAD_ARCH_INFO(tcore)->gpregs;
|
|
|
|
thread_args[i].clear_tid_addr = CORE_THREAD_ARCH_INFO(tcore)->clear_tid_addr;
|
2013-01-09 18:48:00 +04:00
|
|
|
core_get_tls(tcore, &thread_args[i].tls);
|
2011-11-12 19:26:40 +04:00
|
|
|
|
2015-12-21 14:08:00 +03:00
|
|
|
rst_reloc_creds(&thread_args[i], &creds_pos_next);
|
|
|
|
|
2016-07-18 21:40:00 +03:00
|
|
|
thread_args[i].futex_rla = tcore->thread_core->futex_rla;
|
|
|
|
thread_args[i].futex_rla_len = tcore->thread_core->futex_rla_len;
|
|
|
|
thread_args[i].pdeath_sig = tcore->thread_core->pdeath_sig;
|
|
|
|
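		/* Reject parent-death signal numbers outside the kernel's signal range. */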
if (tcore->thread_core->pdeath_sig > _KNSIG) {
|
|
|
|
pr_err("Pdeath signal is too big\n");
|
|
|
|
goto err;
|
2012-08-10 20:29:01 +04:00
|
|
|
}
|
|
|
|
|
2016-07-18 21:40:00 +03:00
|
|
|
ret = prep_sched_info(&thread_args[i].sp, tcore->thread_core);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
2018-05-07 11:42:45 +03:00
|
|
|
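		/*
		 * Relocate the thread's seccomp filter data for the restorer;
		 * presumably force_tsync compensates for older images that
		 * carried a single per-task filter.
		 */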
seccomp_rst_reloc(&thread_args[i]);
|
|
|
|
thread_args[i].seccomp_force_tsync = rsti(current)->has_old_seccomp_filter;
|
|
|
|
|
2016-05-24 14:33:57 +03:00
|
|
|
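		/*
		 * Each thread gets its own memory zone; the rt_sigframe built
		 * there is what the final sigreturn consumes to load the
		 * thread's register state.
		 */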
thread_args[i].mz = mz + i;
|
|
|
|
sigframe = (struct rt_sigframe *)&mz[i].rt_sigframe;
|
2013-05-24 16:20:19 +04:00
|
|
|
|
2020-04-22 15:43:04 +08:00
|
|
|
#ifdef CONFIG_MIPS
|
|
|
|
if (construct_sigframe(sigframe, sigframe, &mips_blkset, tcore))
|
|
|
|
#else
|
2016-09-26 13:20:00 +03:00
|
|
|
if (construct_sigframe(sigframe, sigframe, blkset, tcore))
|
2020-04-22 15:43:04 +08:00
|
|
|
#endif
|
2012-12-21 17:35:43 +04:00
|
|
|
goto err;
|
|
|
|
|
2018-07-28 00:44:33 +03:00
|
|
|
if (tcore->thread_core->comm)
|
2018-08-23 14:09:49 +03:00
|
|
|
strncpy(thread_args[i].comm, tcore->thread_core->comm, TASK_COMM_LEN - 1);
|
2018-02-01 22:29:06 +03:00
|
|
|
else
|
2018-08-23 14:09:49 +03:00
|
|
|
strncpy(thread_args[i].comm, core->tc->comm, TASK_COMM_LEN - 1);
|
|
|
|
thread_args[i].comm[TASK_COMM_LEN - 1] = 0;
|
2018-02-01 22:29:06 +03:00
|
|
|
|
2012-12-21 18:58:23 +04:00
|
|
|
if (thread_args[i].pid != pid)
|
|
|
|
core_entry__free_unpacked(tcore, NULL);
|
2011-11-17 00:59:08 +04:00
|
|
|
|
2013-10-29 12:19:13 +04:00
|
|
|
pr_info("Thread %4d stack %8p rt_sigframe %8p\n",
|
2016-05-24 14:33:57 +03:00
|
|
|
i, mz[i].stack, mz[i].rt_sigframe);
|
2011-11-12 19:26:40 +04:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2013-05-24 01:42:14 +04:00
|
|
|
/*
|
|
|
|
* Restorer needs its own copy of the vdso parameters. The runtime
|
|
|
|
* vdso must be kept non-intersecting with anything else,
|
|
|
|
* since we need it to remain accessible even after our own
|
|
|
|
* self-VMAs are unmapped.
|
|
|
|
*/
|
2015-06-25 17:06:00 +03:00
|
|
|
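	/* Park the runtime vdso right past the restorer's private memory. */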
mem += rst_mem_size;
|
2016-05-24 14:33:36 +03:00
|
|
|
task_args->vdso_rt_parked_at = (unsigned long)mem;
|
2017-06-15 19:36:12 +03:00
|
|
|
task_args->vdso_maps_rt = vdso_maps_rt;
|
2015-06-05 11:48:00 +03:00
|
|
|
task_args->vdso_rt_size = vdso_rt_size;
|
2017-07-17 15:40:00 +03:00
|
|
|
task_args->can_map_vdso = kdat.can_map_vdso;
|
2019-12-16 10:42:13 +00:00
|
|
|
task_args->has_clone3_set_tid = kdat.has_clone3_set_tid;
|
2013-05-24 01:42:14 +04:00
|
|
|
|
2016-05-24 14:33:57 +03:00
|
|
|
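	/* The stack for entering the restorer blob lives in the leader thread's memory zone. */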
new_sp = restorer_stack(task_args->t->mz);
|
2012-12-21 18:58:16 +04:00
|
|
|
|
2014-07-31 02:00:00 +04:00
|
|
|
/* No longer need it */
|
|
|
|
core_entry__free_unpacked(core, NULL);
|
2014-08-15 16:02:14 +03:00
|
|
|
xfree(current->core);
|
2014-07-31 02:00:00 +04:00
|
|
|
|
2012-12-21 18:58:16 +04:00
|
|
|
/*
|
|
|
|
* Now prepare run-time data for threads restore.
|
|
|
|
*/
|
|
|
|
task_args->nr_threads = current->nr_threads;
|
|
|
|
task_args->thread_args = thread_args;
|
|
|
|
|
2018-07-24 12:12:27 +02:00
|
|
|
task_args->auto_dedup = opts.auto_dedup;
|
|
|
|
|
2019-03-26 20:01:40 +00:00
|
|
|
/*
|
|
|
|
* In the restorer we need to know whether the LSM is SELinux or not. For SELinux
|
|
|
|
* we must change the process context before creating threads. For
|
|
|
|
* AppArmor we can change each thread's context after it has been created.
|
|
|
|
*/
|
|
|
|
task_args->lsm_type = kdat.lsm;
|
|
|
|
|
2013-09-25 13:46:01 +04:00
|
|
|
/*
|
|
|
|
* Restore root and cwd _that_ late so as not to break any
|
|
|
|
* of the attempts above to open files by path (e.g. /proc).
|
|
|
|
*/
|
|
|
|
|
2014-07-03 19:07:44 +04:00
|
|
|
if (restore_fs(current))
|
2013-09-25 13:46:01 +04:00
|
|
|
goto err;
|
|
|
|
|
2018-01-10 17:02:00 +03:00
|
|
|
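	/*
	 * The image, proc and service descriptors are not needed past this
	 * point; close them before handing the address space over to the
	 * restorer.
	 */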
sfds_protected = false;
|
2012-03-16 17:24:00 +04:00
|
|
|
close_image_dir();
|
2014-04-21 18:23:30 +04:00
|
|
|
close_proc();
|
2017-02-23 18:15:26 +03:00
|
|
|
close_service_fd(TRANSPORT_FD_OFF);
|
|
|
|
close_service_fd(CR_PROC_FD_OFF);
|
2014-04-16 09:04:27 +04:00
|
|
|
close_service_fd(ROOT_FD_OFF);
|
2015-02-13 16:05:24 +04:00
|
|
|
close_service_fd(USERNSD_SK);
|
2017-02-07 11:43:29 +03:00
|
|
|
close_service_fd(FDSTORE_SK_OFF);
|
2017-02-07 11:43:27 +03:00
|
|
|
close_service_fd(RPC_SK_OFF);
|
2012-03-16 17:24:00 +04:00
|
|
|
|
2012-12-25 22:43:14 +04:00
|
|
|
__gcov_flush();
|
|
|
|
|
2011-11-16 18:19:24 +04:00
|
|
|
pr_info("task_args: %p\n"
|
|
|
|
"task_args->pid: %d\n"
|
|
|
|
"task_args->nr_threads: %d\n"
|
|
|
|
"task_args->clone_restore_fn: %p\n"
|
|
|
|
"task_args->thread_args: %p\n",
|
2012-12-21 18:58:23 +04:00
|
|
|
task_args, task_args->t->pid,
|
2012-03-02 19:30:23 +04:00
|
|
|
task_args->nr_threads,
|
|
|
|
task_args->clone_restore_fn,
|
2011-11-16 18:19:24 +04:00
|
|
|
task_args->thread_args);
|
|
|
|
|
2011-10-26 17:35:50 +04:00
|
|
|
/*
|
2013-04-12 13:00:06 -07:00
|
|
|
* An indirect call to task_restore; note that it never returns
|
|
|
|
* and that restoring the core is extremely destructive.
|
2011-10-26 17:35:50 +04:00
|
|
|
*/
|
2013-01-09 17:39:23 +04:00
|
|
|
|
|
|
|
JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, task_args);
|
2011-10-26 11:16:00 +04:00
|
|
|
|
2011-10-26 22:50:46 +04:00
|
|
|
err:
|
2013-03-01 20:11:51 +04:00
|
|
|
free_mappings(&self_vmas);
|
2013-11-03 17:23:31 +04:00
|
|
|
err_nv:
|
2011-10-26 17:35:50 +04:00
|
|
|
/* Just to be sure */
|
2012-01-17 10:56:28 +04:00
|
|
|
exit(1);
|
2012-03-21 19:37:00 +04:00
|
|
|
return -1;
|
2011-10-24 22:23:06 +04:00
|
|
|
}
|