// SPDX-License-Identifier: MIT
syntax = "proto2";
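/*
 * This file describes the core image (core-<pid>.img). Each image holds a
 * single core_entry carrying architecture-specific register state plus the
 * generic per-task and per-thread attributes defined below. Such images can
 * typically be inspected with the crit tool, e.g.
 * "crit decode -i core-<pid>.img --pretty" (exact flags may vary between
 * crit versions).
 */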
import "core-x86.proto";
import "core-arm.proto";
import "core-aarch64.proto";
import "core-ppc64.proto";
import "core-s390.proto";
import "core-mips.proto";
import "core-loongarch64.proto";
import "core-riscv64.proto";
import "rlimit.proto";
import "timer.proto";
import "creds.proto";
import "sa.proto";
import "siginfo.proto";
import "rseq.proto";
import "opts.proto";
/*
* These match the SECCOMP_MODE_* flags from <linux/seccomp.h>.
*/
enum seccomp_mode {
        disabled = 0;
        strict = 1;
        filter = 2;
};
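/*
 * Process-wide state of the dumped task: exit code, personality, blocked
 * signals, timers, rlimits, shared pending signals and so on. Per-thread
 * counterparts live in thread_core_entry below.
 */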
message task_core_entry {
        required uint32 task_state = 1 [(criu).dict = "gen"];
        required uint32 exit_code = 2;
        required uint32 personality = 3;
        required uint32 flags = 4;
        required uint64 blk_sigset = 5[(criu).hex = true];
        required string comm = 6;
        optional task_timers_entry timers = 7;
        optional task_rlimits_entry rlimits = 8;
        /* This is deprecated, should be per-thread */
        optional uint32 cg_set = 9;
        optional signal_queue_entry signals_s = 10;
        /* These two are deprecated, should be per-thread */
        optional seccomp_mode old_seccomp_mode = 11;
        optional uint32 old_seccomp_filter = 12;
        optional uint32 loginuid = 13;
        optional int32 oom_score_adj = 14;
        repeated sa_entry sigactions = 15;
        // Reserved for tty inheritance
        //optional int32 tty_nr = 16;
        //optional int32 tty_pgrp = 17;
        optional bool child_subreaper = 18;
        // Reserved for container relative start time
        //optional uint64 start_time = 19;
        optional uint64 blk_sigset_extended = 20[(criu).hex = true];
        optional uint32 stop_signo = 21;
        optional uint32 membarrier_registration_mask = 22 [(criu).hex = true];
}
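/*
 * IDs of the kernel objects (mm, files, fs, sighand) and namespaces the
 * task uses; they let CRIU detect objects shared between tasks on dump and
 * re-establish that sharing on restore.
 */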
message task_kobj_ids_entry {
        required uint32 vm_id = 1;
        required uint32 files_id = 2;
        required uint32 fs_id = 3;
        required uint32 sighand_id = 4;
        optional uint32 pid_ns_id = 5;
        optional uint32 net_ns_id = 6;
        optional uint32 ipc_ns_id = 7;
        optional uint32 uts_ns_id = 8;
        optional uint32 mnt_ns_id = 9;
        optional uint32 user_ns_id = 10;
        optional uint32 cgroup_ns_id = 11;
        optional uint32 time_ns_id = 12;
}
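/* Alternate signal stack (sigaltstack) state of a thread. */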
message thread_sas_entry {
        required uint64 ss_sp = 1;
        required uint64 ss_size = 2;
        required uint32 ss_flags = 3;
}
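/*
 * Per-thread state: robust futex list location, scheduling parameters,
 * blocked signals, alternate signal stack, credentials, thread-private
 * pending signals and per-thread seccomp state.
 */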
message thread_core_entry {
        required uint64 futex_rla = 1;
        required uint32 futex_rla_len = 2;
        optional sint32 sched_nice = 3;
        optional uint32 sched_policy = 4;
        optional uint32 sched_prio = 5;
        optional uint64 blk_sigset = 6;
        optional thread_sas_entry sas = 7;
        optional uint32 pdeath_sig = 8;
        optional signal_queue_entry signals_p = 9;
        optional creds_entry creds = 10;
        optional seccomp_mode seccomp_mode = 11;
        optional uint32 seccomp_filter = 12;
        optional string comm = 13;
        optional uint64 blk_sigset_extended = 14;
        optional rseq_entry rseq_entry = 15;
        optional uint32 cg_set = 16;
}
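/* Resource limits (as in getrlimit/setrlimit) of the task. */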
message task_rlimits_entry {
        repeated rlimit_entry rlimits = 1;
};
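/*
 * Top-level entry of a core image. mtype selects which arch-specific
 * thread_info_* field below is filled in; tc, ids and thread_core hold the
 * architecture-independent parts.
 */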
message core_entry {
        enum march {
                UNKNOWN = 0;
                X86_64 = 1;
                ARM = 2;
                AARCH64 = 3;
                PPC64 = 4;
                S390 = 5;
                MIPS = 6;
                LOONGARCH64 = 7;
                RISCV64 = 8;
        }
        required march mtype = 1;
        optional thread_info_x86 thread_info = 2;
        optional thread_info_arm ti_arm = 6;
        optional thread_info_aarch64 ti_aarch64 = 8;
        optional thread_info_ppc64 ti_ppc64 = 9;
        optional thread_info_s390 ti_s390 = 10;
        optional thread_info_mips ti_mips = 11;
        optional thread_info_loongarch64 ti_loongarch64 = 12;
        optional thread_info_riscv64 ti_riscv64 = 13;
        optional task_core_entry tc = 3;
        optional task_kobj_ids_entry ids = 4;
        optional thread_core_entry thread_core = 5;
}