From 5aa72e723707e2bd7e8ed9841c2ad392781d066d Mon Sep 17 00:00:00 2001 From: Andrei Vagin Date: Sat, 7 Sep 2019 15:46:22 +0300 Subject: [PATCH] py: Reformat everything into pep8 style As discussed on the mailing list, current .py files formatting does not conform to the world standard, so we should better reformat it. For this the yapf tool is used. The command I used was yapf -i $(find -name *.py) Signed-off-by: Pavel Emelyanov --- coredump/criu_coredump/coredump.py | 1191 ++++----- coredump/criu_coredump/elf.py | 1015 ++++---- lib/py/cli.py | 552 +++-- lib/py/criu.py | 409 ++-- lib/py/images/images.py | 813 +++--- lib/py/images/pb2dict.py | 576 +++-- scripts/crit-setup.py | 19 +- scripts/magic-gen.py | 88 +- soccr/test/run.py | 20 +- test/check_actions.py | 41 +- test/crit-recode.py | 100 +- test/exhaustive/pipe.py | 384 +-- test/exhaustive/unix.py | 1167 ++++----- test/inhfd/fifo.py | 46 +- test/inhfd/pipe.py | 14 +- test/inhfd/socket.py | 14 +- test/inhfd/tty.py | 35 +- test/others/ext-tty/run.py | 27 +- test/others/mounts/mounts.py | 47 +- test/others/rpc/config_file.py | 247 +- test/others/rpc/errno.py | 182 +- test/others/rpc/ps_test.py | 76 +- test/others/rpc/read.py | 2 +- test/others/rpc/restore-loop.py | 37 +- test/others/rpc/test.py | 75 +- test/others/rpc/version.py | 34 +- test/others/shell-job/run.py | 13 +- test/zdtm.py | 3681 +++++++++++++++------------- 28 files changed, 5738 insertions(+), 5167 deletions(-) diff --git a/coredump/criu_coredump/coredump.py b/coredump/criu_coredump/coredump.py index 2b0c37f1a..9b2c6c60c 100644 --- a/coredump/criu_coredump/coredump.py +++ b/coredump/criu_coredump/coredump.py @@ -36,795 +36,802 @@ from pycriu import images # Some memory-related constants PAGESIZE = 4096 status = { - "VMA_AREA_NONE" : 0 << 0, - "VMA_AREA_REGULAR" : 1 << 0, - "VMA_AREA_STACK" : 1 << 1, - "VMA_AREA_VSYSCALL" : 1 << 2, - "VMA_AREA_VDSO" : 1 << 3, - "VMA_FORCE_READ" : 1 << 4, - "VMA_AREA_HEAP" : 1 << 5, - "VMA_FILE_PRIVATE" : 1 << 6, - "VMA_FILE_SHARED" : 1 << 7, - "VMA_ANON_SHARED" : 1 << 8, - "VMA_ANON_PRIVATE" : 1 << 9, - "VMA_AREA_SYSVIPC" : 1 << 10, - "VMA_AREA_SOCKET" : 1 << 11, - "VMA_AREA_VVAR" : 1 << 12, - "VMA_AREA_AIORING" : 1 << 13, - "VMA_AREA_UNSUPP" : 1 << 31 + "VMA_AREA_NONE": 0 << 0, + "VMA_AREA_REGULAR": 1 << 0, + "VMA_AREA_STACK": 1 << 1, + "VMA_AREA_VSYSCALL": 1 << 2, + "VMA_AREA_VDSO": 1 << 3, + "VMA_FORCE_READ": 1 << 4, + "VMA_AREA_HEAP": 1 << 5, + "VMA_FILE_PRIVATE": 1 << 6, + "VMA_FILE_SHARED": 1 << 7, + "VMA_ANON_SHARED": 1 << 8, + "VMA_ANON_PRIVATE": 1 << 9, + "VMA_AREA_SYSVIPC": 1 << 10, + "VMA_AREA_SOCKET": 1 << 11, + "VMA_AREA_VVAR": 1 << 12, + "VMA_AREA_AIORING": 1 << 13, + "VMA_AREA_UNSUPP": 1 << 31 } -prot = { - "PROT_READ" : 0x1, - "PROT_WRITE" : 0x2, - "PROT_EXEC" : 0x4 -} +prot = {"PROT_READ": 0x1, "PROT_WRITE": 0x2, "PROT_EXEC": 0x4} + class elf_note: - nhdr = None # Elf_Nhdr; - owner = None # i.e. CORE or LINUX; - data = None # Ctypes structure with note data; + nhdr = None # Elf_Nhdr; + owner = None # i.e. CORE or LINUX; + data = None # Ctypes structure with note data; class coredump: - """ + """ A class to keep elf core dump components inside and functions to properly write them to file. 
""" - ehdr = None # Elf ehdr; - phdrs = [] # Array of Phdrs; - notes = [] # Array of elf_notes; - vmas = [] # Array of BytesIO with memory content; - # FIXME keeping all vmas in memory is a bad idea; + ehdr = None # Elf ehdr; + phdrs = [] # Array of Phdrs; + notes = [] # Array of elf_notes; + vmas = [] # Array of BytesIO with memory content; - def write(self, f): - """ + # FIXME keeping all vmas in memory is a bad idea; + + def write(self, f): + """ Write core dump to file f. """ - buf = io.BytesIO() - buf.write(self.ehdr) + buf = io.BytesIO() + buf.write(self.ehdr) - for phdr in self.phdrs: - buf.write(phdr) + for phdr in self.phdrs: + buf.write(phdr) - for note in self.notes: - buf.write(note.nhdr) - buf.write(note.owner) - buf.write("\0"*(8-len(note.owner))) - buf.write(note.data) + for note in self.notes: + buf.write(note.nhdr) + buf.write(note.owner) + buf.write("\0" * (8 - len(note.owner))) + buf.write(note.data) - offset = ctypes.sizeof(elf.Elf64_Ehdr()) - offset += (len(self.vmas) + 1)*ctypes.sizeof(elf.Elf64_Phdr()) + offset = ctypes.sizeof(elf.Elf64_Ehdr()) + offset += (len(self.vmas) + 1) * ctypes.sizeof(elf.Elf64_Phdr()) - filesz = 0 - for note in self.notes: - filesz += ctypes.sizeof(note.nhdr) + ctypes.sizeof(note.data) + 8 + filesz = 0 + for note in self.notes: + filesz += ctypes.sizeof(note.nhdr) + ctypes.sizeof(note.data) + 8 - note_align = PAGESIZE - ((offset + filesz) % PAGESIZE) + note_align = PAGESIZE - ((offset + filesz) % PAGESIZE) - if note_align == PAGESIZE: - note_align = 0 + if note_align == PAGESIZE: + note_align = 0 - if note_align != 0: - scratch = (ctypes.c_char * note_align)() - ctypes.memset(ctypes.addressof(scratch), 0, ctypes.sizeof(scratch)) - buf.write(scratch) + if note_align != 0: + scratch = (ctypes.c_char * note_align)() + ctypes.memset(ctypes.addressof(scratch), 0, ctypes.sizeof(scratch)) + buf.write(scratch) - for vma in self.vmas: - buf.write(vma.data) + for vma in self.vmas: + buf.write(vma.data) - buf.seek(0) - f.write(buf.read()) + buf.seek(0) + f.write(buf.read()) class coredump_generator: - """ + """ Generate core dump from criu images. """ - coredumps = {} # coredumps by pid; + coredumps = {} # coredumps by pid; - pstree = {} # process info by pid; - cores = {} # cores by pid; - mms = {} # mm by pid; - reg_files = None # reg-files; - pagemaps = {} # pagemap by pid; + pstree = {} # process info by pid; + cores = {} # cores by pid; + mms = {} # mm by pid; + reg_files = None # reg-files; + pagemaps = {} # pagemap by pid; - def _img_open_and_strip(self, name, single = False, pid = None): - """ + def _img_open_and_strip(self, name, single=False, pid=None): + """ Load criu image and strip it from magic and redundant list. """ - path = self._imgs_dir + "/" + name - if pid: - path += "-"+str(pid) - path += ".img" + path = self._imgs_dir + "/" + name + if pid: + path += "-" + str(pid) + path += ".img" - with open(path) as f: - img = images.load(f) + with open(path) as f: + img = images.load(f) - if single: - return img["entries"][0] - else: - return img["entries"] + if single: + return img["entries"][0] + else: + return img["entries"] - - def __call__(self, imgs_dir): - """ + def __call__(self, imgs_dir): + """ Parse criu images stored in directory imgs_dir to fill core dumps. 
""" - self._imgs_dir = imgs_dir - pstree = self._img_open_and_strip("pstree") + self._imgs_dir = imgs_dir + pstree = self._img_open_and_strip("pstree") - for p in pstree: - pid = p['pid'] + for p in pstree: + pid = p['pid'] - self.pstree[pid] = p - for tid in p['threads']: - self.cores[tid] = self._img_open_and_strip("core", True, tid) - self.mms[pid] = self._img_open_and_strip("mm", True, pid) - self.pagemaps[pid] = self._img_open_and_strip("pagemap", False, pid) + self.pstree[pid] = p + for tid in p['threads']: + self.cores[tid] = self._img_open_and_strip("core", True, tid) + self.mms[pid] = self._img_open_and_strip("mm", True, pid) + self.pagemaps[pid] = self._img_open_and_strip( + "pagemap", False, pid) - files = self._img_open_and_strip("files", False) - self.reg_files = [ x["reg"] for x in files if x["type"]=="REG" ] + files = self._img_open_and_strip("files", False) + self.reg_files = [x["reg"] for x in files if x["type"] == "REG"] - for pid in self.pstree: - self.coredumps[pid] = self._gen_coredump(pid) + for pid in self.pstree: + self.coredumps[pid] = self._gen_coredump(pid) - return self.coredumps + return self.coredumps - - def write(self, coredumps_dir, pid = None): - """ + def write(self, coredumps_dir, pid=None): + """ Write core dumpt to cores_dir directory. Specify pid to choose core dump of only one process. """ - for p in self.coredumps: - if pid and p != pid: - continue - with open(coredumps_dir+"/"+"core."+str(p), 'w+') as f: - self.coredumps[p].write(f) + for p in self.coredumps: + if pid and p != pid: + continue + with open(coredumps_dir + "/" + "core." + str(p), 'w+') as f: + self.coredumps[p].write(f) - def _gen_coredump(self, pid): - """ + def _gen_coredump(self, pid): + """ Generate core dump for pid. """ - cd = coredump() + cd = coredump() - # Generate everything backwards so it is easier to calculate offset. - cd.vmas = self._gen_vmas(pid) - cd.notes = self._gen_notes(pid) - cd.phdrs = self._gen_phdrs(pid, cd.notes, cd.vmas) - cd.ehdr = self._gen_ehdr(pid, cd.phdrs) + # Generate everything backwards so it is easier to calculate offset. + cd.vmas = self._gen_vmas(pid) + cd.notes = self._gen_notes(pid) + cd.phdrs = self._gen_phdrs(pid, cd.notes, cd.vmas) + cd.ehdr = self._gen_ehdr(pid, cd.phdrs) - return cd + return cd - def _gen_ehdr(self, pid, phdrs): - """ + def _gen_ehdr(self, pid, phdrs): + """ Generate elf header for process pid with program headers phdrs. """ - ehdr = elf.Elf64_Ehdr() + ehdr = elf.Elf64_Ehdr() - ctypes.memset(ctypes.addressof(ehdr), 0, ctypes.sizeof(ehdr)) - ehdr.e_ident[elf.EI_MAG0] = elf.ELFMAG0 - ehdr.e_ident[elf.EI_MAG1] = elf.ELFMAG1 - ehdr.e_ident[elf.EI_MAG2] = elf.ELFMAG2 - ehdr.e_ident[elf.EI_MAG3] = elf.ELFMAG3 - ehdr.e_ident[elf.EI_CLASS] = elf.ELFCLASS64 - ehdr.e_ident[elf.EI_DATA] = elf.ELFDATA2LSB - ehdr.e_ident[elf.EI_VERSION] = elf.EV_CURRENT + ctypes.memset(ctypes.addressof(ehdr), 0, ctypes.sizeof(ehdr)) + ehdr.e_ident[elf.EI_MAG0] = elf.ELFMAG0 + ehdr.e_ident[elf.EI_MAG1] = elf.ELFMAG1 + ehdr.e_ident[elf.EI_MAG2] = elf.ELFMAG2 + ehdr.e_ident[elf.EI_MAG3] = elf.ELFMAG3 + ehdr.e_ident[elf.EI_CLASS] = elf.ELFCLASS64 + ehdr.e_ident[elf.EI_DATA] = elf.ELFDATA2LSB + ehdr.e_ident[elf.EI_VERSION] = elf.EV_CURRENT - ehdr.e_type = elf.ET_CORE - ehdr.e_machine = elf.EM_X86_64 - ehdr.e_version = elf.EV_CURRENT - ehdr.e_phoff = ctypes.sizeof(elf.Elf64_Ehdr()) - ehdr.e_ehsize = ctypes.sizeof(elf.Elf64_Ehdr()) - ehdr.e_phentsize = ctypes.sizeof(elf.Elf64_Phdr()) - #FIXME Case len(phdrs) > PN_XNUM should be handled properly. 
- # See fs/binfmt_elf.c from linux kernel. - ehdr.e_phnum = len(phdrs) + ehdr.e_type = elf.ET_CORE + ehdr.e_machine = elf.EM_X86_64 + ehdr.e_version = elf.EV_CURRENT + ehdr.e_phoff = ctypes.sizeof(elf.Elf64_Ehdr()) + ehdr.e_ehsize = ctypes.sizeof(elf.Elf64_Ehdr()) + ehdr.e_phentsize = ctypes.sizeof(elf.Elf64_Phdr()) + #FIXME Case len(phdrs) > PN_XNUM should be handled properly. + # See fs/binfmt_elf.c from linux kernel. + ehdr.e_phnum = len(phdrs) - return ehdr + return ehdr - def _gen_phdrs(self, pid, notes, vmas): - """ + def _gen_phdrs(self, pid, notes, vmas): + """ Generate program headers for process pid. """ - phdrs = [] + phdrs = [] - offset = ctypes.sizeof(elf.Elf64_Ehdr()) - offset += (len(vmas) + 1)*ctypes.sizeof(elf.Elf64_Phdr()) + offset = ctypes.sizeof(elf.Elf64_Ehdr()) + offset += (len(vmas) + 1) * ctypes.sizeof(elf.Elf64_Phdr()) - filesz = 0 - for note in notes: - filesz += ctypes.sizeof(note.nhdr) + ctypes.sizeof(note.data) + 8 + filesz = 0 + for note in notes: + filesz += ctypes.sizeof(note.nhdr) + ctypes.sizeof(note.data) + 8 - # PT_NOTE - phdr = elf.Elf64_Phdr() - ctypes.memset(ctypes.addressof(phdr), 0, ctypes.sizeof(phdr)) - phdr.p_type = elf.PT_NOTE - phdr.p_offset = offset - phdr.p_filesz = filesz + # PT_NOTE + phdr = elf.Elf64_Phdr() + ctypes.memset(ctypes.addressof(phdr), 0, ctypes.sizeof(phdr)) + phdr.p_type = elf.PT_NOTE + phdr.p_offset = offset + phdr.p_filesz = filesz - phdrs.append(phdr) + phdrs.append(phdr) - note_align = PAGESIZE - ((offset + filesz) % PAGESIZE) + note_align = PAGESIZE - ((offset + filesz) % PAGESIZE) - if note_align == PAGESIZE: - note_align = 0 + if note_align == PAGESIZE: + note_align = 0 - offset += note_align + offset += note_align - # VMA phdrs + # VMA phdrs - for vma in vmas: - offset += filesz - filesz = vma.filesz - phdr = elf.Elf64_Phdr() - ctypes.memset(ctypes.addressof(phdr), 0, ctypes.sizeof(phdr)) - phdr.p_type = elf.PT_LOAD - phdr.p_align = PAGESIZE - phdr.p_paddr = 0 - phdr.p_offset = offset - phdr.p_vaddr = vma.start - phdr.p_memsz = vma.memsz - phdr.p_filesz = vma.filesz - phdr.p_flags = vma.flags + for vma in vmas: + offset += filesz + filesz = vma.filesz + phdr = elf.Elf64_Phdr() + ctypes.memset(ctypes.addressof(phdr), 0, ctypes.sizeof(phdr)) + phdr.p_type = elf.PT_LOAD + phdr.p_align = PAGESIZE + phdr.p_paddr = 0 + phdr.p_offset = offset + phdr.p_vaddr = vma.start + phdr.p_memsz = vma.memsz + phdr.p_filesz = vma.filesz + phdr.p_flags = vma.flags - phdrs.append(phdr) + phdrs.append(phdr) - return phdrs + return phdrs - def _gen_prpsinfo(self, pid): - """ + def _gen_prpsinfo(self, pid): + """ Generate NT_PRPSINFO note for process pid. """ - pstree = self.pstree[pid] - core = self.cores[pid] + pstree = self.pstree[pid] + core = self.cores[pid] - prpsinfo = elf.elf_prpsinfo() - ctypes.memset(ctypes.addressof(prpsinfo), 0, ctypes.sizeof(prpsinfo)) + prpsinfo = elf.elf_prpsinfo() + ctypes.memset(ctypes.addressof(prpsinfo), 0, ctypes.sizeof(prpsinfo)) - # FIXME TASK_ALIVE means that it is either running or sleeping, need to - # teach criu to distinguish them. - TASK_ALIVE = 0x1 - # XXX A bit of confusion here, as in ps "dead" and "zombie" - # state are two separate states, and we use TASK_DEAD for zombies. 
- TASK_DEAD = 0x2 - TASK_STOPPED = 0x3 - if core["tc"]["task_state"] == TASK_ALIVE: - prpsinfo.pr_state = 0 - if core["tc"]["task_state"] == TASK_DEAD: - prpsinfo.pr_state = 4 - if core["tc"]["task_state"] == TASK_STOPPED: - prpsinfo.pr_state = 3 - # Don't even ask me why it is so, just borrowed from linux - # source and made pr_state match. - prpsinfo.pr_sname = '.' if prpsinfo.pr_state > 5 else "RSDTZW"[prpsinfo.pr_state] - prpsinfo.pr_zomb = 1 if prpsinfo.pr_state == 4 else 0 - prpsinfo.pr_nice = core["thread_core"]["sched_prio"] if "sched_prio" in core["thread_core"] else 0 - prpsinfo.pr_flag = core["tc"]["flags"] - prpsinfo.pr_uid = core["thread_core"]["creds"]["uid"] - prpsinfo.pr_gid = core["thread_core"]["creds"]["gid"] - prpsinfo.pr_pid = pid - prpsinfo.pr_ppid = pstree["ppid"] - prpsinfo.pr_pgrp = pstree["pgid"] - prpsinfo.pr_sid = pstree["sid"] - prpsinfo.pr_fname = core["tc"]["comm"] - prpsinfo.pr_psargs = self._gen_cmdline(pid) + # FIXME TASK_ALIVE means that it is either running or sleeping, need to + # teach criu to distinguish them. + TASK_ALIVE = 0x1 + # XXX A bit of confusion here, as in ps "dead" and "zombie" + # state are two separate states, and we use TASK_DEAD for zombies. + TASK_DEAD = 0x2 + TASK_STOPPED = 0x3 + if core["tc"]["task_state"] == TASK_ALIVE: + prpsinfo.pr_state = 0 + if core["tc"]["task_state"] == TASK_DEAD: + prpsinfo.pr_state = 4 + if core["tc"]["task_state"] == TASK_STOPPED: + prpsinfo.pr_state = 3 + # Don't even ask me why it is so, just borrowed from linux + # source and made pr_state match. + prpsinfo.pr_sname = '.' if prpsinfo.pr_state > 5 else "RSDTZW" [ + prpsinfo.pr_state] + prpsinfo.pr_zomb = 1 if prpsinfo.pr_state == 4 else 0 + prpsinfo.pr_nice = core["thread_core"][ + "sched_prio"] if "sched_prio" in core["thread_core"] else 0 + prpsinfo.pr_flag = core["tc"]["flags"] + prpsinfo.pr_uid = core["thread_core"]["creds"]["uid"] + prpsinfo.pr_gid = core["thread_core"]["creds"]["gid"] + prpsinfo.pr_pid = pid + prpsinfo.pr_ppid = pstree["ppid"] + prpsinfo.pr_pgrp = pstree["pgid"] + prpsinfo.pr_sid = pstree["sid"] + prpsinfo.pr_fname = core["tc"]["comm"] + prpsinfo.pr_psargs = self._gen_cmdline(pid) - nhdr = elf.Elf64_Nhdr() - nhdr.n_namesz = 5 - nhdr.n_descsz = ctypes.sizeof(elf.elf_prpsinfo()) - nhdr.n_type = elf.NT_PRPSINFO + nhdr = elf.Elf64_Nhdr() + nhdr.n_namesz = 5 + nhdr.n_descsz = ctypes.sizeof(elf.elf_prpsinfo()) + nhdr.n_type = elf.NT_PRPSINFO - note = elf_note() - note.data = prpsinfo - note.owner = "CORE" - note.nhdr = nhdr + note = elf_note() + note.data = prpsinfo + note.owner = "CORE" + note.nhdr = nhdr - return note + return note - def _gen_prstatus(self, pid, tid): - """ + def _gen_prstatus(self, pid, tid): + """ Generate NT_PRSTATUS note for thread tid of process pid. """ - core = self.cores[tid] - regs = core["thread_info"]["gpregs"] - pstree = self.pstree[pid] + core = self.cores[tid] + regs = core["thread_info"]["gpregs"] + pstree = self.pstree[pid] - prstatus = elf.elf_prstatus() + prstatus = elf.elf_prstatus() - ctypes.memset(ctypes.addressof(prstatus), 0, ctypes.sizeof(prstatus)) + ctypes.memset(ctypes.addressof(prstatus), 0, ctypes.sizeof(prstatus)) - #FIXME setting only some of the fields for now. Revisit later. - prstatus.pr_pid = tid - prstatus.pr_ppid = pstree["ppid"] - prstatus.pr_pgrp = pstree["pgid"] - prstatus.pr_sid = pstree["sid"] + #FIXME setting only some of the fields for now. Revisit later. 
+ prstatus.pr_pid = tid + prstatus.pr_ppid = pstree["ppid"] + prstatus.pr_pgrp = pstree["pgid"] + prstatus.pr_sid = pstree["sid"] - prstatus.pr_reg.r15 = regs["r15"] - prstatus.pr_reg.r14 = regs["r14"] - prstatus.pr_reg.r13 = regs["r13"] - prstatus.pr_reg.r12 = regs["r12"] - prstatus.pr_reg.rbp = regs["bp"] - prstatus.pr_reg.rbx = regs["bx"] - prstatus.pr_reg.r11 = regs["r11"] - prstatus.pr_reg.r10 = regs["r10"] - prstatus.pr_reg.r9 = regs["r9"] - prstatus.pr_reg.r8 = regs["r8"] - prstatus.pr_reg.rax = regs["ax"] - prstatus.pr_reg.rcx = regs["cx"] - prstatus.pr_reg.rdx = regs["dx"] - prstatus.pr_reg.rsi = regs["si"] - prstatus.pr_reg.rdi = regs["di"] - prstatus.pr_reg.orig_rax = regs["orig_ax"] - prstatus.pr_reg.rip = regs["ip"] - prstatus.pr_reg.cs = regs["cs"] - prstatus.pr_reg.eflags = regs["flags"] - prstatus.pr_reg.rsp = regs["sp"] - prstatus.pr_reg.ss = regs["ss"] - prstatus.pr_reg.fs_base = regs["fs_base"] - prstatus.pr_reg.gs_base = regs["gs_base"] - prstatus.pr_reg.ds = regs["ds"] - prstatus.pr_reg.es = regs["es"] - prstatus.pr_reg.fs = regs["fs"] - prstatus.pr_reg.gs = regs["gs"] + prstatus.pr_reg.r15 = regs["r15"] + prstatus.pr_reg.r14 = regs["r14"] + prstatus.pr_reg.r13 = regs["r13"] + prstatus.pr_reg.r12 = regs["r12"] + prstatus.pr_reg.rbp = regs["bp"] + prstatus.pr_reg.rbx = regs["bx"] + prstatus.pr_reg.r11 = regs["r11"] + prstatus.pr_reg.r10 = regs["r10"] + prstatus.pr_reg.r9 = regs["r9"] + prstatus.pr_reg.r8 = regs["r8"] + prstatus.pr_reg.rax = regs["ax"] + prstatus.pr_reg.rcx = regs["cx"] + prstatus.pr_reg.rdx = regs["dx"] + prstatus.pr_reg.rsi = regs["si"] + prstatus.pr_reg.rdi = regs["di"] + prstatus.pr_reg.orig_rax = regs["orig_ax"] + prstatus.pr_reg.rip = regs["ip"] + prstatus.pr_reg.cs = regs["cs"] + prstatus.pr_reg.eflags = regs["flags"] + prstatus.pr_reg.rsp = regs["sp"] + prstatus.pr_reg.ss = regs["ss"] + prstatus.pr_reg.fs_base = regs["fs_base"] + prstatus.pr_reg.gs_base = regs["gs_base"] + prstatus.pr_reg.ds = regs["ds"] + prstatus.pr_reg.es = regs["es"] + prstatus.pr_reg.fs = regs["fs"] + prstatus.pr_reg.gs = regs["gs"] - nhdr = elf.Elf64_Nhdr() - nhdr.n_namesz = 5 - nhdr.n_descsz = ctypes.sizeof(elf.elf_prstatus()) - nhdr.n_type = elf.NT_PRSTATUS + nhdr = elf.Elf64_Nhdr() + nhdr.n_namesz = 5 + nhdr.n_descsz = ctypes.sizeof(elf.elf_prstatus()) + nhdr.n_type = elf.NT_PRSTATUS - note = elf_note() - note.data = prstatus - note.owner = "CORE" - note.nhdr = nhdr + note = elf_note() + note.data = prstatus + note.owner = "CORE" + note.nhdr = nhdr - return note + return note - def _gen_fpregset(self, pid, tid): - """ + def _gen_fpregset(self, pid, tid): + """ Generate NT_FPREGSET note for thread tid of process pid. 
""" - core = self.cores[tid] - regs = core["thread_info"]["fpregs"] + core = self.cores[tid] + regs = core["thread_info"]["fpregs"] - fpregset = elf.elf_fpregset_t() - ctypes.memset(ctypes.addressof(fpregset), 0, ctypes.sizeof(fpregset)) + fpregset = elf.elf_fpregset_t() + ctypes.memset(ctypes.addressof(fpregset), 0, ctypes.sizeof(fpregset)) - fpregset.cwd = regs["cwd"] - fpregset.swd = regs["swd"] - fpregset.ftw = regs["twd"] - fpregset.fop = regs["fop"] - fpregset.rip = regs["rip"] - fpregset.rdp = regs["rdp"] - fpregset.mxcsr = regs["mxcsr"] - fpregset.mxcr_mask = regs["mxcsr_mask"] - fpregset.st_space = (ctypes.c_uint * len(regs["st_space"]))(*regs["st_space"]) - fpregset.xmm_space = (ctypes.c_uint * len(regs["xmm_space"]))(*regs["xmm_space"]) - #fpregset.padding = regs["padding"] unused + fpregset.cwd = regs["cwd"] + fpregset.swd = regs["swd"] + fpregset.ftw = regs["twd"] + fpregset.fop = regs["fop"] + fpregset.rip = regs["rip"] + fpregset.rdp = regs["rdp"] + fpregset.mxcsr = regs["mxcsr"] + fpregset.mxcr_mask = regs["mxcsr_mask"] + fpregset.st_space = (ctypes.c_uint * len(regs["st_space"]))( + *regs["st_space"]) + fpregset.xmm_space = (ctypes.c_uint * len(regs["xmm_space"]))( + *regs["xmm_space"]) + #fpregset.padding = regs["padding"] unused - nhdr = elf.Elf64_Nhdr() - nhdr.n_namesz = 5 - nhdr.n_descsz = ctypes.sizeof(elf.elf_fpregset_t()) - nhdr.n_type = elf.NT_FPREGSET + nhdr = elf.Elf64_Nhdr() + nhdr.n_namesz = 5 + nhdr.n_descsz = ctypes.sizeof(elf.elf_fpregset_t()) + nhdr.n_type = elf.NT_FPREGSET - note = elf_note() - note.data = fpregset - note.owner = "CORE" - note.nhdr = nhdr + note = elf_note() + note.data = fpregset + note.owner = "CORE" + note.nhdr = nhdr - return note + return note - def _gen_x86_xstate(self, pid, tid): - """ + def _gen_x86_xstate(self, pid, tid): + """ Generate NT_X86_XSTATE note for thread tid of process pid. 
""" - core = self.cores[tid] - fpregs = core["thread_info"]["fpregs"] + core = self.cores[tid] + fpregs = core["thread_info"]["fpregs"] - data = elf.elf_xsave_struct() - ctypes.memset(ctypes.addressof(data), 0, ctypes.sizeof(data)) + data = elf.elf_xsave_struct() + ctypes.memset(ctypes.addressof(data), 0, ctypes.sizeof(data)) - data.i387.cwd = fpregs["cwd"] - data.i387.swd = fpregs["swd"] - data.i387.twd = fpregs["twd"] - data.i387.fop = fpregs["fop"] - data.i387.rip = fpregs["rip"] - data.i387.rdp = fpregs["rdp"] - data.i387.mxcsr = fpregs["mxcsr"] - data.i387.mxcsr_mask = fpregs["mxcsr_mask"] - data.i387.st_space = (ctypes.c_uint * len(fpregs["st_space"]))(*fpregs["st_space"]) - data.i387.xmm_space = (ctypes.c_uint * len(fpregs["xmm_space"]))(*fpregs["xmm_space"]) + data.i387.cwd = fpregs["cwd"] + data.i387.swd = fpregs["swd"] + data.i387.twd = fpregs["twd"] + data.i387.fop = fpregs["fop"] + data.i387.rip = fpregs["rip"] + data.i387.rdp = fpregs["rdp"] + data.i387.mxcsr = fpregs["mxcsr"] + data.i387.mxcsr_mask = fpregs["mxcsr_mask"] + data.i387.st_space = (ctypes.c_uint * len(fpregs["st_space"]))( + *fpregs["st_space"]) + data.i387.xmm_space = (ctypes.c_uint * len(fpregs["xmm_space"]))( + *fpregs["xmm_space"]) - if "xsave" in fpregs: - data.xsave_hdr.xstate_bv = fpregs["xsave"]["xstate_bv"] - data.ymmh.ymmh_space = (ctypes.c_uint * len(fpregs["xsave"]["ymmh_space"]))(*fpregs["xsave"]["ymmh_space"]) + if "xsave" in fpregs: + data.xsave_hdr.xstate_bv = fpregs["xsave"]["xstate_bv"] + data.ymmh.ymmh_space = (ctypes.c_uint * + len(fpregs["xsave"]["ymmh_space"]))( + *fpregs["xsave"]["ymmh_space"]) - nhdr = elf.Elf64_Nhdr() - nhdr.n_namesz = 6 - nhdr.n_descsz = ctypes.sizeof(data) - nhdr.n_type = elf.NT_X86_XSTATE + nhdr = elf.Elf64_Nhdr() + nhdr.n_namesz = 6 + nhdr.n_descsz = ctypes.sizeof(data) + nhdr.n_type = elf.NT_X86_XSTATE - note = elf_note() - note.data = data - note.owner = "LINUX" - note.nhdr = nhdr + note = elf_note() + note.data = data + note.owner = "LINUX" + note.nhdr = nhdr - return note + return note - def _gen_siginfo(self, pid, tid): - """ + def _gen_siginfo(self, pid, tid): + """ Generate NT_SIGINFO note for thread tid of process pid. """ - siginfo = elf.siginfo_t() - # FIXME zeroify everything for now - ctypes.memset(ctypes.addressof(siginfo), 0, ctypes.sizeof(siginfo)) + siginfo = elf.siginfo_t() + # FIXME zeroify everything for now + ctypes.memset(ctypes.addressof(siginfo), 0, ctypes.sizeof(siginfo)) - nhdr = elf.Elf64_Nhdr() - nhdr.n_namesz = 5 - nhdr.n_descsz = ctypes.sizeof(elf.siginfo_t()) - nhdr.n_type = elf.NT_SIGINFO + nhdr = elf.Elf64_Nhdr() + nhdr.n_namesz = 5 + nhdr.n_descsz = ctypes.sizeof(elf.siginfo_t()) + nhdr.n_type = elf.NT_SIGINFO - note = elf_note() - note.data = siginfo - note.owner = "CORE" - note.nhdr = nhdr + note = elf_note() + note.data = siginfo + note.owner = "CORE" + note.nhdr = nhdr - return note + return note - def _gen_auxv(self, pid): - """ + def _gen_auxv(self, pid): + """ Generate NT_AUXV note for thread tid of process pid. 
""" - mm = self.mms[pid] - num_auxv = len(mm["mm_saved_auxv"])/2 + mm = self.mms[pid] + num_auxv = len(mm["mm_saved_auxv"]) / 2 - class elf_auxv(ctypes.Structure): - _fields_ = [("auxv", elf.Elf64_auxv_t*num_auxv)] + class elf_auxv(ctypes.Structure): + _fields_ = [("auxv", elf.Elf64_auxv_t * num_auxv)] - auxv = elf_auxv() - for i in range(num_auxv): - auxv.auxv[i].a_type = mm["mm_saved_auxv"][i] - auxv.auxv[i].a_val = mm["mm_saved_auxv"][i+1] + auxv = elf_auxv() + for i in range(num_auxv): + auxv.auxv[i].a_type = mm["mm_saved_auxv"][i] + auxv.auxv[i].a_val = mm["mm_saved_auxv"][i + 1] - nhdr = elf.Elf64_Nhdr() - nhdr.n_namesz = 5 - nhdr.n_descsz = ctypes.sizeof(elf_auxv()) - nhdr.n_type = elf.NT_AUXV + nhdr = elf.Elf64_Nhdr() + nhdr.n_namesz = 5 + nhdr.n_descsz = ctypes.sizeof(elf_auxv()) + nhdr.n_type = elf.NT_AUXV - note = elf_note() - note.data = auxv - note.owner = "CORE" - note.nhdr = nhdr + note = elf_note() + note.data = auxv + note.owner = "CORE" + note.nhdr = nhdr - return note + return note - def _gen_files(self, pid): - """ + def _gen_files(self, pid): + """ Generate NT_FILE note for process pid. """ - mm = self.mms[pid] + mm = self.mms[pid] - class mmaped_file_info: - start = None - end = None - file_ofs = None - name = None + class mmaped_file_info: + start = None + end = None + file_ofs = None + name = None - infos = [] - for vma in mm["vmas"]: - if vma["shmid"] == 0: - # shmid == 0 means that it is not a file - continue + infos = [] + for vma in mm["vmas"]: + if vma["shmid"] == 0: + # shmid == 0 means that it is not a file + continue - shmid = vma["shmid"] - size = vma["end"] - vma["start"] - off = vma["pgoff"]/PAGESIZE + shmid = vma["shmid"] + size = vma["end"] - vma["start"] + off = vma["pgoff"] / PAGESIZE - files = self.reg_files - fname = filter(lambda x: x["id"] == shmid, files)[0]["name"] + files = self.reg_files + fname = filter(lambda x: x["id"] == shmid, files)[0]["name"] - info = mmaped_file_info() - info.start = vma["start"] - info.end = vma["end"] - info.file_ofs = off - info.name = fname + info = mmaped_file_info() + info.start = vma["start"] + info.end = vma["end"] + info.file_ofs = off + info.name = fname - infos.append(info) + infos.append(info) - # /* - # * Format of NT_FILE note: - # * - # * long count -- how many files are mapped - # * long page_size -- units for file_ofs - # * array of [COUNT] elements of - # * long start - # * long end - # * long file_ofs - # * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL... - # */ - fields = [] - fields.append(("count", ctypes.c_long)) - fields.append(("page_size", ctypes.c_long)) - for i in range(len(infos)): - fields.append(("start"+str(i), ctypes.c_long)) - fields.append(("end"+str(i), ctypes.c_long)) - fields.append(("file_ofs"+str(i), ctypes.c_long)) - for i in range(len(infos)): - fields.append(("name"+str(i), ctypes.c_char*(len(infos[i].name)+1))) + # /* + # * Format of NT_FILE note: + # * + # * long count -- how many files are mapped + # * long page_size -- units for file_ofs + # * array of [COUNT] elements of + # * long start + # * long end + # * long file_ofs + # * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL... 
+ # */ + fields = [] + fields.append(("count", ctypes.c_long)) + fields.append(("page_size", ctypes.c_long)) + for i in range(len(infos)): + fields.append(("start" + str(i), ctypes.c_long)) + fields.append(("end" + str(i), ctypes.c_long)) + fields.append(("file_ofs" + str(i), ctypes.c_long)) + for i in range(len(infos)): + fields.append( + ("name" + str(i), ctypes.c_char * (len(infos[i].name) + 1))) - class elf_files(ctypes.Structure): - _fields_ = fields + class elf_files(ctypes.Structure): + _fields_ = fields - data = elf_files() - data.count = len(infos) - data.page_size = PAGESIZE - for i in range(len(infos)): - info = infos[i] - setattr(data, "start"+str(i), info.start) - setattr(data, "end"+str(i), info.end) - setattr(data, "file_ofs"+str(i), info.file_ofs) - setattr(data, "name"+str(i), info.name) + data = elf_files() + data.count = len(infos) + data.page_size = PAGESIZE + for i in range(len(infos)): + info = infos[i] + setattr(data, "start" + str(i), info.start) + setattr(data, "end" + str(i), info.end) + setattr(data, "file_ofs" + str(i), info.file_ofs) + setattr(data, "name" + str(i), info.name) - nhdr = elf.Elf64_Nhdr() + nhdr = elf.Elf64_Nhdr() - nhdr.n_namesz = 5#XXX strlen + 1 - nhdr.n_descsz = ctypes.sizeof(elf_files()) - nhdr.n_type = elf.NT_FILE + nhdr.n_namesz = 5 #XXX strlen + 1 + nhdr.n_descsz = ctypes.sizeof(elf_files()) + nhdr.n_type = elf.NT_FILE - note = elf_note() - note.nhdr = nhdr - note.owner = "CORE" - note.data = data + note = elf_note() + note.nhdr = nhdr + note.owner = "CORE" + note.data = data - return note + return note - def _gen_thread_notes(self, pid, tid): - notes = [] + def _gen_thread_notes(self, pid, tid): + notes = [] - notes.append(self._gen_prstatus(pid, tid)) - notes.append(self._gen_fpregset(pid, tid)) - notes.append(self._gen_x86_xstate(pid, tid)) - notes.append(self._gen_siginfo(pid, tid)) + notes.append(self._gen_prstatus(pid, tid)) + notes.append(self._gen_fpregset(pid, tid)) + notes.append(self._gen_x86_xstate(pid, tid)) + notes.append(self._gen_siginfo(pid, tid)) - return notes + return notes - def _gen_notes(self, pid): - """ + def _gen_notes(self, pid): + """ Generate notes for core dump of process pid. """ - notes = [] + notes = [] - notes.append(self._gen_prpsinfo(pid)) + notes.append(self._gen_prpsinfo(pid)) - threads = self.pstree[pid]["threads"] + threads = self.pstree[pid]["threads"] - # Main thread first - notes += self._gen_thread_notes(pid, pid) + # Main thread first + notes += self._gen_thread_notes(pid, pid) - # Then other threads - for tid in threads: - if tid == pid: - continue + # Then other threads + for tid in threads: + if tid == pid: + continue - notes += self._gen_thread_notes(pid, tid) + notes += self._gen_thread_notes(pid, tid) - notes.append(self._gen_auxv(pid)) - notes.append(self._gen_files(pid)) + notes.append(self._gen_auxv(pid)) + notes.append(self._gen_files(pid)) - return notes + return notes - def _get_page(self, pid, page_no): - """ + def _get_page(self, pid, page_no): + """ Try to find memory page page_no in pages.img image for process pid. """ - pagemap = self.pagemaps[pid] + pagemap = self.pagemaps[pid] - # First entry is pagemap_head, we will need it later to open - # proper pages.img. - pages_id = pagemap[0]["pages_id"] - off = 0# in pages - for m in pagemap[1:]: - found = False - for i in range(m["nr_pages"]): - if m["vaddr"] + i*PAGESIZE == page_no*PAGESIZE: - found = True - break - off += 1 + # First entry is pagemap_head, we will need it later to open + # proper pages.img. 
+ pages_id = pagemap[0]["pages_id"] + off = 0 # in pages + for m in pagemap[1:]: + found = False + for i in range(m["nr_pages"]): + if m["vaddr"] + i * PAGESIZE == page_no * PAGESIZE: + found = True + break + off += 1 - if not found: - continue + if not found: + continue - if "in_parent" in m and m["in_parent"] == True: - ppid = self.pstree[pid]["ppid"] - return self._get_page(ppid, page_no) - else: - with open(self._imgs_dir+"/"+"pages-"+str(pages_id)+".img") as f: - f.seek(off*PAGESIZE) - return f.read(PAGESIZE) + if "in_parent" in m and m["in_parent"] == True: + ppid = self.pstree[pid]["ppid"] + return self._get_page(ppid, page_no) + else: + with open(self._imgs_dir + "/" + "pages-" + str(pages_id) + + ".img") as f: + f.seek(off * PAGESIZE) + return f.read(PAGESIZE) - return None + return None - def _gen_mem_chunk(self, pid, vma, size): - """ + def _gen_mem_chunk(self, pid, vma, size): + """ Obtain vma contents for process pid. """ - f = None + f = None - if size == 0: - return "" + if size == 0: + return "" - if vma["status"] & status["VMA_AREA_VVAR"]: - #FIXME this is what gdb does, as vvar vma - # is not readable from userspace? - return "\0"*size - elif vma["status"] & status["VMA_AREA_VSYSCALL"]: - #FIXME need to dump it with criu or read from - # current process. - return "\0"*size + if vma["status"] & status["VMA_AREA_VVAR"]: + #FIXME this is what gdb does, as vvar vma + # is not readable from userspace? + return "\0" * size + elif vma["status"] & status["VMA_AREA_VSYSCALL"]: + #FIXME need to dump it with criu or read from + # current process. + return "\0" * size - if vma["status"] & status["VMA_FILE_SHARED"] or \ - vma["status"] & status["VMA_FILE_PRIVATE"]: - # Open file before iterating vma pages - shmid = vma["shmid"] - off = vma["pgoff"] + if vma["status"] & status["VMA_FILE_SHARED"] or \ + vma["status"] & status["VMA_FILE_PRIVATE"]: + # Open file before iterating vma pages + shmid = vma["shmid"] + off = vma["pgoff"] - files = self.reg_files - fname = filter(lambda x: x["id"] == shmid, files)[0]["name"] + files = self.reg_files + fname = filter(lambda x: x["id"] == shmid, files)[0]["name"] - f = open(fname) - f.seek(off) + f = open(fname) + f.seek(off) - start = vma["start"] - end = vma["start"] + size + start = vma["start"] + end = vma["start"] + size - # Split requested memory chunk into pages, so it could be - # pictured as: - # - # "----" -- part of page with memory outside of our vma; - # "XXXX" -- memory from our vma; - # - # Start page Pages in the middle End page - # [-----XXXXX]...[XXXXXXXXXX][XXXXXXXXXX]...[XXX-------] - # - # Each page could be found in pages.img or in a standalone - # file described by shmid field in vma entry and - # corresponding entry in reg-files.img. - # For VMA_FILE_PRIVATE vma, unchanged pages are taken from - # a file, and changed ones -- from pages.img. - # Finally, if no page is found neither in pages.img nor - # in file, hole in inserted -- a page filled with zeroes. - start_page = start/PAGESIZE - end_page = end/PAGESIZE + # Split requested memory chunk into pages, so it could be + # pictured as: + # + # "----" -- part of page with memory outside of our vma; + # "XXXX" -- memory from our vma; + # + # Start page Pages in the middle End page + # [-----XXXXX]...[XXXXXXXXXX][XXXXXXXXXX]...[XXX-------] + # + # Each page could be found in pages.img or in a standalone + # file described by shmid field in vma entry and + # corresponding entry in reg-files.img. 
+ # For VMA_FILE_PRIVATE vma, unchanged pages are taken from + # a file, and changed ones -- from pages.img. + # Finally, if no page is found neither in pages.img nor + # in file, hole in inserted -- a page filled with zeroes. + start_page = start / PAGESIZE + end_page = end / PAGESIZE - buf = "" - for page_no in range(start_page, end_page+1): - page = None + buf = "" + for page_no in range(start_page, end_page + 1): + page = None - # Search for needed page in pages.img and reg-files.img - # and choose appropriate. - page_mem = self._get_page(pid, page_no) + # Search for needed page in pages.img and reg-files.img + # and choose appropriate. + page_mem = self._get_page(pid, page_no) - if f != None: - page = f.read(PAGESIZE) + if f != None: + page = f.read(PAGESIZE) - if page_mem != None: - # Page from pages.img has higher priority - # than one from maped file on disk. - page = page_mem + if page_mem != None: + # Page from pages.img has higher priority + # than one from maped file on disk. + page = page_mem - if page == None: - # Hole - page = PAGESIZE*"\0" + if page == None: + # Hole + page = PAGESIZE * "\0" - # If it is a start or end page, we need to read - # only part of it. - if page_no == start_page: - n_skip = start - page_no*PAGESIZE - if start_page == end_page: - n_read = size - else: - n_read = PAGESIZE - n_skip - elif page_no == end_page: - n_skip = 0 - n_read = end - page_no*PAGESIZE - else: - n_skip = 0 - n_read = PAGESIZE + # If it is a start or end page, we need to read + # only part of it. + if page_no == start_page: + n_skip = start - page_no * PAGESIZE + if start_page == end_page: + n_read = size + else: + n_read = PAGESIZE - n_skip + elif page_no == end_page: + n_skip = 0 + n_read = end - page_no * PAGESIZE + else: + n_skip = 0 + n_read = PAGESIZE - buf += page[n_skip : n_skip + n_read] + buf += page[n_skip:n_skip + n_read] - # Don't forget to close file. - if f != None: - f.close() + # Don't forget to close file. + if f != None: + f.close() - return buf + return buf - def _gen_cmdline(self, pid): - """ + def _gen_cmdline(self, pid): + """ Generate full command with arguments. """ - mm = self.mms[pid] + mm = self.mms[pid] - vma = {} - vma["start"] = mm["mm_arg_start"] - vma["end"] = mm["mm_arg_end"] - # Dummy flags and status. - vma["flags"] = 0 - vma["status"] = 0 - size = vma["end"] - vma["start"] + vma = {} + vma["start"] = mm["mm_arg_start"] + vma["end"] = mm["mm_arg_end"] + # Dummy flags and status. + vma["flags"] = 0 + vma["status"] = 0 + size = vma["end"] - vma["start"] - chunk = self._gen_mem_chunk(pid, vma, size) + chunk = self._gen_mem_chunk(pid, vma, size) - # Replace all '\0's with spaces. - return chunk.replace('\0', ' ') + # Replace all '\0's with spaces. + return chunk.replace('\0', ' ') - def _get_vma_dump_size(self, vma): - """ + def _get_vma_dump_size(self, vma): + """ Calculate amount of vma to put into core dump. 
""" - if vma["status"] & status["VMA_AREA_VVAR"] or \ - vma["status"] & status["VMA_AREA_VSYSCALL"] or \ - vma["status"] & status["VMA_AREA_VDSO"]: - size = vma["end"] - vma["start"] - elif vma["prot"] == 0: - size = 0 - elif vma["prot"] & prot["PROT_READ"] and \ - vma["prot"] & prot["PROT_EXEC"]: - size = PAGESIZE - elif vma["status"] & status["VMA_ANON_SHARED"] or \ - vma["status"] & status["VMA_FILE_SHARED"] or \ - vma["status"] & status["VMA_ANON_PRIVATE"] or \ - vma["status"] & status["VMA_FILE_PRIVATE"]: - size = vma["end"] - vma["start"] - else: - size = 0 + if vma["status"] & status["VMA_AREA_VVAR"] or \ + vma["status"] & status["VMA_AREA_VSYSCALL"] or \ + vma["status"] & status["VMA_AREA_VDSO"]: + size = vma["end"] - vma["start"] + elif vma["prot"] == 0: + size = 0 + elif vma["prot"] & prot["PROT_READ"] and \ + vma["prot"] & prot["PROT_EXEC"]: + size = PAGESIZE + elif vma["status"] & status["VMA_ANON_SHARED"] or \ + vma["status"] & status["VMA_FILE_SHARED"] or \ + vma["status"] & status["VMA_ANON_PRIVATE"] or \ + vma["status"] & status["VMA_FILE_PRIVATE"]: + size = vma["end"] - vma["start"] + else: + size = 0 - return size + return size - def _get_vma_flags(self, vma): - """ + def _get_vma_flags(self, vma): + """ Convert vma flags int elf flags. """ - flags = 0 + flags = 0 - if vma['prot'] & prot["PROT_READ"]: - flags = flags | elf.PF_R + if vma['prot'] & prot["PROT_READ"]: + flags = flags | elf.PF_R - if vma['prot'] & prot["PROT_WRITE"]: - flags = flags | elf.PF_W + if vma['prot'] & prot["PROT_WRITE"]: + flags = flags | elf.PF_W - if vma['prot'] & prot["PROT_EXEC"]: - flags = flags | elf.PF_X + if vma['prot'] & prot["PROT_EXEC"]: + flags = flags | elf.PF_X - return flags + return flags - def _gen_vmas(self, pid): - """ + def _gen_vmas(self, pid): + """ Generate vma contents for core dump for process pid. """ - mm = self.mms[pid] + mm = self.mms[pid] - class vma_class: - data = None - filesz = None - memsz = None - flags = None - start = None + class vma_class: + data = None + filesz = None + memsz = None + flags = None + start = None - vmas = [] - for vma in mm["vmas"]: - size = self._get_vma_dump_size(vma) + vmas = [] + for vma in mm["vmas"]: + size = self._get_vma_dump_size(vma) - chunk = self._gen_mem_chunk(pid, vma, size) + chunk = self._gen_mem_chunk(pid, vma, size) - v = vma_class() - v.filesz = self._get_vma_dump_size(vma) - v.data = self._gen_mem_chunk(pid, vma, v.filesz) - v.memsz = vma["end"] - vma["start"] - v.start = vma["start"] - v.flags = self._get_vma_flags(vma) + v = vma_class() + v.filesz = self._get_vma_dump_size(vma) + v.data = self._gen_mem_chunk(pid, vma, v.filesz) + v.memsz = vma["end"] - vma["start"] + v.start = vma["start"] + v.flags = self._get_vma_flags(vma) - vmas.append(v) + vmas.append(v) - return vmas + return vmas diff --git a/coredump/criu_coredump/elf.py b/coredump/criu_coredump/elf.py index 1da06a6fd..65da583c3 100644 --- a/coredump/criu_coredump/elf.py +++ b/coredump/criu_coredump/elf.py @@ -1,526 +1,685 @@ # Define structures and constants for generating elf file. 
import ctypes -Elf64_Half = ctypes.c_uint16 # typedef uint16_t Elf64_Half; -Elf64_Word = ctypes.c_uint32 # typedef uint32_t Elf64_Word; -Elf64_Addr = ctypes.c_uint64 # typedef uint64_t Elf64_Addr; -Elf64_Off = ctypes.c_uint64 # typedef uint64_t Elf64_Off; -Elf64_Xword = ctypes.c_uint64 # typedef uint64_t Elf64_Xword; +Elf64_Half = ctypes.c_uint16 # typedef uint16_t Elf64_Half; +Elf64_Word = ctypes.c_uint32 # typedef uint32_t Elf64_Word; +Elf64_Addr = ctypes.c_uint64 # typedef uint64_t Elf64_Addr; +Elf64_Off = ctypes.c_uint64 # typedef uint64_t Elf64_Off; +Elf64_Xword = ctypes.c_uint64 # typedef uint64_t Elf64_Xword; # Elf64_Ehdr related constants. # e_ident size. -EI_NIDENT = 16 # #define EI_NIDENT (16) +EI_NIDENT = 16 # #define EI_NIDENT (16) -EI_MAG0 = 0 # #define EI_MAG0 0 /* File identification byte 0 index */ -ELFMAG0 = 0x7f # #define ELFMAG0 0x7f /* Magic number byte 0 */ +EI_MAG0 = 0 # #define EI_MAG0 0 /* File identification byte 0 index */ +ELFMAG0 = 0x7f # #define ELFMAG0 0x7f /* Magic number byte 0 */ -EI_MAG1 = 1 # #define EI_MAG1 1 /* File identification byte 1 index */ -ELFMAG1 = ord('E') # #define ELFMAG1 'E' /* Magic number byte 1 */ +EI_MAG1 = 1 # #define EI_MAG1 1 /* File identification byte 1 index */ +ELFMAG1 = ord( + 'E') # #define ELFMAG1 'E' /* Magic number byte 1 */ -EI_MAG2 = 2 # #define EI_MAG2 2 /* File identification byte 2 index */ -ELFMAG2 = ord('L') # #define ELFMAG2 'L' /* Magic number byte 2 */ +EI_MAG2 = 2 # #define EI_MAG2 2 /* File identification byte 2 index */ +ELFMAG2 = ord( + 'L') # #define ELFMAG2 'L' /* Magic number byte 2 */ -EI_MAG3 = 3 # #define EI_MAG3 3 /* File identification byte 3 index */ -ELFMAG3 = ord('F') # #define ELFMAG3 'F' /* Magic number byte 3 */ +EI_MAG3 = 3 # #define EI_MAG3 3 /* File identification byte 3 index */ +ELFMAG3 = ord( + 'F') # #define ELFMAG3 'F' /* Magic number byte 3 */ -EI_CLASS = 4 # #define EI_CLASS 4 /* File class byte index */ +EI_CLASS = 4 # #define EI_CLASS 4 /* File class byte index */ -EI_DATA = 5 # #define EI_DATA 5 /* Data encoding byte index */ +EI_DATA = 5 # #define EI_DATA 5 /* Data encoding byte index */ -EI_VERSION = 6 # #define EI_VERSION 6 /* File version byte index */ +EI_VERSION = 6 # #define EI_VERSION 6 /* File version byte index */ -ELFDATA2LSB = 1 # #define ELFDATA2LSB 1 /* 2's complement, little endian */ +ELFDATA2LSB = 1 # #define ELFDATA2LSB 1 /* 2's complement, little endian */ -ELFCLASS64 = 2 # #define ELFCLASS64 2 /* 64-bit objects */ +ELFCLASS64 = 2 # #define ELFCLASS64 2 /* 64-bit objects */ # Legal values for e_type (object file type). -ET_CORE = 4 # #define ET_CORE 4 /* Core file */ +ET_CORE = 4 # #define ET_CORE 4 /* Core file */ # Legal values for e_machine (architecture). -EM_X86_64 = 62 # #define EM_X86_64 62 /* AMD x86-64 architecture */ +EM_X86_64 = 62 # #define EM_X86_64 62 /* AMD x86-64 architecture */ # Legal values for e_version (version). 
-EV_CURRENT = 1 # #define EV_CURRENT 1 /* Current version */ +EV_CURRENT = 1 # #define EV_CURRENT 1 /* Current version */ -class Elf64_Ehdr(ctypes.Structure): # typedef struct - _fields_ = [ # { - ("e_ident", ctypes.c_ubyte*EI_NIDENT), # unsigned char e_ident[EI_NIDENT]; - ("e_type", Elf64_Half), # Elf64_Half e_type; - ("e_machine", Elf64_Half), # Elf64_Half e_machine; - ("e_version", Elf64_Word), # Elf64_Word e_version; - ("e_entry", Elf64_Addr), # Elf64_Addr e_entry; - ("e_phoff", Elf64_Off), # Elf64_Off e_phoff; - ("e_shoff", Elf64_Off), # Elf64_Off e_shoff; - ("e_flags", Elf64_Word), # Elf64_Word e_flags; - ("e_ehsize", Elf64_Half), # Elf64_Half e_ehsize; - ("e_phentsize", Elf64_Half), # Elf64_Half e_phentsize; - ("e_phnum", Elf64_Half), # Elf64_Half e_phnum; - ("e_shentsize", Elf64_Half), # Elf64_Half e_shentsize; - ("e_shnum", Elf64_Half), # Elf64_Half e_shnum; - ("e_shstrndx", Elf64_Half) # Elf64_Half e_shstrndx; - ] # } Elf64_Ehdr; + +class Elf64_Ehdr(ctypes.Structure): # typedef struct + _fields_ = [ # { + ("e_ident", + ctypes.c_ubyte * EI_NIDENT), # unsigned char e_ident[EI_NIDENT]; + ("e_type", Elf64_Half), # Elf64_Half e_type; + ("e_machine", Elf64_Half), # Elf64_Half e_machine; + ("e_version", Elf64_Word), # Elf64_Word e_version; + ("e_entry", Elf64_Addr), # Elf64_Addr e_entry; + ("e_phoff", Elf64_Off), # Elf64_Off e_phoff; + ("e_shoff", Elf64_Off), # Elf64_Off e_shoff; + ("e_flags", Elf64_Word), # Elf64_Word e_flags; + ("e_ehsize", Elf64_Half), # Elf64_Half e_ehsize; + ("e_phentsize", Elf64_Half), # Elf64_Half e_phentsize; + ("e_phnum", Elf64_Half), # Elf64_Half e_phnum; + ("e_shentsize", Elf64_Half), # Elf64_Half e_shentsize; + ("e_shnum", Elf64_Half), # Elf64_Half e_shnum; + ("e_shstrndx", Elf64_Half) # Elf64_Half e_shstrndx; + ] # } Elf64_Ehdr; # Elf64_Phdr related constants. # Legal values for p_type (segment type). -PT_LOAD = 1 # #define PT_LOAD 1 /* Loadable program segment */ -PT_NOTE = 4 # #define PT_NOTE 4 /* Auxiliary information */ +PT_LOAD = 1 # #define PT_LOAD 1 /* Loadable program segment */ +PT_NOTE = 4 # #define PT_NOTE 4 /* Auxiliary information */ # Legal values for p_flags (segment flags). 
-PF_X = 1 # #define PF_X (1 << 0) /* Segment is executable */ -PF_W = 1 << 1 # #define PF_W (1 << 1) /* Segment is writable */ -PF_R = 1 << 2 # #define PF_R (1 << 2) /* Segment is readable */ +PF_X = 1 # #define PF_X (1 << 0) /* Segment is executable */ +PF_W = 1 << 1 # #define PF_W (1 << 1) /* Segment is writable */ +PF_R = 1 << 2 # #define PF_R (1 << 2) /* Segment is readable */ -class Elf64_Phdr(ctypes.Structure): # typedef struct - _fields_ = [ # { - ("p_type", Elf64_Word), # Elf64_Word p_type; - ("p_flags", Elf64_Word), # Elf64_Word p_flags; - ("p_offset", Elf64_Off), # Elf64_Off p_offset; - ("p_vaddr", Elf64_Addr), # Elf64_Addr p_vaddr; - ("p_paddr", Elf64_Addr), # Elf64_Addr p_paddr; - ("p_filesz", Elf64_Xword), # Elf64_Xword p_filesz; - ("p_memsz", Elf64_Xword), # Elf64_Xword p_memsz; - ("p_align", Elf64_Xword), # Elf64_Xword p_align; - ] # } Elf64_Phdr; + +class Elf64_Phdr(ctypes.Structure): # typedef struct + _fields_ = [ # { + ("p_type", Elf64_Word), # Elf64_Word p_type; + ("p_flags", Elf64_Word), # Elf64_Word p_flags; + ("p_offset", Elf64_Off), # Elf64_Off p_offset; + ("p_vaddr", Elf64_Addr), # Elf64_Addr p_vaddr; + ("p_paddr", Elf64_Addr), # Elf64_Addr p_paddr; + ("p_filesz", Elf64_Xword), # Elf64_Xword p_filesz; + ("p_memsz", Elf64_Xword), # Elf64_Xword p_memsz; + ("p_align", Elf64_Xword), # Elf64_Xword p_align; + ] # } Elf64_Phdr; # Elf64_auxv_t related constants. -class _Elf64_auxv_t_U(ctypes.Union): - _fields_ = [ - ("a_val", ctypes.c_uint64) - ] -class Elf64_auxv_t(ctypes.Structure): # typedef struct - _fields_ = [ # { - ("a_type", ctypes.c_uint64), # uint64_t a_type; /* Entry type */ - ("a_un", _Elf64_auxv_t_U) # union - # { - # uint64_t a_val; /* Integer value */ - # /* We use to have pointer elements added here. We cannot do that, - # though, since it does not work when using 32-bit definitions - # on 64-bit platforms and vice versa. */ - # } a_un; - ] # } Elf64_auxv_t; +class _Elf64_auxv_t_U(ctypes.Union): + _fields_ = [("a_val", ctypes.c_uint64)] + + +class Elf64_auxv_t(ctypes.Structure): # typedef struct + _fields_ = [ # { + ("a_type", + ctypes.c_uint64), # uint64_t a_type; /* Entry type */ + ("a_un", _Elf64_auxv_t_U) # union + # { + # uint64_t a_val; /* Integer value */ + # /* We use to have pointer elements added here. We cannot do that, + # though, since it does not work when using 32-bit definitions + # on 64-bit platforms and vice versa. */ + # } a_un; + ] # } Elf64_auxv_t; # Elf64_Nhdr related constants. 
-NT_PRSTATUS = 1 # #define NT_PRSTATUS 1 /* Contains copy of prstatus struct */ -NT_FPREGSET = 2 # #define NT_FPREGSET 2 /* Contains copy of fpregset struct */ -NT_PRPSINFO = 3 # #define NT_PRPSINFO 3 /* Contains copy of prpsinfo struct */ -NT_AUXV = 6 # #define NT_AUXV 6 /* Contains copy of auxv array */ -NT_SIGINFO = 0x53494749 # #define NT_SIGINFO 0x53494749 /* Contains copy of siginfo_t, +NT_PRSTATUS = 1 # #define NT_PRSTATUS 1 /* Contains copy of prstatus struct */ +NT_FPREGSET = 2 # #define NT_FPREGSET 2 /* Contains copy of fpregset struct */ +NT_PRPSINFO = 3 # #define NT_PRPSINFO 3 /* Contains copy of prpsinfo struct */ +NT_AUXV = 6 # #define NT_AUXV 6 /* Contains copy of auxv array */ +NT_SIGINFO = 0x53494749 # #define NT_SIGINFO 0x53494749 /* Contains copy of siginfo_t, # size might increase */ -NT_FILE = 0x46494c45 # #define NT_FILE 0x46494c45 /* Contains information about mapped +NT_FILE = 0x46494c45 # #define NT_FILE 0x46494c45 /* Contains information about mapped # files */ -NT_X86_XSTATE = 0x202 # #define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */ +NT_X86_XSTATE = 0x202 # #define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */ -class Elf64_Nhdr(ctypes.Structure): # typedef struct - _fields_ = [ # { - ("n_namesz", Elf64_Word), # Elf64_Word n_namesz; /* Length of the note's name. */ - ("n_descsz", Elf64_Word), # Elf64_Word n_descsz; /* Length of the note's descriptor. */ - ("n_type", Elf64_Word), # Elf64_Word n_type; /* Type of the note. */ - ] # } Elf64_Nhdr; + +class Elf64_Nhdr(ctypes.Structure): # typedef struct + _fields_ = [ # { + ( + "n_namesz", Elf64_Word + ), # Elf64_Word n_namesz; /* Length of the note's name. */ + ( + "n_descsz", Elf64_Word + ), # Elf64_Word n_descsz; /* Length of the note's descriptor. */ + ("n_type", Elf64_Word + ), # Elf64_Word n_type; /* Type of the note. */ + ] # } Elf64_Nhdr; # Elf64_Shdr related constants. 
-class Elf64_Shdr(ctypes.Structure): # typedef struct - _fields_ = [ # { - ("sh_name", Elf64_Word), # Elf64_Word sh_name; /* Section name (string tbl index) */ - ("sh_type", Elf64_Word), # Elf64_Word sh_type; /* Section type */ - ("sh_flags", Elf64_Xword), # Elf64_Xword sh_flags; /* Section flags */ - ("sh_addr", Elf64_Addr), # Elf64_Addr sh_addr; /* Section virtual addr at execution */ - ("sh_offset", Elf64_Off), # Elf64_Off sh_offset; /* Section file offset */ - ("sh_size", Elf64_Xword), # Elf64_Xword sh_size; /* Section size in bytes */ - ("sh_link", Elf64_Word), # Elf64_Word sh_link; /* Link to another section */ - ("sh_info", Elf64_Word), # Elf64_Word sh_info; /* Additional section information */ - ("sh_addralign",Elf64_Xword), # Elf64_Xword sh_addralign; /* Section alignment */ - ("sh_entsize", Elf64_Xword) # Elf64_Xword sh_entsize; /* Entry size if section holds table */ - ] # } Elf64_Shdr; + +class Elf64_Shdr(ctypes.Structure): # typedef struct + _fields_ = [ # { + ( + "sh_name", Elf64_Word + ), # Elf64_Word sh_name; /* Section name (string tbl index) */ + ("sh_type", Elf64_Word + ), # Elf64_Word sh_type; /* Section type */ + ("sh_flags", Elf64_Xword + ), # Elf64_Xword sh_flags; /* Section flags */ + ( + "sh_addr", Elf64_Addr + ), # Elf64_Addr sh_addr; /* Section virtual addr at execution */ + ( + "sh_offset", Elf64_Off + ), # Elf64_Off sh_offset; /* Section file offset */ + ( + "sh_size", Elf64_Xword + ), # Elf64_Xword sh_size; /* Section size in bytes */ + ( + "sh_link", Elf64_Word + ), # Elf64_Word sh_link; /* Link to another section */ + ( + "sh_info", Elf64_Word + ), # Elf64_Word sh_info; /* Additional section information */ + ("sh_addralign", Elf64_Xword + ), # Elf64_Xword sh_addralign; /* Section alignment */ + ( + "sh_entsize", Elf64_Xword + ) # Elf64_Xword sh_entsize; /* Entry size if section holds table */ + ] # } Elf64_Shdr; # elf_prstatus related constants. + # Signal info. -class elf_siginfo(ctypes.Structure): # struct elf_siginfo - _fields_ = [ # { - ("si_signo", ctypes.c_int), # int si_signo; /* Signal number. */ - ("si_code", ctypes.c_int), # int si_code; /* Extra code. */ - ("si_errno", ctypes.c_int) # int si_errno; /* Errno. */ - ] # }; +class elf_siginfo(ctypes.Structure): # struct elf_siginfo + _fields_ = [ # { + ("si_signo", ctypes.c_int + ), # int si_signo; /* Signal number. */ + ("si_code", ctypes.c_int + ), # int si_code; /* Extra code. */ + ("si_errno", ctypes.c_int + ) # int si_errno; /* Errno. */ + ] # }; + # A time value that is accurate to the nearest # microsecond but also has a range of years. -class timeval(ctypes.Structure): # struct timeval - _fields_ = [ # { - ("tv_sec", ctypes.c_long), # __time_t tv_sec; /* Seconds. */ - ("tv_usec", ctypes.c_long) # __suseconds_t tv_usec; /* Microseconds. */ - ] # }; +class timeval(ctypes.Structure): # struct timeval + _fields_ = [ # { + ("tv_sec", + ctypes.c_long), # __time_t tv_sec; /* Seconds. */ + ("tv_usec", ctypes.c_long + ) # __suseconds_t tv_usec; /* Microseconds. 
*/ + ] # }; + + +class user_regs_struct(ctypes.Structure): # struct user_regs_struct + _fields_ = [ # { + ("r15", + ctypes.c_ulonglong), # __extension__ unsigned long long int r15; + ("r14", + ctypes.c_ulonglong), # __extension__ unsigned long long int r14; + ("r13", + ctypes.c_ulonglong), # __extension__ unsigned long long int r13; + ("r12", + ctypes.c_ulonglong), # __extension__ unsigned long long int r12; + ("rbp", + ctypes.c_ulonglong), # __extension__ unsigned long long int rbp; + ("rbx", + ctypes.c_ulonglong), # __extension__ unsigned long long int rbx; + ("r11", + ctypes.c_ulonglong), # __extension__ unsigned long long int r11; + ("r10", + ctypes.c_ulonglong), # __extension__ unsigned long long int r10; + ("r9", + ctypes.c_ulonglong), # __extension__ unsigned long long int r9; + ("r8", + ctypes.c_ulonglong), # __extension__ unsigned long long int r8; + ("rax", + ctypes.c_ulonglong), # __extension__ unsigned long long int rax; + ("rcx", + ctypes.c_ulonglong), # __extension__ unsigned long long int rcx; + ("rdx", + ctypes.c_ulonglong), # __extension__ unsigned long long int rdx; + ("rsi", + ctypes.c_ulonglong), # __extension__ unsigned long long int rsi; + ("rdi", + ctypes.c_ulonglong), # __extension__ unsigned long long int rdi; + ("orig_rax", ctypes.c_ulonglong + ), # __extension__ unsigned long long int orig_rax; + ("rip", + ctypes.c_ulonglong), # __extension__ unsigned long long int rip; + ("cs", + ctypes.c_ulonglong), # __extension__ unsigned long long int cs; + ("eflags", + ctypes.c_ulonglong), # __extension__ unsigned long long int eflags; + ("rsp", + ctypes.c_ulonglong), # __extension__ unsigned long long int rsp; + ("ss", + ctypes.c_ulonglong), # __extension__ unsigned long long int ss; + ("fs_base", ctypes.c_ulonglong + ), # __extension__ unsigned long long int fs_base; + ("gs_base", ctypes.c_ulonglong + ), # __extension__ unsigned long long int gs_base; + ("ds", + ctypes.c_ulonglong), # __extension__ unsigned long long int ds; + ("es", + ctypes.c_ulonglong), # __extension__ unsigned long long int es; + ("fs", + ctypes.c_ulonglong), # __extension__ unsigned long long int fs; + ("gs", ctypes.c_ulonglong + ) # __extension__ unsigned long long int gs; + ] # }; -class user_regs_struct(ctypes.Structure): # struct user_regs_struct - _fields_ = [ # { - ("r15", ctypes.c_ulonglong), # __extension__ unsigned long long int r15; - ("r14", ctypes.c_ulonglong), # __extension__ unsigned long long int r14; - ("r13", ctypes.c_ulonglong), # __extension__ unsigned long long int r13; - ("r12", ctypes.c_ulonglong), # __extension__ unsigned long long int r12; - ("rbp", ctypes.c_ulonglong), # __extension__ unsigned long long int rbp; - ("rbx", ctypes.c_ulonglong), # __extension__ unsigned long long int rbx; - ("r11", ctypes.c_ulonglong), # __extension__ unsigned long long int r11; - ("r10", ctypes.c_ulonglong), # __extension__ unsigned long long int r10; - ("r9", ctypes.c_ulonglong), # __extension__ unsigned long long int r9; - ("r8", ctypes.c_ulonglong), # __extension__ unsigned long long int r8; - ("rax", ctypes.c_ulonglong), # __extension__ unsigned long long int rax; - ("rcx", ctypes.c_ulonglong), # __extension__ unsigned long long int rcx; - ("rdx", ctypes.c_ulonglong), # __extension__ unsigned long long int rdx; - ("rsi", ctypes.c_ulonglong), # __extension__ unsigned long long int rsi; - ("rdi", ctypes.c_ulonglong), # __extension__ unsigned long long int rdi; - ("orig_rax", ctypes.c_ulonglong), # __extension__ unsigned long long int orig_rax; - ("rip", ctypes.c_ulonglong), # __extension__ 
unsigned long long int rip; - ("cs", ctypes.c_ulonglong), # __extension__ unsigned long long int cs; - ("eflags", ctypes.c_ulonglong), # __extension__ unsigned long long int eflags; - ("rsp", ctypes.c_ulonglong), # __extension__ unsigned long long int rsp; - ("ss", ctypes.c_ulonglong), # __extension__ unsigned long long int ss; - ("fs_base", ctypes.c_ulonglong), # __extension__ unsigned long long int fs_base; - ("gs_base", ctypes.c_ulonglong), # __extension__ unsigned long long int gs_base; - ("ds", ctypes.c_ulonglong), # __extension__ unsigned long long int ds; - ("es", ctypes.c_ulonglong), # __extension__ unsigned long long int es; - ("fs", ctypes.c_ulonglong), # __extension__ unsigned long long int fs; - ("gs", ctypes.c_ulonglong) # __extension__ unsigned long long int gs; - ] # }; #elf_greg_t = ctypes.c_ulonglong #ELF_NGREG = ctypes.sizeof(user_regs_struct)/ctypes.sizeof(elf_greg_t) #elf_gregset_t = elf_greg_t*ELF_NGREG elf_gregset_t = user_regs_struct -class elf_prstatus(ctypes.Structure): # struct elf_prstatus - _fields_ = [ # { - ("pr_info", elf_siginfo), # struct elf_siginfo pr_info; /* Info associated with signal. */ - ("pr_cursig", ctypes.c_short), # short int pr_cursig; /* Current signal. */ - ("pr_sigpend", ctypes.c_ulong), # unsigned long int pr_sigpend; /* Set of pending signals. */ - ("pr_sighold", ctypes.c_ulong), # unsigned long int pr_sighold; /* Set of held signals. */ - ("pr_pid", ctypes.c_int), # __pid_t pr_pid; - ("pr_ppid", ctypes.c_int), # __pid_t pr_ppid; - ("pr_pgrp", ctypes.c_int), # __pid_t pr_pgrp; - ("pr_sid", ctypes.c_int), # __pid_t pr_sid; - ("pr_utime", timeval), # struct timeval pr_utime; /* User time. */ - ("pr_stime", timeval), # struct timeval pr_stime; /* System time. */ - ("pr_cutime", timeval), # struct timeval pr_cutime; /* Cumulative user time. */ - ("pr_cstime", timeval), # struct timeval pr_cstime; /* Cumulative system time. */ - ("pr_reg", elf_gregset_t), # elf_gregset_t pr_reg; /* GP registers. */ - ("pr_fpvalid", ctypes.c_int) # int pr_fpvalid; /* True if math copro being used. */ - ] # }; + +class elf_prstatus(ctypes.Structure): # struct elf_prstatus + _fields_ = [ # { + ( + "pr_info", elf_siginfo + ), # struct elf_siginfo pr_info; /* Info associated with signal. */ + ("pr_cursig", ctypes.c_short + ), # short int pr_cursig; /* Current signal. */ + ( + "pr_sigpend", ctypes.c_ulong + ), # unsigned long int pr_sigpend; /* Set of pending signals. */ + ( + "pr_sighold", ctypes.c_ulong + ), # unsigned long int pr_sighold; /* Set of held signals. */ + ("pr_pid", ctypes.c_int), # __pid_t pr_pid; + ("pr_ppid", ctypes.c_int), # __pid_t pr_ppid; + ("pr_pgrp", ctypes.c_int), # __pid_t pr_pgrp; + ("pr_sid", ctypes.c_int), # __pid_t pr_sid; + ("pr_utime", + timeval), # struct timeval pr_utime; /* User time. */ + ("pr_stime", timeval + ), # struct timeval pr_stime; /* System time. */ + ( + "pr_cutime", timeval + ), # struct timeval pr_cutime; /* Cumulative user time. */ + ( + "pr_cstime", timeval + ), # struct timeval pr_cstime; /* Cumulative system time. */ + ("pr_reg", elf_gregset_t + ), # elf_gregset_t pr_reg; /* GP registers. */ + ( + "pr_fpvalid", ctypes.c_int + ) # int pr_fpvalid; /* True if math copro being used. */ + ] # }; # elf_prpsinfo related constants. -ELF_PRARGSZ = 80 # #define ELF_PRARGSZ (80) /* Number of chars for args. */ - -class elf_prpsinfo(ctypes.Structure): # struct elf_prpsinfo - _fields_ = [ # { - ("pr_state", ctypes.c_byte), # char pr_state; /* Numeric process state. 
*/ - ("pr_sname", ctypes.c_char), # char pr_sname; /* Char for pr_state. */ - ("pr_zomb", ctypes.c_byte), # char pr_zomb; /* Zombie. */ - ("pr_nice", ctypes.c_byte), # char pr_nice; /* Nice val. */ - ("pr_flag", ctypes.c_ulong), # unsigned long int pr_flag; /* Flags. */ - # #if __WORDSIZE == 32 - # unsigned short int pr_uid; - # unsigned short int pr_gid; - # #else - ("pr_uid", ctypes.c_uint), # unsigned int pr_uid; - ("pr_gid", ctypes.c_uint), # unsigned int pr_gid; - # #endif - ("pr_pid", ctypes.c_int), # int pr_pid, pr_ppid, pr_pgrp, pr_sid; - ("pr_ppid", ctypes.c_int), - ("pr_pgrp", ctypes.c_int), - ("pr_sid", ctypes.c_int), - # /* Lots missing */ - ("pr_fname", ctypes.c_char*16), # char pr_fname[16]; /* Filename of executable. */ - ("pr_psargs", ctypes.c_char*ELF_PRARGSZ) # char pr_psargs[ELF_PRARGSZ]; /* Initial part of arg list. */ - ] # }; +ELF_PRARGSZ = 80 # #define ELF_PRARGSZ (80) /* Number of chars for args. */ -class user_fpregs_struct(ctypes.Structure): # struct user_fpregs_struct - _fields_ = [ # { - ("cwd", ctypes.c_ushort), # unsigned short int cwd; - ("swd", ctypes.c_ushort), # unsigned short int swd; - ("ftw", ctypes.c_ushort), # unsigned short int ftw; - ("fop", ctypes.c_ushort), # unsigned short int fop; - ("rip", ctypes.c_ulonglong), # __extension__ unsigned long long int rip; - ("rdp", ctypes.c_ulonglong), # __extension__ unsigned long long int rdp; - ("mxcsr", ctypes.c_uint), # unsigned int mxcsr; - ("mxcr_mask", ctypes.c_uint), # unsigned int mxcr_mask; - ("st_space", ctypes.c_uint*32), # unsigned int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ - ("xmm_space", ctypes.c_uint*64), # unsigned int xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ - ("padding", ctypes.c_uint*24), # unsigned int padding[24]; - ] # }; +class elf_prpsinfo(ctypes.Structure): # struct elf_prpsinfo + _fields_ = [ # { + ( + "pr_state", ctypes.c_byte + ), # char pr_state; /* Numeric process state. */ + ( + "pr_sname", ctypes.c_char + ), # char pr_sname; /* Char for pr_state. */ + ("pr_zomb", ctypes.c_byte + ), # char pr_zomb; /* Zombie. */ + ("pr_nice", ctypes.c_byte + ), # char pr_nice; /* Nice val. */ + ("pr_flag", ctypes.c_ulong + ), # unsigned long int pr_flag; /* Flags. */ + # #if __WORDSIZE == 32 + # unsigned short int pr_uid; + # unsigned short int pr_gid; + # #else + ("pr_uid", ctypes.c_uint), # unsigned int pr_uid; + ("pr_gid", ctypes.c_uint), # unsigned int pr_gid; + # #endif + ("pr_pid", ctypes.c_int), # int pr_pid, pr_ppid, pr_pgrp, pr_sid; + ("pr_ppid", ctypes.c_int), + ("pr_pgrp", ctypes.c_int), + ("pr_sid", ctypes.c_int), + # /* Lots missing */ + ( + "pr_fname", ctypes.c_char * 16 + ), # char pr_fname[16]; /* Filename of executable. */ + ( + "pr_psargs", ctypes.c_char * ELF_PRARGSZ + ) # char pr_psargs[ELF_PRARGSZ]; /* Initial part of arg list. 
*/ + ] # }; + + +class user_fpregs_struct(ctypes.Structure): # struct user_fpregs_struct + _fields_ = [ # { + ("cwd", ctypes.c_ushort), # unsigned short int cwd; + ("swd", ctypes.c_ushort), # unsigned short int swd; + ("ftw", ctypes.c_ushort), # unsigned short int ftw; + ("fop", ctypes.c_ushort), # unsigned short int fop; + ("rip", + ctypes.c_ulonglong), # __extension__ unsigned long long int rip; + ("rdp", + ctypes.c_ulonglong), # __extension__ unsigned long long int rdp; + ("mxcsr", ctypes.c_uint), # unsigned int mxcsr; + ("mxcr_mask", ctypes.c_uint), # unsigned int mxcr_mask; + ( + "st_space", ctypes.c_uint * 32 + ), # unsigned int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ + ( + "xmm_space", ctypes.c_uint * 64 + ), # unsigned int xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ + ("padding", + ctypes.c_uint * 24), # unsigned int padding[24]; + ] # }; elf_fpregset_t = user_fpregs_struct - # siginfo_t related constants. -_SI_MAX_SIZE = 128 -_SI_PAD_SIZE = (_SI_MAX_SIZE/ctypes.sizeof(ctypes.c_int)) - 4 +_SI_MAX_SIZE = 128 +_SI_PAD_SIZE = (_SI_MAX_SIZE / ctypes.sizeof(ctypes.c_int)) - 4 - # /* kill(). */ -class _siginfo_t_U_kill(ctypes.Structure): # struct - _fields_ = [ # { - ("si_pid", ctypes.c_int), # __pid_t si_pid; /* Sending process ID. */ - ("si_uid", ctypes.c_uint) # __uid_t si_uid; /* Real user ID of sending process. */ - ] # } _kill; +# /* kill(). */ +class _siginfo_t_U_kill(ctypes.Structure): # struct + _fields_ = [ # { + ("si_pid", ctypes.c_int + ), # __pid_t si_pid; /* Sending process ID. */ + ( + "si_uid", ctypes.c_uint + ) # __uid_t si_uid; /* Real user ID of sending process. */ + ] # } _kill; # Type for data associated with a signal. -class sigval_t(ctypes.Union): # typedef union sigval - _fields_ = [ # { - ("sival_int", ctypes.c_int), # int sival_int; - ("sical_ptr", ctypes.c_void_p), # void *sival_ptr; - ] # } sigval_t; - - # /* POSIX.1b timers. */ -class _siginfo_t_U_timer(ctypes.Structure): # struct - _fields_ = [ # { - ("si_tid", ctypes.c_int), # int si_tid; /* Timer ID. */ - ("si_overrun", ctypes.c_int), # int si_overrun; /* Overrun count. */ - ("si_sigval", sigval_t) # sigval_t si_sigval; /* Signal value. */ - ] # } _timer; +class sigval_t(ctypes.Union): # typedef union sigval + _fields_ = [ # { + ("sival_int", ctypes.c_int), # int sival_int; + ("sical_ptr", ctypes.c_void_p), # void *sival_ptr; + ] # } sigval_t; - # /* POSIX.1b signals. */ -class _siginfo_t_U_rt(ctypes.Structure): # struct - _fields_ = [ # { - ("si_pid", ctypes.c_int), # __pid_t si_pid; /* Sending process ID. */ - ("si_uid", ctypes.c_uint), # __uid_t si_uid; /* Real user ID of sending process. */ - ("si_sigval", sigval_t) # sigval_t si_sigval; /* Signal value. */ - ] # } _rt; + # /* POSIX.1b timers. */ +class _siginfo_t_U_timer(ctypes.Structure): # struct + _fields_ = [ # { + ("si_tid", + ctypes.c_int), # int si_tid; /* Timer ID. */ + ("si_overrun", ctypes.c_int + ), # int si_overrun; /* Overrun count. */ + ("si_sigval", sigval_t + ) # sigval_t si_sigval; /* Signal value. */ + ] # } _timer; - # /* SIGCHLD. */ -class _siginfo_t_U_sigchld(ctypes.Structure): # struct - _fields_ = [ # { - ("si_pid", ctypes.c_int), # __pid_t si_pid; /* Which child. */ - ("si_uid", ctypes.c_uint), # __uid_t si_uid; /* Real user ID of sending process. */ - ("si_status", ctypes.c_int), # int si_status; /* Exit value or signal. 
*/ - ("si_utime", ctypes.c_long), # __sigchld_clock_t si_utime; - ("si_stime", ctypes.c_long) # __sigchld_clock_t si_stime; - ] # } _sigchld; - - # /* SIGILL, SIGFPE, SIGSEGV, SIGBUS. */ -class _siginfo_t_U_sigfault(ctypes.Structure): # struct - _fields_ = [ # { - ("si_addr", ctypes.c_void_p), # void *si_addr; /* Faulting insn/memory ref. */ - ("si_addr_lsb", ctypes.c_short) # short int si_addr_lsb; /* Valid LSB of the reported address. */ - ] # } _sigfault; - - # /* SIGPOLL. */ -class _siginfo_t_U_sigpoll(ctypes.Structure): # struct - _fields_ = [ # { - ("si_band", ctypes.c_long), # long int si_band; /* Band event for SIGPOLL. */ - ("si_fd", ctypes.c_int) # int si_fd; - ] # } _sigpoll; + # /* POSIX.1b signals. */ +class _siginfo_t_U_rt(ctypes.Structure): # struct + _fields_ = [ # { + ("si_pid", ctypes.c_int + ), # __pid_t si_pid; /* Sending process ID. */ + ( + "si_uid", ctypes.c_uint + ), # __uid_t si_uid; /* Real user ID of sending process. */ + ("si_sigval", sigval_t + ) # sigval_t si_sigval; /* Signal value. */ + ] # } _rt; - # /* SIGSYS. */ -class _siginfo_t_U_sigsys(ctypes.Structure): # struct - _fields_ = [ # { - ("_call_addr", ctypes.c_void_p), # void *_call_addr; /* Calling user insn. */ - ("_syscall", ctypes.c_int), # int _syscall; /* Triggering system call number. */ - ("_arch", ctypes.c_uint) # unsigned int _arch; /* AUDIT_ARCH_* of syscall. */ - ] # } _sigsys; + # /* SIGCHLD. */ +class _siginfo_t_U_sigchld(ctypes.Structure): # struct + _fields_ = [ # { + ("si_pid", + ctypes.c_int), # __pid_t si_pid; /* Which child. */ + ( + "si_uid", ctypes.c_uint + ), # __uid_t si_uid; /* Real user ID of sending process. */ + ("si_status", ctypes.c_int + ), # int si_status; /* Exit value or signal. */ + ("si_utime", ctypes.c_long), # __sigchld_clock_t si_utime; + ("si_stime", ctypes.c_long) # __sigchld_clock_t si_stime; + ] # } _sigchld; -class _siginfo_t_U(ctypes.Union): # union - _fields_ = [ # { - ("_pad", ctypes.c_int*_SI_PAD_SIZE), # int _pad[__SI_PAD_SIZE]; - # - # /* kill(). */ - ("_kill", _siginfo_t_U_kill), # struct - # { - # __pid_t si_pid; /* Sending process ID. */ - # __uid_t si_uid; /* Real user ID of sending process. */ - # } _kill; - # - # /* POSIX.1b timers. */ - ("_timer", _siginfo_t_U_timer), # struct - # { - # int si_tid; /* Timer ID. */ - # int si_overrun; /* Overrun count. */ - # sigval_t si_sigval; /* Signal value. */ - # } _timer; - # - # /* POSIX.1b signals. */ - ("_rt", _siginfo_t_U_rt), # struct - # { - # __pid_t si_pid; /* Sending process ID. */ - # __uid_t si_uid; /* Real user ID of sending process. */ - # sigval_t si_sigval; /* Signal value. */ - # } _rt; - # - # /* SIGCHLD. */ - ("_sigchld", _siginfo_t_U_sigchld), # struct - # { - # __pid_t si_pid; /* Which child. */ - # __uid_t si_uid; /* Real user ID of sending process. */ - # int si_status; /* Exit value or signal. */ - # __sigchld_clock_t si_utime; - # __sigchld_clock_t si_stime; - # } _sigchld; - # - # /* SIGILL, SIGFPE, SIGSEGV, SIGBUS. */ - ("_sigfault", _siginfo_t_U_sigfault), # struct - # { - # void *si_addr; /* Faulting insn/memory ref. */ - # short int si_addr_lsb; /* Valid LSB of the reported address. */ - # } _sigfault; - # - # /* SIGPOLL. */ - ("_sigpoll", _siginfo_t_U_sigpoll), # struct - # { - # long int si_band; /* Band event for SIGPOLL. */ - # int si_fd; - # } _sigpoll; - # - # /* SIGSYS. */ - ("_sigsys", _siginfo_t_U_sigpoll) # struct - # { - # void *_call_addr; /* Calling user insn. */ - # int _syscall; /* Triggering system call number. */ - # unsigned int _arch; /* AUDIT_ARCH_* of syscall. 
*/ - # } _sigsys; - ] # } _sifields; + # /* SIGILL, SIGFPE, SIGSEGV, SIGBUS. */ +class _siginfo_t_U_sigfault(ctypes.Structure): # struct + _fields_ = [ # { + ("si_addr", ctypes.c_void_p + ), # void *si_addr; /* Faulting insn/memory ref. */ + ( + "si_addr_lsb", ctypes.c_short + ) # short int si_addr_lsb; /* Valid LSB of the reported address. */ + ] # } _sigfault; -class siginfo_t(ctypes.Structure): # typedef struct - _fields_ = [ # { - ("si_signo", ctypes.c_int), # int si_signo; /* Signal number. */ - ("si_errno", ctypes.c_int), # int si_errno; /* If non-zero, an errno value associated with - # this signal, as defined in . */ - ("si_code", ctypes.c_int), # int si_code; /* Signal code. */ - # - ("_sifields", _siginfo_t_U) # union - # { - # int _pad[__SI_PAD_SIZE]; - # - # /* kill(). */ - # struct - # { - # __pid_t si_pid; /* Sending process ID. */ - # __uid_t si_uid; /* Real user ID of sending process. */ - # } _kill; - # - # /* POSIX.1b timers. */ - # struct - # { - # int si_tid; /* Timer ID. */ - # int si_overrun; /* Overrun count. */ - # sigval_t si_sigval; /* Signal value. */ - # } _timer; - # - # /* POSIX.1b signals. */ - # struct - # { - # __pid_t si_pid; /* Sending process ID. */ - # __uid_t si_uid; /* Real user ID of sending process. */ - # sigval_t si_sigval; /* Signal value. */ - # } _rt; - # - # /* SIGCHLD. */ - # struct - # { - # __pid_t si_pid; /* Which child. */ - # __uid_t si_uid; /* Real user ID of sending process. */ - # int si_status; /* Exit value or signal. */ - # __sigchld_clock_t si_utime; - # __sigchld_clock_t si_stime; - # } _sigchld; - # - # /* SIGILL, SIGFPE, SIGSEGV, SIGBUS. */ - # struct - # { - # void *si_addr; /* Faulting insn/memory ref. */ - # short int si_addr_lsb; /* Valid LSB of the reported address. */ - # } _sigfault; - # - # /* SIGPOLL. */ - # struct - # { - # long int si_band; /* Band event for SIGPOLL. */ - # int si_fd; - # } _sigpoll; - # - # /* SIGSYS. */ - # struct - # { - # void *_call_addr; /* Calling user insn. */ - # int _syscall; /* Triggering system call number. */ - # unsigned int _arch; /* AUDIT_ARCH_* of syscall. */ - # } _sigsys; - # } _sifields; - ] # } siginfo_t __SI_ALIGNMENT; + + # /* SIGPOLL. */ +class _siginfo_t_U_sigpoll(ctypes.Structure): # struct + _fields_ = [ # { + ("si_band", ctypes.c_long + ), # long int si_band; /* Band event for SIGPOLL. */ + ("si_fd", ctypes.c_int) # int si_fd; + ] # } _sigpoll; + + + # /* SIGSYS. */ +class _siginfo_t_U_sigsys(ctypes.Structure): # struct + _fields_ = [ # { + ("_call_addr", ctypes.c_void_p + ), # void *_call_addr; /* Calling user insn. */ + ( + "_syscall", ctypes.c_int + ), # int _syscall; /* Triggering system call number. */ + ("_arch", ctypes.c_uint + ) # unsigned int _arch; /* AUDIT_ARCH_* of syscall. */ + ] # } _sigsys; + + +class _siginfo_t_U(ctypes.Union): # union + _fields_ = [ # { + ("_pad", + ctypes.c_int * _SI_PAD_SIZE), # int _pad[__SI_PAD_SIZE]; + # + # /* kill(). */ + ("_kill", _siginfo_t_U_kill), # struct + # { + # __pid_t si_pid; /* Sending process ID. */ + # __uid_t si_uid; /* Real user ID of sending process. */ + # } _kill; + # + # /* POSIX.1b timers. */ + ("_timer", _siginfo_t_U_timer), # struct + # { + # int si_tid; /* Timer ID. */ + # int si_overrun; /* Overrun count. */ + # sigval_t si_sigval; /* Signal value. */ + # } _timer; + # + # /* POSIX.1b signals. */ + ("_rt", _siginfo_t_U_rt), # struct + # { + # __pid_t si_pid; /* Sending process ID. */ + # __uid_t si_uid; /* Real user ID of sending process. */ + # sigval_t si_sigval; /* Signal value. 
*/ + # } _rt; + # + # /* SIGCHLD. */ + ("_sigchld", _siginfo_t_U_sigchld), # struct + # { + # __pid_t si_pid; /* Which child. */ + # __uid_t si_uid; /* Real user ID of sending process. */ + # int si_status; /* Exit value or signal. */ + # __sigchld_clock_t si_utime; + # __sigchld_clock_t si_stime; + # } _sigchld; + # + # /* SIGILL, SIGFPE, SIGSEGV, SIGBUS. */ + ("_sigfault", _siginfo_t_U_sigfault), # struct + # { + # void *si_addr; /* Faulting insn/memory ref. */ + # short int si_addr_lsb; /* Valid LSB of the reported address. */ + # } _sigfault; + # + # /* SIGPOLL. */ + ("_sigpoll", _siginfo_t_U_sigpoll), # struct + # { + # long int si_band; /* Band event for SIGPOLL. */ + # int si_fd; + # } _sigpoll; + # + # /* SIGSYS. */ + ("_sigsys", _siginfo_t_U_sigpoll) # struct + # { + # void *_call_addr; /* Calling user insn. */ + # int _syscall; /* Triggering system call number. */ + # unsigned int _arch; /* AUDIT_ARCH_* of syscall. */ + # } _sigsys; + ] # } _sifields; + + +class siginfo_t(ctypes.Structure): # typedef struct + _fields_ = [ # { + ("si_signo", ctypes.c_int + ), # int si_signo; /* Signal number. */ + ( + "si_errno", ctypes.c_int + ), # int si_errno; /* If non-zero, an errno value associated with + # this signal, as defined in . */ + ("si_code", ctypes.c_int + ), # int si_code; /* Signal code. */ + # + ("_sifields", _siginfo_t_U) # union + # { + # int _pad[__SI_PAD_SIZE]; + # + # /* kill(). */ + # struct + # { + # __pid_t si_pid; /* Sending process ID. */ + # __uid_t si_uid; /* Real user ID of sending process. */ + # } _kill; + # + # /* POSIX.1b timers. */ + # struct + # { + # int si_tid; /* Timer ID. */ + # int si_overrun; /* Overrun count. */ + # sigval_t si_sigval; /* Signal value. */ + # } _timer; + # + # /* POSIX.1b signals. */ + # struct + # { + # __pid_t si_pid; /* Sending process ID. */ + # __uid_t si_uid; /* Real user ID of sending process. */ + # sigval_t si_sigval; /* Signal value. */ + # } _rt; + # + # /* SIGCHLD. */ + # struct + # { + # __pid_t si_pid; /* Which child. */ + # __uid_t si_uid; /* Real user ID of sending process. */ + # int si_status; /* Exit value or signal. */ + # __sigchld_clock_t si_utime; + # __sigchld_clock_t si_stime; + # } _sigchld; + # + # /* SIGILL, SIGFPE, SIGSEGV, SIGBUS. */ + # struct + # { + # void *si_addr; /* Faulting insn/memory ref. */ + # short int si_addr_lsb; /* Valid LSB of the reported address. */ + # } _sigfault; + # + # /* SIGPOLL. */ + # struct + # { + # long int si_band; /* Band event for SIGPOLL. */ + # int si_fd; + # } _sigpoll; + # + # /* SIGSYS. */ + # struct + # { + # void *_call_addr; /* Calling user insn. */ + # int _syscall; /* Triggering system call number. */ + # unsigned int _arch; /* AUDIT_ARCH_* of syscall. */ + # } _sigsys; + # } _sifields; + ] # } siginfo_t __SI_ALIGNMENT; # xsave related. 
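(For orientation before the xsave-related structures that follow: a minimal sketch, not taken from the patch itself, of how these ctypes mirrors are consumed once filled in. The coredump writer serializes them into the raw bytes of a note payload; the pid and register value below are hypothetical.)

import ctypes

st = elf_prstatus()                   # ctypes zero-initializes every field
st.pr_pid = 1234                      # hypothetical pid of the dumped task
st.pr_reg.rip = 0x400000              # hypothetical instruction pointer
raw = ctypes.string_at(ctypes.addressof(st), ctypes.sizeof(st))
assert len(raw) == ctypes.sizeof(elf_prstatus)   # one NT_PRSTATUS-sized payload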
-class ymmh_struct(ctypes.Structure): # struct ymmh_struct { - _fields_ = [ - ("ymmh_space", 64*ctypes.c_uint) # u32 ymmh_space[64]; - ] # } __packed; + +class ymmh_struct(ctypes.Structure): # struct ymmh_struct { + _fields_ = [("ymmh_space", 64 * ctypes.c_uint + ) # u32 ymmh_space[64]; + ] # } __packed; -class xsave_hdr_struct(ctypes.Structure): # struct xsave_hdr_struct { - _fields_ = [ - ("xstate_bv", ctypes.c_ulonglong), # u64 xstate_bv; - ("reserved1", ctypes.c_ulonglong*2), # u64 reserved1[2]; - ("reserved2", ctypes.c_ulonglong*5) # u64 reserved2[5]; - ] # } __packed; +class xsave_hdr_struct(ctypes.Structure): # struct xsave_hdr_struct { + _fields_ = [ + ("xstate_bv", ctypes.c_ulonglong + ), # u64 xstate_bv; + ("reserved1", ctypes.c_ulonglong * + 2), # u64 reserved1[2]; + ("reserved2", ctypes.c_ulonglong * 5 + ) # u64 reserved2[5]; + ] # } __packed; -class i387_fxsave_struct(ctypes.Structure): # struct i387_fxsave_struct { - _fields_ = [ - ("cwd", ctypes.c_ushort), # u16 cwd; /* Control Word */ - ("swd", ctypes.c_ushort), # u16 swd; /* Status Word */ - ("twd", ctypes.c_ushort), # u16 twd; /* Tag Word */ - ("fop", ctypes.c_ushort), # u16 fop; /* Last Instruction Opcode */ - # union { - # struct { - ("rip", ctypes.c_ulonglong), # u64 rip; /* Instruction Pointer */ - ("rdp", ctypes.c_ulonglong), # u64 rdp; /* Data Pointer */ - # }; - # struct { - # u32 fip; /* FPU IP Offset */ - # u32 fcs; /* FPU IP Selector */ - # u32 foo; /* FPU Operand Offset */ - # u32 fos; /* FPU Operand Selector */ - # }; - # }; - ("mxcsr", ctypes.c_uint), # u32 mxcsr; /* MXCSR Register State */ - ("mxcsr_mask", ctypes.c_uint), # u32 mxcsr_mask; /* MXCSR Mask */ - # - # /* 8*16 bytes for each FP-reg = 128 bytes */ - ("st_space", ctypes.c_uint*32), # u32 st_space[32]; -# - # /* 16*16 bytes for each XMM-reg = 256 bytes */ - ("xmm_space", ctypes.c_uint*64), # u32 xmm_space[64]; - # - ("padding", ctypes.c_uint*12), # u32 padding[12]; - # - # union { - ("padding1", ctypes.c_uint*12) # u32 padding1[12]; - # u32 sw_reserved[12]; - # }; - # - ] # } __aligned(16); +class i387_fxsave_struct(ctypes.Structure): # struct i387_fxsave_struct { + _fields_ = [ + ( + "cwd", ctypes.c_ushort + ), # u16 cwd; /* Control Word */ + ( + "swd", ctypes.c_ushort + ), # u16 swd; /* Status Word */ + ( + "twd", ctypes.c_ushort + ), # u16 twd; /* Tag Word */ + ( + "fop", ctypes.c_ushort + ), # u16 fop; /* Last Instruction Opcode */ + # union { + # struct { + ( + "rip", ctypes.c_ulonglong + ), # u64 rip; /* Instruction Pointer */ + ( + "rdp", ctypes.c_ulonglong + ), # u64 rdp; /* Data Pointer */ + # }; + # struct { + # u32 fip; /* FPU IP Offset */ + # u32 fcs; /* FPU IP Selector */ + # u32 foo; /* FPU Operand Offset */ + # u32 fos; /* FPU Operand Selector */ + # }; + # }; + ( + "mxcsr", ctypes.c_uint + ), # u32 mxcsr; /* MXCSR Register State */ + ( + "mxcsr_mask", ctypes.c_uint + ), # u32 mxcsr_mask; /* MXCSR Mask */ + # + # /* 8*16 bytes for each FP-reg = 128 bytes */ + ("st_space", ctypes.c_uint * 32 + ), # u32 st_space[32]; + # + # /* 16*16 bytes for each XMM-reg = 256 bytes */ + ("xmm_space", ctypes.c_uint * 64 + ), # u32 xmm_space[64]; + # + ("padding", ctypes.c_uint * 12 + ), # u32 padding[12]; + # + # union { + ("padding1", ctypes.c_uint * 12 + ) # u32 padding1[12]; + # u32 sw_reserved[12]; + # }; + # + ] # } __aligned(16); -class elf_xsave_struct(ctypes.Structure): # struct xsave_struct { - _fields_ = [ - ("i387", i387_fxsave_struct), # struct i387_fxsave_struct i387; - ("xsave_hdr", xsave_hdr_struct), # struct xsave_hdr_struct xsave_hdr; 
- ("ymmh", ymmh_struct) # struct ymmh_struct ymmh; - ] # } __aligned(FP_MIN_ALIGN_BYTES) __packed; +class elf_xsave_struct(ctypes.Structure): # struct xsave_struct { + _fields_ = [ + ("i387", + i387_fxsave_struct), # struct i387_fxsave_struct i387; + ("xsave_hdr", xsave_hdr_struct + ), # struct xsave_hdr_struct xsave_hdr; + ("ymmh", ymmh_struct) # struct ymmh_struct ymmh; + ] # } __aligned(FP_MIN_ALIGN_BYTES) __packed; diff --git a/lib/py/cli.py b/lib/py/cli.py index abaf0720c..da343022e 100755 --- a/lib/py/cli.py +++ b/lib/py/cli.py @@ -6,337 +6,409 @@ import os import pycriu + def inf(opts): - if opts['in']: - return open(opts['in'], 'rb') - else: - return sys.stdin + if opts['in']: + return open(opts['in'], 'rb') + else: + return sys.stdin + def outf(opts): - if opts['out']: - return open(opts['out'], 'w+') - else: - return sys.stdout + if opts['out']: + return open(opts['out'], 'w+') + else: + return sys.stdout + def dinf(opts, name): - return open(os.path.join(opts['dir'], name)) + return open(os.path.join(opts['dir'], name)) + def decode(opts): - indent = None + indent = None - try: - img = pycriu.images.load(inf(opts), opts['pretty'], opts['nopl']) - except pycriu.images.MagicException as exc: - print("Unknown magic %#x.\n"\ - "Maybe you are feeding me an image with "\ - "raw data(i.e. pages.img)?" % exc.magic, file=sys.stderr) - sys.exit(1) + try: + img = pycriu.images.load(inf(opts), opts['pretty'], opts['nopl']) + except pycriu.images.MagicException as exc: + print("Unknown magic %#x.\n"\ + "Maybe you are feeding me an image with "\ + "raw data(i.e. pages.img)?" % exc.magic, file=sys.stderr) + sys.exit(1) - if opts['pretty']: - indent = 4 + if opts['pretty']: + indent = 4 + + f = outf(opts) + json.dump(img, f, indent=indent) + if f == sys.stdout: + f.write("\n") - f = outf(opts) - json.dump(img, f, indent=indent) - if f == sys.stdout: - f.write("\n") def encode(opts): - img = json.load(inf(opts)) - pycriu.images.dump(img, outf(opts)) + img = json.load(inf(opts)) + pycriu.images.dump(img, outf(opts)) + def info(opts): - infs = pycriu.images.info(inf(opts)) - json.dump(infs, sys.stdout, indent = 4) - print() + infs = pycriu.images.info(inf(opts)) + json.dump(infs, sys.stdout, indent=4) + print() + def get_task_id(p, val): - return p[val] if val in p else p['ns_' + val][0] + return p[val] if val in p else p['ns_' + val][0] + + # # Explorers # -class ps_item: - def __init__(self, p, core): - self.pid = get_task_id(p, 'pid') - self.ppid = p['ppid'] - self.p = p - self.core = core - self.kids = [] -def show_ps(p, opts, depth = 0): - print("%7d%7d%7d %s%s" % (p.pid, get_task_id(p.p, 'pgid'), get_task_id(p.p, 'sid'), - ' ' * (4 * depth), p.core['tc']['comm'])) - for kid in p.kids: - show_ps(kid, opts, depth + 1) +class ps_item: + def __init__(self, p, core): + self.pid = get_task_id(p, 'pid') + self.ppid = p['ppid'] + self.p = p + self.core = core + self.kids = [] + + +def show_ps(p, opts, depth=0): + print("%7d%7d%7d %s%s" % + (p.pid, get_task_id(p.p, 'pgid'), get_task_id(p.p, 'sid'), ' ' * + (4 * depth), p.core['tc']['comm'])) + for kid in p.kids: + show_ps(kid, opts, depth + 1) + def explore_ps(opts): - pss = { } - ps_img = pycriu.images.load(dinf(opts, 'pstree.img')) - for p in ps_img['entries']: - core = pycriu.images.load(dinf(opts, 'core-%d.img' % get_task_id(p, 'pid'))) - ps = ps_item(p, core['entries'][0]) - pss[ps.pid] = ps + pss = {} + ps_img = pycriu.images.load(dinf(opts, 'pstree.img')) + for p in ps_img['entries']: + core = pycriu.images.load( + dinf(opts, 'core-%d.img' % 
get_task_id(p, 'pid'))) + ps = ps_item(p, core['entries'][0]) + pss[ps.pid] = ps - # Build tree - psr = None - for pid in pss: - p = pss[pid] - if p.ppid == 0: - psr = p - continue + # Build tree + psr = None + for pid in pss: + p = pss[pid] + if p.ppid == 0: + psr = p + continue - pp = pss[p.ppid] - pp.kids.append(p) + pp = pss[p.ppid] + pp.kids.append(p) + + print("%7s%7s%7s %s" % ('PID', 'PGID', 'SID', 'COMM')) + show_ps(psr, opts) - print("%7s%7s%7s %s" % ('PID', 'PGID', 'SID', 'COMM')) - show_ps(psr, opts) files_img = None + def ftype_find_in_files(opts, ft, fid): - global files_img + global files_img - if files_img is None: - try: - files_img = pycriu.images.load(dinf(opts, "files.img"))['entries'] - except: - files_img = [] + if files_img is None: + try: + files_img = pycriu.images.load(dinf(opts, "files.img"))['entries'] + except: + files_img = [] - if len(files_img) == 0: - return None + if len(files_img) == 0: + return None - for f in files_img: - if f['id'] == fid: - return f + for f in files_img: + if f['id'] == fid: + return f - return None + return None def ftype_find_in_image(opts, ft, fid, img): - f = ftype_find_in_files(opts, ft, fid) - if f: - return f[ft['field']] + f = ftype_find_in_files(opts, ft, fid) + if f: + return f[ft['field']] + + if ft['img'] == None: + ft['img'] = pycriu.images.load(dinf(opts, img))['entries'] + for f in ft['img']: + if f['id'] == fid: + return f + return None - if ft['img'] == None: - ft['img'] = pycriu.images.load(dinf(opts, img))['entries'] - for f in ft['img']: - if f['id'] == fid: - return f - return None def ftype_reg(opts, ft, fid): - rf = ftype_find_in_image(opts, ft, fid, 'reg-files.img') - return rf and rf['name'] or 'unknown path' + rf = ftype_find_in_image(opts, ft, fid, 'reg-files.img') + return rf and rf['name'] or 'unknown path' + def ftype_pipe(opts, ft, fid): - p = ftype_find_in_image(opts, ft, fid, 'pipes.img') - return p and 'pipe[%d]' % p['pipe_id'] or 'pipe[?]' + p = ftype_find_in_image(opts, ft, fid, 'pipes.img') + return p and 'pipe[%d]' % p['pipe_id'] or 'pipe[?]' + def ftype_unix(opts, ft, fid): - ux = ftype_find_in_image(opts, ft, fid, 'unixsk.img') - if not ux: - return 'unix[?]' + ux = ftype_find_in_image(opts, ft, fid, 'unixsk.img') + if not ux: + return 'unix[?]' + + n = ux['name'] and ' %s' % ux['name'] or '' + return 'unix[%d (%d)%s]' % (ux['ino'], ux['peer'], n) - n = ux['name'] and ' %s' % ux['name'] or '' - return 'unix[%d (%d)%s]' % (ux['ino'], ux['peer'], n) file_types = { - 'REG': {'get': ftype_reg, 'img': None, 'field': 'reg'}, - 'PIPE': {'get': ftype_pipe, 'img': None, 'field': 'pipe'}, - 'UNIXSK': {'get': ftype_unix, 'img': None, 'field': 'usk'}, + 'REG': { + 'get': ftype_reg, + 'img': None, + 'field': 'reg' + }, + 'PIPE': { + 'get': ftype_pipe, + 'img': None, + 'field': 'pipe' + }, + 'UNIXSK': { + 'get': ftype_unix, + 'img': None, + 'field': 'usk' + }, } -def ftype_gen(opts, ft, fid): - return '%s.%d' % (ft['typ'], fid) -files_cache = { } +def ftype_gen(opts, ft, fid): + return '%s.%d' % (ft['typ'], fid) + + +files_cache = {} + def get_file_str(opts, fd): - key = (fd['type'], fd['id']) - f = files_cache.get(key, None) - if not f: - ft = file_types.get(fd['type'], {'get': ftype_gen, 'typ': fd['type']}) - f = ft['get'](opts, ft, fd['id']) - files_cache[key] = f + key = (fd['type'], fd['id']) + f = files_cache.get(key, None) + if not f: + ft = file_types.get(fd['type'], {'get': ftype_gen, 'typ': fd['type']}) + f = ft['get'](opts, ft, fd['id']) + files_cache[key] = f + + return f - return f def 
explore_fds(opts): - ps_img = pycriu.images.load(dinf(opts, 'pstree.img')) - for p in ps_img['entries']: - pid = get_task_id(p, 'pid') - idi = pycriu.images.load(dinf(opts, 'ids-%s.img' % pid)) - fdt = idi['entries'][0]['files_id'] - fdi = pycriu.images.load(dinf(opts, 'fdinfo-%d.img' % fdt)) + ps_img = pycriu.images.load(dinf(opts, 'pstree.img')) + for p in ps_img['entries']: + pid = get_task_id(p, 'pid') + idi = pycriu.images.load(dinf(opts, 'ids-%s.img' % pid)) + fdt = idi['entries'][0]['files_id'] + fdi = pycriu.images.load(dinf(opts, 'fdinfo-%d.img' % fdt)) - print("%d" % pid) - for fd in fdi['entries']: - print("\t%7d: %s" % (fd['fd'], get_file_str(opts, fd))) + print("%d" % pid) + for fd in fdi['entries']: + print("\t%7d: %s" % (fd['fd'], get_file_str(opts, fd))) - fdi = pycriu.images.load(dinf(opts, 'fs-%d.img' % pid))['entries'][0] - print("\t%7s: %s" % ('cwd', get_file_str(opts, {'type': 'REG', 'id': fdi['cwd_id']}))) - print("\t%7s: %s" % ('root', get_file_str(opts, {'type': 'REG', 'id': fdi['root_id']}))) + fdi = pycriu.images.load(dinf(opts, 'fs-%d.img' % pid))['entries'][0] + print("\t%7s: %s" % + ('cwd', get_file_str(opts, { + 'type': 'REG', + 'id': fdi['cwd_id'] + }))) + print("\t%7s: %s" % + ('root', get_file_str(opts, { + 'type': 'REG', + 'id': fdi['root_id'] + }))) class vma_id: - def __init__(self): - self.__ids = {} - self.__last = 1 + def __init__(self): + self.__ids = {} + self.__last = 1 - def get(self, iid): - ret = self.__ids.get(iid, None) - if not ret: - ret = self.__last - self.__last += 1 - self.__ids[iid] = ret + def get(self, iid): + ret = self.__ids.get(iid, None) + if not ret: + ret = self.__last + self.__last += 1 + self.__ids[iid] = ret + + return ret - return ret def explore_mems(opts): - ps_img = pycriu.images.load(dinf(opts, 'pstree.img')) - vids = vma_id() - for p in ps_img['entries']: - pid = get_task_id(p, 'pid') - mmi = pycriu.images.load(dinf(opts, 'mm-%d.img' % pid))['entries'][0] + ps_img = pycriu.images.load(dinf(opts, 'pstree.img')) + vids = vma_id() + for p in ps_img['entries']: + pid = get_task_id(p, 'pid') + mmi = pycriu.images.load(dinf(opts, 'mm-%d.img' % pid))['entries'][0] - print("%d" % pid) - print("\t%-36s %s" % ('exe', get_file_str(opts, {'type': 'REG', 'id': mmi['exe_file_id']}))) + print("%d" % pid) + print("\t%-36s %s" % ('exe', + get_file_str(opts, { + 'type': 'REG', + 'id': mmi['exe_file_id'] + }))) - for vma in mmi['vmas']: - st = vma['status'] - if st & (1 << 10): - fn = ' ' + 'ips[%lx]' % vids.get(vma['shmid']) - elif st & (1 << 8): - fn = ' ' + 'shmem[%lx]' % vids.get(vma['shmid']) - elif st & (1 << 11): - fn = ' ' + 'packet[%lx]' % vids.get(vma['shmid']) - elif st & ((1 << 6) | (1 << 7)): - fn = ' ' + get_file_str(opts, {'type': 'REG', 'id': vma['shmid']}) - if vma['pgoff']: - fn += ' + %#lx' % vma['pgoff'] - if st & (1 << 7): - fn += ' (s)' - elif st & (1 << 1): - fn = ' [stack]' - elif st & (1 << 2): - fn = ' [vsyscall]' - elif st & (1 << 3): - fn = ' [vdso]' - elif vma['flags'] & 0x0100: # growsdown - fn = ' [stack?]' - else: - fn = '' + for vma in mmi['vmas']: + st = vma['status'] + if st & (1 << 10): + fn = ' ' + 'ips[%lx]' % vids.get(vma['shmid']) + elif st & (1 << 8): + fn = ' ' + 'shmem[%lx]' % vids.get(vma['shmid']) + elif st & (1 << 11): + fn = ' ' + 'packet[%lx]' % vids.get(vma['shmid']) + elif st & ((1 << 6) | (1 << 7)): + fn = ' ' + get_file_str(opts, { + 'type': 'REG', + 'id': vma['shmid'] + }) + if vma['pgoff']: + fn += ' + %#lx' % vma['pgoff'] + if st & (1 << 7): + fn += ' (s)' + elif st & (1 << 1): + fn = 
' [stack]' + elif st & (1 << 2): + fn = ' [vsyscall]' + elif st & (1 << 3): + fn = ' [vdso]' + elif vma['flags'] & 0x0100: # growsdown + fn = ' [stack?]' + else: + fn = '' - if not st & (1 << 0): - fn += ' *' + if not st & (1 << 0): + fn += ' *' - prot = vma['prot'] & 0x1 and 'r' or '-' - prot += vma['prot'] & 0x2 and 'w' or '-' - prot += vma['prot'] & 0x4 and 'x' or '-' + prot = vma['prot'] & 0x1 and 'r' or '-' + prot += vma['prot'] & 0x2 and 'w' or '-' + prot += vma['prot'] & 0x4 and 'x' or '-' - astr = '%08lx-%08lx' % (vma['start'], vma['end']) - print("\t%-36s%s%s" % (astr, prot, fn)) + astr = '%08lx-%08lx' % (vma['start'], vma['end']) + print("\t%-36s%s%s" % (astr, prot, fn)) def explore_rss(opts): - ps_img = pycriu.images.load(dinf(opts, 'pstree.img')) - for p in ps_img['entries']: - pid = get_task_id(p, 'pid') - vmas = pycriu.images.load(dinf(opts, 'mm-%d.img' % pid))['entries'][0]['vmas'] - pms = pycriu.images.load(dinf(opts, 'pagemap-%d.img' % pid))['entries'] + ps_img = pycriu.images.load(dinf(opts, 'pstree.img')) + for p in ps_img['entries']: + pid = get_task_id(p, 'pid') + vmas = pycriu.images.load(dinf(opts, 'mm-%d.img' % + pid))['entries'][0]['vmas'] + pms = pycriu.images.load(dinf(opts, 'pagemap-%d.img' % pid))['entries'] - print("%d" % pid) - vmi = 0 - pvmi = -1 - for pm in pms[1:]: - pstr = '\t%lx / %-8d' % (pm['vaddr'], pm['nr_pages']) - while vmas[vmi]['end'] <= pm['vaddr']: - vmi += 1 + print("%d" % pid) + vmi = 0 + pvmi = -1 + for pm in pms[1:]: + pstr = '\t%lx / %-8d' % (pm['vaddr'], pm['nr_pages']) + while vmas[vmi]['end'] <= pm['vaddr']: + vmi += 1 - pme = pm['vaddr'] + (pm['nr_pages'] << 12) - vstr = '' - while vmas[vmi]['start'] < pme: - vma = vmas[vmi] - if vmi == pvmi: - vstr += ' ~' - else: - vstr += ' %08lx / %-8d' % (vma['start'], (vma['end'] - vma['start'])>>12) - if vma['status'] & ((1 << 6) | (1 << 7)): - vstr += ' ' + get_file_str(opts, {'type': 'REG', 'id': vma['shmid']}) - pvmi = vmi - vstr += '\n\t%23s' % '' - vmi += 1 + pme = pm['vaddr'] + (pm['nr_pages'] << 12) + vstr = '' + while vmas[vmi]['start'] < pme: + vma = vmas[vmi] + if vmi == pvmi: + vstr += ' ~' + else: + vstr += ' %08lx / %-8d' % ( + vma['start'], (vma['end'] - vma['start']) >> 12) + if vma['status'] & ((1 << 6) | (1 << 7)): + vstr += ' ' + get_file_str(opts, { + 'type': 'REG', + 'id': vma['shmid'] + }) + pvmi = vmi + vstr += '\n\t%23s' % '' + vmi += 1 - vmi -= 1 + vmi -= 1 - print('%-24s%s' % (pstr, vstr)) + print('%-24s%s' % (pstr, vstr)) +explorers = { + 'ps': explore_ps, + 'fds': explore_fds, + 'mems': explore_mems, + 'rss': explore_rss +} -explorers = { 'ps': explore_ps, 'fds': explore_fds, 'mems': explore_mems, 'rss': explore_rss } def explore(opts): - explorers[opts['what']](opts) + explorers[opts['what']](opts) + def main(): - desc = 'CRiu Image Tool' - parser = argparse.ArgumentParser(description=desc, - formatter_class=argparse.RawTextHelpFormatter) + desc = 'CRiu Image Tool' + parser = argparse.ArgumentParser( + description=desc, formatter_class=argparse.RawTextHelpFormatter) - subparsers = parser.add_subparsers(help='Use crit CMD --help for command-specific help') + subparsers = parser.add_subparsers( + help='Use crit CMD --help for command-specific help') - # Decode - decode_parser = subparsers.add_parser('decode', - help = 'convert criu image from binary type to json') - decode_parser.add_argument('--pretty', - help = 'Multiline with indents and some numerical fields in field-specific format', - action = 'store_true') - decode_parser.add_argument('-i', - '--in', - help = 
'criu image in binary format to be decoded (stdin by default)') - decode_parser.add_argument('-o', - '--out', - help = 'where to put criu image in json format (stdout by default)') - decode_parser.set_defaults(func=decode, nopl=False) + # Decode + decode_parser = subparsers.add_parser( + 'decode', help='convert criu image from binary type to json') + decode_parser.add_argument( + '--pretty', + help= + 'Multiline with indents and some numerical fields in field-specific format', + action='store_true') + decode_parser.add_argument( + '-i', + '--in', + help='criu image in binary format to be decoded (stdin by default)') + decode_parser.add_argument( + '-o', + '--out', + help='where to put criu image in json format (stdout by default)') + decode_parser.set_defaults(func=decode, nopl=False) - # Encode - encode_parser = subparsers.add_parser('encode', - help = 'convert criu image from json type to binary') - encode_parser.add_argument('-i', - '--in', - help = 'criu image in json format to be encoded (stdin by default)') - encode_parser.add_argument('-o', - '--out', - help = 'where to put criu image in binary format (stdout by default)') - encode_parser.set_defaults(func=encode) + # Encode + encode_parser = subparsers.add_parser( + 'encode', help='convert criu image from json type to binary') + encode_parser.add_argument( + '-i', + '--in', + help='criu image in json format to be encoded (stdin by default)') + encode_parser.add_argument( + '-o', + '--out', + help='where to put criu image in binary format (stdout by default)') + encode_parser.set_defaults(func=encode) - # Info - info_parser = subparsers.add_parser('info', - help = 'show info about image') - info_parser.add_argument("in") - info_parser.set_defaults(func=info) + # Info + info_parser = subparsers.add_parser('info', help='show info about image') + info_parser.add_argument("in") + info_parser.set_defaults(func=info) - # Explore - x_parser = subparsers.add_parser('x', help = 'explore image dir') - x_parser.add_argument('dir') - x_parser.add_argument('what', choices = [ 'ps', 'fds', 'mems', 'rss']) - x_parser.set_defaults(func=explore) + # Explore + x_parser = subparsers.add_parser('x', help='explore image dir') + x_parser.add_argument('dir') + x_parser.add_argument('what', choices=['ps', 'fds', 'mems', 'rss']) + x_parser.set_defaults(func=explore) - # Show - show_parser = subparsers.add_parser('show', - help = "convert criu image from binary to human-readable json") - show_parser.add_argument("in") - show_parser.add_argument('--nopl', help = 'do not show entry payload (if exists)', action = 'store_true') - show_parser.set_defaults(func=decode, pretty=True, out=None) + # Show + show_parser = subparsers.add_parser( + 'show', help="convert criu image from binary to human-readable json") + show_parser.add_argument("in") + show_parser.add_argument('--nopl', + help='do not show entry payload (if exists)', + action='store_true') + show_parser.set_defaults(func=decode, pretty=True, out=None) - opts = vars(parser.parse_args()) + opts = vars(parser.parse_args()) - if not opts: - sys.stderr.write(parser.format_usage()) - sys.stderr.write("crit: error: too few arguments\n") - sys.exit(1) + if not opts: + sys.stderr.write(parser.format_usage()) + sys.stderr.write("crit: error: too few arguments\n") + sys.exit(1) + + opts["func"](opts) - opts["func"](opts) if __name__ == '__main__': - main() + main() diff --git a/lib/py/criu.py b/lib/py/criu.py index de1a214a3..d94fea9e1 100644 --- a/lib/py/criu.py +++ b/lib/py/criu.py @@ -8,325 +8,336 @@ import struct 
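(The hunk below reformats lib/py/criu.py, the RPC wrapper class. For orientation, a minimal usage sketch that is not part of the patch; the images directory and pid are hypothetical, and error handling is trimmed.)

import os
from pycriu.criu import criu, CRIUException

c = criu()
c.use_binary('criu')                  # could also be use_sk() with a service socket path
c.opts.pid = 1234                     # hypothetical task to checkpoint
c.opts.images_dir_fd = os.open('dump', os.O_DIRECTORY)
c.opts.log_level = 4
try:
    c.check()                         # verify kernel support first
    c.dump()                          # then checkpoint the task tree
except CRIUException as e:
    print(e)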
import pycriu.rpc_pb2 as rpc + class _criu_comm: - """ + """ Base class for communication classes. """ - COMM_SK = 0 - COMM_FD = 1 - COMM_BIN = 2 - comm_type = None - comm = None - sk = None + COMM_SK = 0 + COMM_FD = 1 + COMM_BIN = 2 + comm_type = None + comm = None + sk = None - def connect(self, daemon): - """ + def connect(self, daemon): + """ Connect to criu and return socket object. daemon -- is for whether or not criu should daemonize if executing criu from binary(comm_bin). """ - pass + pass - def disconnect(self): - """ + def disconnect(self): + """ Disconnect from criu. """ - pass + pass class _criu_comm_sk(_criu_comm): - """ + """ Communication class for unix socket. """ - def __init__(self, sk_path): - self.comm_type = self.COMM_SK - self.comm = sk_path - def connect(self, daemon): - self.sk = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET) - self.sk.connect(self.comm) + def __init__(self, sk_path): + self.comm_type = self.COMM_SK + self.comm = sk_path - return self.sk + def connect(self, daemon): + self.sk = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET) + self.sk.connect(self.comm) - def disconnect(self): - self.sk.close() + return self.sk + + def disconnect(self): + self.sk.close() class _criu_comm_fd(_criu_comm): - """ + """ Communication class for file descriptor. """ - def __init__(self, fd): - self.comm_type = self.COMM_FD - self.comm = fd - def connect(self, daemon): - self.sk = socket.fromfd(self.comm, socket.AF_UNIX, socket.SOCK_SEQPACKET) + def __init__(self, fd): + self.comm_type = self.COMM_FD + self.comm = fd - return self.sk + def connect(self, daemon): + self.sk = socket.fromfd(self.comm, socket.AF_UNIX, + socket.SOCK_SEQPACKET) + + return self.sk + + def disconnect(self): + self.sk.close() - def disconnect(self): - self.sk.close() class _criu_comm_bin(_criu_comm): - """ + """ Communication class for binary. """ - def __init__(self, bin_path): - self.comm_type = self.COMM_BIN - self.comm = bin_path - self.swrk = None - self.daemon = None - def connect(self, daemon): - # Kind of the same thing we do in libcriu - css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET) - flags = fcntl.fcntl(css[1], fcntl.F_GETFD) - fcntl.fcntl(css[1], fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) - flags = fcntl.fcntl(css[0], fcntl.F_GETFD) - fcntl.fcntl(css[0], fcntl.F_SETFD, flags & ~fcntl.FD_CLOEXEC) + def __init__(self, bin_path): + self.comm_type = self.COMM_BIN + self.comm = bin_path + self.swrk = None + self.daemon = None - self.daemon = daemon + def connect(self, daemon): + # Kind of the same thing we do in libcriu + css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET) + flags = fcntl.fcntl(css[1], fcntl.F_GETFD) + fcntl.fcntl(css[1], fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) + flags = fcntl.fcntl(css[0], fcntl.F_GETFD) + fcntl.fcntl(css[0], fcntl.F_SETFD, flags & ~fcntl.FD_CLOEXEC) - p = os.fork() + self.daemon = daemon - if p == 0: - def exec_criu(): - os.close(0) - os.close(1) - os.close(2) + p = os.fork() - css[0].send(struct.pack('i', os.getpid())) - os.execv(self.comm, [self.comm, 'swrk', "%d" % css[0].fileno()]) - os._exit(1) + if p == 0: - if daemon: - # Python has no daemon(3) alternative, - # so we need to mimic it ourself. 
- p = os.fork() + def exec_criu(): + os.close(0) + os.close(1) + os.close(2) - if p == 0: - os.setsid() + css[0].send(struct.pack('i', os.getpid())) + os.execv(self.comm, + [self.comm, 'swrk', + "%d" % css[0].fileno()]) + os._exit(1) - exec_criu() - else: - os._exit(0) - else: - exec_criu() - else: - if daemon: - os.waitpid(p, 0) + if daemon: + # Python has no daemon(3) alternative, + # so we need to mimic it ourself. + p = os.fork() - css[0].close() - self.swrk = struct.unpack('i', css[1].recv(4))[0] - self.sk = css[1] + if p == 0: + os.setsid() - return self.sk + exec_criu() + else: + os._exit(0) + else: + exec_criu() + else: + if daemon: + os.waitpid(p, 0) - def disconnect(self): - self.sk.close() - if not self.daemon: - os.waitpid(self.swrk, 0) + css[0].close() + self.swrk = struct.unpack('i', css[1].recv(4))[0] + self.sk = css[1] + + return self.sk + + def disconnect(self): + self.sk.close() + if not self.daemon: + os.waitpid(self.swrk, 0) class CRIUException(Exception): - """ + """ Exception class for handling and storing criu errors. """ - typ = None - _str = None + typ = None + _str = None - def __str__(self): - return self._str + def __str__(self): + return self._str class CRIUExceptionInternal(CRIUException): - """ + """ Exception class for handling and storing internal errors. """ - def __init__(self, typ, s): - self.typ = typ - self._str = "%s failed with internal error: %s" % (rpc.criu_req_type.Name(self.typ), s) + + def __init__(self, typ, s): + self.typ = typ + self._str = "%s failed with internal error: %s" % ( + rpc.criu_req_type.Name(self.typ), s) class CRIUExceptionExternal(CRIUException): - """ + """ Exception class for handling and storing criu RPC errors. """ - def __init__(self, req_typ, resp_typ, errno): - self.typ = req_typ - self.resp_typ = resp_typ - self.errno = errno - self._str = self._gen_error_str() + def __init__(self, req_typ, resp_typ, errno): + self.typ = req_typ + self.resp_typ = resp_typ + self.errno = errno + self._str = self._gen_error_str() - def _gen_error_str(self): - s = "%s failed: " % (rpc.criu_req_type.Name(self.typ), ) + def _gen_error_str(self): + s = "%s failed: " % (rpc.criu_req_type.Name(self.typ), ) - if self.typ != self.resp_typ: - s += "Unexpected response type %d: " % (self.resp_typ, ) + if self.typ != self.resp_typ: + s += "Unexpected response type %d: " % (self.resp_typ, ) - s += "Error(%d): " % (self.errno, ) + s += "Error(%d): " % (self.errno, ) - if self.errno == errno.EBADRQC: - s += "Bad options" + if self.errno == errno.EBADRQC: + s += "Bad options" - if self.typ == rpc.DUMP: - if self.errno == errno.ESRCH: - s += "No process with such pid" + if self.typ == rpc.DUMP: + if self.errno == errno.ESRCH: + s += "No process with such pid" - if self.typ == rpc.RESTORE: - if self.errno == errno.EEXIST: - s += "Process with requested pid already exists" + if self.typ == rpc.RESTORE: + if self.errno == errno.EEXIST: + s += "Process with requested pid already exists" - s += "Unknown" + s += "Unknown" - return s + return s class criu: - """ + """ Call criu through RPC. 
""" - opts = None #CRIU options in pb format + opts = None #CRIU options in pb format - _comm = None #Communication method + _comm = None #Communication method - def __init__(self): - self.use_binary('criu') - self.opts = rpc.criu_opts() - self.sk = None + def __init__(self): + self.use_binary('criu') + self.opts = rpc.criu_opts() + self.sk = None - def use_sk(self, sk_name): - """ + def use_sk(self, sk_name): + """ Access criu using unix socket which that belongs to criu service daemon. """ - self._comm = _criu_comm_sk(sk_name) + self._comm = _criu_comm_sk(sk_name) - def use_fd(self, fd): - """ + def use_fd(self, fd): + """ Access criu using provided fd. """ - self._comm = _criu_comm_fd(fd) + self._comm = _criu_comm_fd(fd) - def use_binary(self, bin_name): - """ + def use_binary(self, bin_name): + """ Access criu by execing it using provided path to criu binary. """ - self._comm = _criu_comm_bin(bin_name) + self._comm = _criu_comm_bin(bin_name) - def _send_req_and_recv_resp(self, req): - """ + def _send_req_and_recv_resp(self, req): + """ As simple as send request and receive response. """ - # In case of self-dump we need to spawn criu swrk detached - # from our current process, as criu has a hard time separating - # process resources from its own if criu is located in a same - # process tree it is trying to dump. - daemon = False - if req.type == rpc.DUMP and not req.opts.HasField('pid'): - daemon = True + # In case of self-dump we need to spawn criu swrk detached + # from our current process, as criu has a hard time separating + # process resources from its own if criu is located in a same + # process tree it is trying to dump. + daemon = False + if req.type == rpc.DUMP and not req.opts.HasField('pid'): + daemon = True - try: - if not self.sk: - s = self._comm.connect(daemon) - else: - s = self.sk + try: + if not self.sk: + s = self._comm.connect(daemon) + else: + s = self.sk - if req.keep_open: - self.sk = s + if req.keep_open: + self.sk = s - s.send(req.SerializeToString()) + s.send(req.SerializeToString()) - buf = s.recv(len(s.recv(1, socket.MSG_TRUNC | socket.MSG_PEEK))) + buf = s.recv(len(s.recv(1, socket.MSG_TRUNC | socket.MSG_PEEK))) - if not req.keep_open: - self._comm.disconnect() + if not req.keep_open: + self._comm.disconnect() - resp = rpc.criu_resp() - resp.ParseFromString(buf) - except Exception as e: - raise CRIUExceptionInternal(req.type, str(e)) + resp = rpc.criu_resp() + resp.ParseFromString(buf) + except Exception as e: + raise CRIUExceptionInternal(req.type, str(e)) - return resp + return resp - def check(self): - """ + def check(self): + """ Checks whether the kernel support is up-to-date. """ - req = rpc.criu_req() - req.type = rpc.CHECK + req = rpc.criu_req() + req.type = rpc.CHECK - resp = self._send_req_and_recv_resp(req) + resp = self._send_req_and_recv_resp(req) - if not resp.success: - raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) + if not resp.success: + raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) - def dump(self): - """ + def dump(self): + """ Checkpoint a process/tree identified by opts.pid. 
""" - req = rpc.criu_req() - req.type = rpc.DUMP - req.opts.MergeFrom(self.opts) + req = rpc.criu_req() + req.type = rpc.DUMP + req.opts.MergeFrom(self.opts) - resp = self._send_req_and_recv_resp(req) + resp = self._send_req_and_recv_resp(req) - if not resp.success: - raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) + if not resp.success: + raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) - return resp.dump + return resp.dump - def pre_dump(self): - """ + def pre_dump(self): + """ Checkpoint a process/tree identified by opts.pid. """ - req = rpc.criu_req() - req.type = rpc.PRE_DUMP - req.opts.MergeFrom(self.opts) + req = rpc.criu_req() + req.type = rpc.PRE_DUMP + req.opts.MergeFrom(self.opts) - resp = self._send_req_and_recv_resp(req) + resp = self._send_req_and_recv_resp(req) - if not resp.success: - raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) + if not resp.success: + raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) - return resp.dump + return resp.dump - def restore(self): - """ + def restore(self): + """ Restore a process/tree. """ - req = rpc.criu_req() - req.type = rpc.RESTORE - req.opts.MergeFrom(self.opts) + req = rpc.criu_req() + req.type = rpc.RESTORE + req.opts.MergeFrom(self.opts) - resp = self._send_req_and_recv_resp(req) + resp = self._send_req_and_recv_resp(req) - if not resp.success: - raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) + if not resp.success: + raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) - return resp.restore + return resp.restore - def page_server_chld(self): - req = rpc.criu_req() - req.type = rpc.PAGE_SERVER_CHLD - req.opts.MergeFrom(self.opts) - req.keep_open = True + def page_server_chld(self): + req = rpc.criu_req() + req.type = rpc.PAGE_SERVER_CHLD + req.opts.MergeFrom(self.opts) + req.keep_open = True - resp = self._send_req_and_recv_resp(req) + resp = self._send_req_and_recv_resp(req) - if not resp.success: - raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) + if not resp.success: + raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) - return resp.ps + return resp.ps - def wait_pid(self, pid): - req = rpc.criu_req() - req.type = rpc.WAIT_PID - req.pid = pid + def wait_pid(self, pid): + req = rpc.criu_req() + req.type = rpc.WAIT_PID + req.pid = pid - resp = self._send_req_and_recv_resp(req) + resp = self._send_req_and_recv_resp(req) - if not resp.success: - raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) + if not resp.success: + raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno) - return resp.status + return resp.status diff --git a/lib/py/images/images.py b/lib/py/images/images.py index 7a9b9da6e..28c6d9e1f 100644 --- a/lib/py/images/images.py +++ b/lib/py/images/images.py @@ -48,8 +48,8 @@ from . import pb from . 
import pb2dict if "encodebytes" not in dir(base64): - base64.encodebytes = base64.encodestring - base64.decodebytes = base64.decodestring + base64.encodebytes = base64.encodestring + base64.decodebytes = base64.decodestring # # Predefined hardcoded constants @@ -57,233 +57,241 @@ sizeof_u16 = 2 sizeof_u32 = 4 sizeof_u64 = 8 + # A helper for rounding -def round_up(x,y): - return (((x - 1) | (y - 1)) + 1) +def round_up(x, y): + return (((x - 1) | (y - 1)) + 1) + class MagicException(Exception): - def __init__(self, magic): - self.magic = magic + def __init__(self, magic): + self.magic = magic + # Generic class to handle loading/dumping criu images entries from/to bin # format to/from dict(json). class entry_handler: - """ + """ Generic class to handle loading/dumping criu images entries from/to bin format to/from dict(json). """ - def __init__(self, payload, extra_handler=None): - """ + + def __init__(self, payload, extra_handler=None): + """ Sets payload class and extra handler class. """ - self.payload = payload - self.extra_handler = extra_handler + self.payload = payload + self.extra_handler = extra_handler - def load(self, f, pretty = False, no_payload = False): - """ + def load(self, f, pretty=False, no_payload=False): + """ Convert criu image entries from binary format to dict(json). Takes a file-like object and returnes a list with entries in dict(json) format. """ - entries = [] + entries = [] - while True: - entry = {} + while True: + entry = {} - # Read payload - pbuff = self.payload() - buf = f.read(4) - if buf == b'': - break - size, = struct.unpack('i', buf) - pbuff.ParseFromString(f.read(size)) - entry = pb2dict.pb2dict(pbuff, pretty) + # Read payload + pbuff = self.payload() + buf = f.read(4) + if buf == b'': + break + size, = struct.unpack('i', buf) + pbuff.ParseFromString(f.read(size)) + entry = pb2dict.pb2dict(pbuff, pretty) - # Read extra - if self.extra_handler: - if no_payload: - def human_readable(num): - for unit in ['','K','M','G','T','P','E','Z']: - if num < 1024.0: - if int(num) == num: - return "%d%sB" % (num, unit) - else: - return "%.1f%sB" % (num, unit) - num /= 1024.0 - return "%.1fYB" % num + # Read extra + if self.extra_handler: + if no_payload: - pl_size = self.extra_handler.skip(f, pbuff) - entry['extra'] = '... <%s>' % human_readable(pl_size) - else: - entry['extra'] = self.extra_handler.load(f, pbuff) + def human_readable(num): + for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: + if num < 1024.0: + if int(num) == num: + return "%d%sB" % (num, unit) + else: + return "%.1f%sB" % (num, unit) + num /= 1024.0 + return "%.1fYB" % num - entries.append(entry) + pl_size = self.extra_handler.skip(f, pbuff) + entry['extra'] = '... <%s>' % human_readable(pl_size) + else: + entry['extra'] = self.extra_handler.load(f, pbuff) - return entries + entries.append(entry) - def loads(self, s, pretty = False): - """ + return entries + + def loads(self, s, pretty=False): + """ Same as load(), but takes a string as an argument. """ - f = io.BytesIO(s) - return self.load(f, pretty) + f = io.BytesIO(s) + return self.load(f, pretty) - def dump(self, entries, f): - """ + def dump(self, entries, f): + """ Convert criu image entries from dict(json) format to binary. Takes a list of entries and a file-like object to write entries in binary format to. 
""" - for entry in entries: - extra = entry.pop('extra', None) + for entry in entries: + extra = entry.pop('extra', None) - # Write payload - pbuff = self.payload() - pb2dict.dict2pb(entry, pbuff) - pb_str = pbuff.SerializeToString() - size = len(pb_str) - f.write(struct.pack('i', size)) - f.write(pb_str) + # Write payload + pbuff = self.payload() + pb2dict.dict2pb(entry, pbuff) + pb_str = pbuff.SerializeToString() + size = len(pb_str) + f.write(struct.pack('i', size)) + f.write(pb_str) - # Write extra - if self.extra_handler and extra: - self.extra_handler.dump(extra, f, pbuff) + # Write extra + if self.extra_handler and extra: + self.extra_handler.dump(extra, f, pbuff) - def dumps(self, entries): - """ + def dumps(self, entries): + """ Same as dump(), but doesn't take file-like object and just returns a string. """ - f = io.BytesIO('') - self.dump(entries, f) - return f.read() + f = io.BytesIO('') + self.dump(entries, f) + return f.read() - def count(self, f): - """ + def count(self, f): + """ Counts the number of top-level object in the image file """ - entries = 0 + entries = 0 - while True: - buf = f.read(4) - if buf == '': - break - size, = struct.unpack('i', buf) - f.seek(size, 1) - entries += 1 + while True: + buf = f.read(4) + if buf == '': + break + size, = struct.unpack('i', buf) + f.seek(size, 1) + entries += 1 + + return entries - return entries # Special handler for pagemap.img class pagemap_handler: - """ + """ Special entry handler for pagemap.img, which is unique in a way that it has a header of pagemap_head type followed by entries of pagemap_entry type. """ - def load(self, f, pretty = False, no_payload = False): - entries = [] - pbuff = pb.pagemap_head() - while True: - buf = f.read(4) - if buf == b'': - break - size, = struct.unpack('i', buf) - pbuff.ParseFromString(f.read(size)) - entries.append(pb2dict.pb2dict(pbuff, pretty)) + def load(self, f, pretty=False, no_payload=False): + entries = [] - pbuff = pb.pagemap_entry() + pbuff = pb.pagemap_head() + while True: + buf = f.read(4) + if buf == b'': + break + size, = struct.unpack('i', buf) + pbuff.ParseFromString(f.read(size)) + entries.append(pb2dict.pb2dict(pbuff, pretty)) - return entries + pbuff = pb.pagemap_entry() - def loads(self, s, pretty = False): - f = io.BytesIO(s) - return self.load(f, pretty) + return entries - def dump(self, entries, f): - pbuff = pb.pagemap_head() - for item in entries: - pb2dict.dict2pb(item, pbuff) - pb_str = pbuff.SerializeToString() - size = len(pb_str) - f.write(struct.pack('i', size)) - f.write(pb_str) + def loads(self, s, pretty=False): + f = io.BytesIO(s) + return self.load(f, pretty) - pbuff = pb.pagemap_entry() + def dump(self, entries, f): + pbuff = pb.pagemap_head() + for item in entries: + pb2dict.dict2pb(item, pbuff) + pb_str = pbuff.SerializeToString() + size = len(pb_str) + f.write(struct.pack('i', size)) + f.write(pb_str) - def dumps(self, entries): - f = io.BytesIO('') - self.dump(entries, f) - return f.read() + pbuff = pb.pagemap_entry() + + def dumps(self, entries): + f = io.BytesIO('') + self.dump(entries, f) + return f.read() + + def count(self, f): + return entry_handler(None).count(f) - 1 - def count(self, f): - return entry_handler(None).count(f) - 1 # Special handler for ghost-file.img class ghost_file_handler: - def load(self, f, pretty = False, no_payload = False): - entries = [] + def load(self, f, pretty=False, no_payload=False): + entries = [] - gf = pb.ghost_file_entry() - buf = f.read(4) - size, = struct.unpack('i', buf) - 
gf.ParseFromString(f.read(size)) - g_entry = pb2dict.pb2dict(gf, pretty) + gf = pb.ghost_file_entry() + buf = f.read(4) + size, = struct.unpack('i', buf) + gf.ParseFromString(f.read(size)) + g_entry = pb2dict.pb2dict(gf, pretty) - if gf.chunks: - entries.append(g_entry) - while True: - gc = pb.ghost_chunk_entry() - buf = f.read(4) - if buf == '': - break - size, = struct.unpack('i', buf) - gc.ParseFromString(f.read(size)) - entry = pb2dict.pb2dict(gc, pretty) - if no_payload: - f.seek(gc.len, os.SEEK_CUR) - else: - entry['extra'] = base64.encodebytes(f.read(gc.len)) - entries.append(entry) - else: - if no_payload: - f.seek(0, os.SEEK_END) - else: - g_entry['extra'] = base64.encodebytes(f.read()) - entries.append(g_entry) + if gf.chunks: + entries.append(g_entry) + while True: + gc = pb.ghost_chunk_entry() + buf = f.read(4) + if buf == '': + break + size, = struct.unpack('i', buf) + gc.ParseFromString(f.read(size)) + entry = pb2dict.pb2dict(gc, pretty) + if no_payload: + f.seek(gc.len, os.SEEK_CUR) + else: + entry['extra'] = base64.encodebytes(f.read(gc.len)) + entries.append(entry) + else: + if no_payload: + f.seek(0, os.SEEK_END) + else: + g_entry['extra'] = base64.encodebytes(f.read()) + entries.append(g_entry) - return entries + return entries - def loads(self, s, pretty = False): - f = io.BytesIO(s) - return self.load(f, pretty) + def loads(self, s, pretty=False): + f = io.BytesIO(s) + return self.load(f, pretty) - def dump(self, entries, f): - pbuff = pb.ghost_file_entry() - item = entries.pop(0) - pb2dict.dict2pb(item, pbuff) - pb_str = pbuff.SerializeToString() - size = len(pb_str) - f.write(struct.pack('i', size)) - f.write(pb_str) + def dump(self, entries, f): + pbuff = pb.ghost_file_entry() + item = entries.pop(0) + pb2dict.dict2pb(item, pbuff) + pb_str = pbuff.SerializeToString() + size = len(pb_str) + f.write(struct.pack('i', size)) + f.write(pb_str) - if pbuff.chunks: - for item in entries: - pbuff = pb.ghost_chunk_entry() - pb2dict.dict2pb(item, pbuff) - pb_str = pbuff.SerializeToString() - size = len(pb_str) - f.write(struct.pack('i', size)) - f.write(pb_str) - f.write(base64.decodebytes(item['extra'])) - else: - f.write(base64.decodebytes(item['extra'])) + if pbuff.chunks: + for item in entries: + pbuff = pb.ghost_chunk_entry() + pb2dict.dict2pb(item, pbuff) + pb_str = pbuff.SerializeToString() + size = len(pb_str) + f.write(struct.pack('i', size)) + f.write(pb_str) + f.write(base64.decodebytes(item['extra'])) + else: + f.write(base64.decodebytes(item['extra'])) - def dumps(self, entries): - f = io.BytesIO('') - self.dump(entries, f) - return f.read() + def dumps(self, entries): + f = io.BytesIO('') + self.dump(entries, f) + return f.read() # In following extra handlers we use base64 encoding @@ -293,304 +301,317 @@ class ghost_file_handler: # do not store big amounts of binary data. They # are negligible comparing to pages size. 
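(These extra handlers back the module-level pycriu.images.load()/dump() that crit, shown earlier in lib/py/cli.py, drives. A minimal sketch, not part of the patch and using a hypothetical image file name, of how the base64-encoded 'extra' field reaches callers:)

import pycriu

with open('pipes-data.img', 'rb') as f:      # hypothetical image file
    img = pycriu.images.load(f, True)        # second argument: pretty-print field values
for entry in img['entries']:
    # queue/pipe payloads, when present, come back base64-encoded under 'extra'
    print(entry.get('extra'))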
class pipes_data_extra_handler: - def load(self, f, pload): - size = pload.bytes - data = f.read(size) - return base64.encodebytes(data) + def load(self, f, pload): + size = pload.bytes + data = f.read(size) + return base64.encodebytes(data) - def dump(self, extra, f, pload): - data = base64.decodebytes(extra) - f.write(data) + def dump(self, extra, f, pload): + data = base64.decodebytes(extra) + f.write(data) + + def skip(self, f, pload): + f.seek(pload.bytes, os.SEEK_CUR) + return pload.bytes - def skip(self, f, pload): - f.seek(pload.bytes, os.SEEK_CUR) - return pload.bytes class sk_queues_extra_handler: - def load(self, f, pload): - size = pload.length - data = f.read(size) - return base64.encodebytes(data) + def load(self, f, pload): + size = pload.length + data = f.read(size) + return base64.encodebytes(data) - def dump(self, extra, f, _unused): - data = base64.decodebytes(extra) - f.write(data) + def dump(self, extra, f, _unused): + data = base64.decodebytes(extra) + f.write(data) - def skip(self, f, pload): - f.seek(pload.length, os.SEEK_CUR) - return pload.length + def skip(self, f, pload): + f.seek(pload.length, os.SEEK_CUR) + return pload.length class tcp_stream_extra_handler: - def load(self, f, pbuff): - d = {} + def load(self, f, pbuff): + d = {} - inq = f.read(pbuff.inq_len) - outq = f.read(pbuff.outq_len) + inq = f.read(pbuff.inq_len) + outq = f.read(pbuff.outq_len) - d['inq'] = base64.encodebytes(inq) - d['outq'] = base64.encodebytes(outq) + d['inq'] = base64.encodebytes(inq) + d['outq'] = base64.encodebytes(outq) - return d + return d - def dump(self, extra, f, _unused): - inq = base64.decodebytes(extra['inq']) - outq = base64.decodebytes(extra['outq']) + def dump(self, extra, f, _unused): + inq = base64.decodebytes(extra['inq']) + outq = base64.decodebytes(extra['outq']) - f.write(inq) - f.write(outq) + f.write(inq) + f.write(outq) + + def skip(self, f, pbuff): + f.seek(0, os.SEEK_END) + return pbuff.inq_len + pbuff.outq_len - def skip(self, f, pbuff): - f.seek(0, os.SEEK_END) - return pbuff.inq_len + pbuff.outq_len class ipc_sem_set_handler: - def load(self, f, pbuff): - entry = pb2dict.pb2dict(pbuff) - size = sizeof_u16 * entry['nsems'] - rounded = round_up(size, sizeof_u64) - s = array.array('H') - if s.itemsize != sizeof_u16: - raise Exception("Array size mismatch") - s.fromstring(f.read(size)) - f.seek(rounded - size, 1) - return s.tolist() + def load(self, f, pbuff): + entry = pb2dict.pb2dict(pbuff) + size = sizeof_u16 * entry['nsems'] + rounded = round_up(size, sizeof_u64) + s = array.array('H') + if s.itemsize != sizeof_u16: + raise Exception("Array size mismatch") + s.fromstring(f.read(size)) + f.seek(rounded - size, 1) + return s.tolist() - def dump(self, extra, f, pbuff): - entry = pb2dict.pb2dict(pbuff) - size = sizeof_u16 * entry['nsems'] - rounded = round_up(size, sizeof_u64) - s = array.array('H') - if s.itemsize != sizeof_u16: - raise Exception("Array size mismatch") - s.fromlist(extra) - if len(s) != entry['nsems']: - raise Exception("Number of semaphores mismatch") - f.write(s.tostring()) - f.write('\0' * (rounded - size)) + def dump(self, extra, f, pbuff): + entry = pb2dict.pb2dict(pbuff) + size = sizeof_u16 * entry['nsems'] + rounded = round_up(size, sizeof_u64) + s = array.array('H') + if s.itemsize != sizeof_u16: + raise Exception("Array size mismatch") + s.fromlist(extra) + if len(s) != entry['nsems']: + raise Exception("Number of semaphores mismatch") + f.write(s.tostring()) + f.write('\0' * (rounded - size)) + + def skip(self, f, pbuff): + entry 
= pb2dict.pb2dict(pbuff) + size = sizeof_u16 * entry['nsems'] + f.seek(round_up(size, sizeof_u64), os.SEEK_CUR) + return size - def skip(self, f, pbuff): - entry = pb2dict.pb2dict(pbuff) - size = sizeof_u16 * entry['nsems'] - f.seek(round_up(size, sizeof_u64), os.SEEK_CUR) - return size class ipc_msg_queue_handler: - def load(self, f, pbuff): - entry = pb2dict.pb2dict(pbuff) - messages = [] - for x in range (0, entry['qnum']): - buf = f.read(4) - if buf == '': - break - size, = struct.unpack('i', buf) - msg = pb.ipc_msg() - msg.ParseFromString(f.read(size)) - rounded = round_up(msg.msize, sizeof_u64) - data = f.read(msg.msize) - f.seek(rounded - msg.msize, 1) - messages.append(pb2dict.pb2dict(msg)) - messages.append(base64.encodebytes(data)) - return messages + def load(self, f, pbuff): + entry = pb2dict.pb2dict(pbuff) + messages = [] + for x in range(0, entry['qnum']): + buf = f.read(4) + if buf == '': + break + size, = struct.unpack('i', buf) + msg = pb.ipc_msg() + msg.ParseFromString(f.read(size)) + rounded = round_up(msg.msize, sizeof_u64) + data = f.read(msg.msize) + f.seek(rounded - msg.msize, 1) + messages.append(pb2dict.pb2dict(msg)) + messages.append(base64.encodebytes(data)) + return messages - def dump(self, extra, f, pbuff): - entry = pb2dict.pb2dict(pbuff) - for i in range (0, len(extra), 2): - msg = pb.ipc_msg() - pb2dict.dict2pb(extra[i], msg) - msg_str = msg.SerializeToString() - size = len(msg_str) - f.write(struct.pack('i', size)) - f.write(msg_str) - rounded = round_up(msg.msize, sizeof_u64) - data = base64.decodebytes(extra[i + 1]) - f.write(data[:msg.msize]) - f.write('\0' * (rounded - msg.msize)) + def dump(self, extra, f, pbuff): + entry = pb2dict.pb2dict(pbuff) + for i in range(0, len(extra), 2): + msg = pb.ipc_msg() + pb2dict.dict2pb(extra[i], msg) + msg_str = msg.SerializeToString() + size = len(msg_str) + f.write(struct.pack('i', size)) + f.write(msg_str) + rounded = round_up(msg.msize, sizeof_u64) + data = base64.decodebytes(extra[i + 1]) + f.write(data[:msg.msize]) + f.write('\0' * (rounded - msg.msize)) - def skip(self, f, pbuff): - entry = pb2dict.pb2dict(pbuff) - pl_len = 0 - for x in range (0, entry['qnum']): - buf = f.read(4) - if buf == '': - break - size, = struct.unpack('i', buf) - msg = pb.ipc_msg() - msg.ParseFromString(f.read(size)) - rounded = round_up(msg.msize, sizeof_u64) - f.seek(rounded, os.SEEK_CUR) - pl_len += size + msg.msize + def skip(self, f, pbuff): + entry = pb2dict.pb2dict(pbuff) + pl_len = 0 + for x in range(0, entry['qnum']): + buf = f.read(4) + if buf == '': + break + size, = struct.unpack('i', buf) + msg = pb.ipc_msg() + msg.ParseFromString(f.read(size)) + rounded = round_up(msg.msize, sizeof_u64) + f.seek(rounded, os.SEEK_CUR) + pl_len += size + msg.msize + + return pl_len - return pl_len class ipc_shm_handler: - def load(self, f, pbuff): - entry = pb2dict.pb2dict(pbuff) - size = entry['size'] - data = f.read(size) - rounded = round_up(size, sizeof_u32) - f.seek(rounded - size, 1) - return base64.encodebytes(data) + def load(self, f, pbuff): + entry = pb2dict.pb2dict(pbuff) + size = entry['size'] + data = f.read(size) + rounded = round_up(size, sizeof_u32) + f.seek(rounded - size, 1) + return base64.encodebytes(data) - def dump(self, extra, f, pbuff): - entry = pb2dict.pb2dict(pbuff) - size = entry['size'] - data = base64.decodebytes(extra) - rounded = round_up(size, sizeof_u32) - f.write(data[:size]) - f.write('\0' * (rounded - size)) + def dump(self, extra, f, pbuff): + entry = pb2dict.pb2dict(pbuff) + size = entry['size'] + 
data = base64.decodebytes(extra) + rounded = round_up(size, sizeof_u32) + f.write(data[:size]) + f.write('\0' * (rounded - size)) - def skip(self, f, pbuff): - entry = pb2dict.pb2dict(pbuff) - size = entry['size'] - rounded = round_up(size, sizeof_u32) - f.seek(rounded, os.SEEK_CUR) - return size + def skip(self, f, pbuff): + entry = pb2dict.pb2dict(pbuff) + size = entry['size'] + rounded = round_up(size, sizeof_u32) + f.seek(rounded, os.SEEK_CUR) + return size handlers = { - 'INVENTORY' : entry_handler(pb.inventory_entry), - 'CORE' : entry_handler(pb.core_entry), - 'IDS' : entry_handler(pb.task_kobj_ids_entry), - 'CREDS' : entry_handler(pb.creds_entry), - 'UTSNS' : entry_handler(pb.utsns_entry), - 'IPC_VAR' : entry_handler(pb.ipc_var_entry), - 'FS' : entry_handler(pb.fs_entry), - 'GHOST_FILE' : ghost_file_handler(), - 'MM' : entry_handler(pb.mm_entry), - 'CGROUP' : entry_handler(pb.cgroup_entry), - 'TCP_STREAM' : entry_handler(pb.tcp_stream_entry, tcp_stream_extra_handler()), - 'STATS' : entry_handler(pb.stats_entry), - 'PAGEMAP' : pagemap_handler(), # Special one - 'PSTREE' : entry_handler(pb.pstree_entry), - 'REG_FILES' : entry_handler(pb.reg_file_entry), - 'NS_FILES' : entry_handler(pb.ns_file_entry), - 'EVENTFD_FILE' : entry_handler(pb.eventfd_file_entry), - 'EVENTPOLL_FILE' : entry_handler(pb.eventpoll_file_entry), - 'EVENTPOLL_TFD' : entry_handler(pb.eventpoll_tfd_entry), - 'SIGNALFD' : entry_handler(pb.signalfd_entry), - 'TIMERFD' : entry_handler(pb.timerfd_entry), - 'INOTIFY_FILE' : entry_handler(pb.inotify_file_entry), - 'INOTIFY_WD' : entry_handler(pb.inotify_wd_entry), - 'FANOTIFY_FILE' : entry_handler(pb.fanotify_file_entry), - 'FANOTIFY_MARK' : entry_handler(pb.fanotify_mark_entry), - 'VMAS' : entry_handler(pb.vma_entry), - 'PIPES' : entry_handler(pb.pipe_entry), - 'FIFO' : entry_handler(pb.fifo_entry), - 'SIGACT' : entry_handler(pb.sa_entry), - 'NETLINK_SK' : entry_handler(pb.netlink_sk_entry), - 'REMAP_FPATH' : entry_handler(pb.remap_file_path_entry), - 'MNTS' : entry_handler(pb.mnt_entry), - 'TTY_FILES' : entry_handler(pb.tty_file_entry), - 'TTY_INFO' : entry_handler(pb.tty_info_entry), - 'TTY_DATA' : entry_handler(pb.tty_data_entry), - 'RLIMIT' : entry_handler(pb.rlimit_entry), - 'TUNFILE' : entry_handler(pb.tunfile_entry), - 'EXT_FILES' : entry_handler(pb.ext_file_entry), - 'IRMAP_CACHE' : entry_handler(pb.irmap_cache_entry), - 'FILE_LOCKS' : entry_handler(pb.file_lock_entry), - 'FDINFO' : entry_handler(pb.fdinfo_entry), - 'UNIXSK' : entry_handler(pb.unix_sk_entry), - 'INETSK' : entry_handler(pb.inet_sk_entry), - 'PACKETSK' : entry_handler(pb.packet_sock_entry), - 'ITIMERS' : entry_handler(pb.itimer_entry), - 'POSIX_TIMERS' : entry_handler(pb.posix_timer_entry), - 'NETDEV' : entry_handler(pb.net_device_entry), - 'PIPES_DATA' : entry_handler(pb.pipe_data_entry, pipes_data_extra_handler()), - 'FIFO_DATA' : entry_handler(pb.pipe_data_entry, pipes_data_extra_handler()), - 'SK_QUEUES' : entry_handler(pb.sk_packet_entry, sk_queues_extra_handler()), - 'IPCNS_SHM' : entry_handler(pb.ipc_shm_entry, ipc_shm_handler()), - 'IPCNS_SEM' : entry_handler(pb.ipc_sem_entry, ipc_sem_set_handler()), - 'IPCNS_MSG' : entry_handler(pb.ipc_msg_entry, ipc_msg_queue_handler()), - 'NETNS' : entry_handler(pb.netns_entry), - 'USERNS' : entry_handler(pb.userns_entry), - 'SECCOMP' : entry_handler(pb.seccomp_entry), - 'AUTOFS' : entry_handler(pb.autofs_entry), - 'FILES' : entry_handler(pb.file_entry), - 'CPUINFO' : entry_handler(pb.cpuinfo_entry), - } + 'INVENTORY': entry_handler(pb.inventory_entry), + 
'CORE': entry_handler(pb.core_entry), + 'IDS': entry_handler(pb.task_kobj_ids_entry), + 'CREDS': entry_handler(pb.creds_entry), + 'UTSNS': entry_handler(pb.utsns_entry), + 'IPC_VAR': entry_handler(pb.ipc_var_entry), + 'FS': entry_handler(pb.fs_entry), + 'GHOST_FILE': ghost_file_handler(), + 'MM': entry_handler(pb.mm_entry), + 'CGROUP': entry_handler(pb.cgroup_entry), + 'TCP_STREAM': entry_handler(pb.tcp_stream_entry, + tcp_stream_extra_handler()), + 'STATS': entry_handler(pb.stats_entry), + 'PAGEMAP': pagemap_handler(), # Special one + 'PSTREE': entry_handler(pb.pstree_entry), + 'REG_FILES': entry_handler(pb.reg_file_entry), + 'NS_FILES': entry_handler(pb.ns_file_entry), + 'EVENTFD_FILE': entry_handler(pb.eventfd_file_entry), + 'EVENTPOLL_FILE': entry_handler(pb.eventpoll_file_entry), + 'EVENTPOLL_TFD': entry_handler(pb.eventpoll_tfd_entry), + 'SIGNALFD': entry_handler(pb.signalfd_entry), + 'TIMERFD': entry_handler(pb.timerfd_entry), + 'INOTIFY_FILE': entry_handler(pb.inotify_file_entry), + 'INOTIFY_WD': entry_handler(pb.inotify_wd_entry), + 'FANOTIFY_FILE': entry_handler(pb.fanotify_file_entry), + 'FANOTIFY_MARK': entry_handler(pb.fanotify_mark_entry), + 'VMAS': entry_handler(pb.vma_entry), + 'PIPES': entry_handler(pb.pipe_entry), + 'FIFO': entry_handler(pb.fifo_entry), + 'SIGACT': entry_handler(pb.sa_entry), + 'NETLINK_SK': entry_handler(pb.netlink_sk_entry), + 'REMAP_FPATH': entry_handler(pb.remap_file_path_entry), + 'MNTS': entry_handler(pb.mnt_entry), + 'TTY_FILES': entry_handler(pb.tty_file_entry), + 'TTY_INFO': entry_handler(pb.tty_info_entry), + 'TTY_DATA': entry_handler(pb.tty_data_entry), + 'RLIMIT': entry_handler(pb.rlimit_entry), + 'TUNFILE': entry_handler(pb.tunfile_entry), + 'EXT_FILES': entry_handler(pb.ext_file_entry), + 'IRMAP_CACHE': entry_handler(pb.irmap_cache_entry), + 'FILE_LOCKS': entry_handler(pb.file_lock_entry), + 'FDINFO': entry_handler(pb.fdinfo_entry), + 'UNIXSK': entry_handler(pb.unix_sk_entry), + 'INETSK': entry_handler(pb.inet_sk_entry), + 'PACKETSK': entry_handler(pb.packet_sock_entry), + 'ITIMERS': entry_handler(pb.itimer_entry), + 'POSIX_TIMERS': entry_handler(pb.posix_timer_entry), + 'NETDEV': entry_handler(pb.net_device_entry), + 'PIPES_DATA': entry_handler(pb.pipe_data_entry, + pipes_data_extra_handler()), + 'FIFO_DATA': entry_handler(pb.pipe_data_entry, pipes_data_extra_handler()), + 'SK_QUEUES': entry_handler(pb.sk_packet_entry, sk_queues_extra_handler()), + 'IPCNS_SHM': entry_handler(pb.ipc_shm_entry, ipc_shm_handler()), + 'IPCNS_SEM': entry_handler(pb.ipc_sem_entry, ipc_sem_set_handler()), + 'IPCNS_MSG': entry_handler(pb.ipc_msg_entry, ipc_msg_queue_handler()), + 'NETNS': entry_handler(pb.netns_entry), + 'USERNS': entry_handler(pb.userns_entry), + 'SECCOMP': entry_handler(pb.seccomp_entry), + 'AUTOFS': entry_handler(pb.autofs_entry), + 'FILES': entry_handler(pb.file_entry), + 'CPUINFO': entry_handler(pb.cpuinfo_entry), +} + def __rhandler(f): - # Images v1.1 NOTE: First read "first" magic. - img_magic, = struct.unpack('i', f.read(4)) - if img_magic in (magic.by_name['IMG_COMMON'], magic.by_name['IMG_SERVICE']): - img_magic, = struct.unpack('i', f.read(4)) + # Images v1.1 NOTE: First read "first" magic. 
+ img_magic, = struct.unpack('i', f.read(4)) + if img_magic in (magic.by_name['IMG_COMMON'], + magic.by_name['IMG_SERVICE']): + img_magic, = struct.unpack('i', f.read(4)) - try: - m = magic.by_val[img_magic] - except: - raise MagicException(img_magic) + try: + m = magic.by_val[img_magic] + except: + raise MagicException(img_magic) - try: - handler = handlers[m] - except: - raise Exception("No handler found for image with magic " + m) + try: + handler = handlers[m] + except: + raise Exception("No handler found for image with magic " + m) - return m, handler + return m, handler -def load(f, pretty = False, no_payload = False): - """ + +def load(f, pretty=False, no_payload=False): + """ Convert criu image from binary format to dict(json). Takes a file-like object to read criu image from. Returns criu image in dict(json) format. """ - image = {} + image = {} - m, handler = __rhandler(f) + m, handler = __rhandler(f) - image['magic'] = m - image['entries'] = handler.load(f, pretty, no_payload) + image['magic'] = m + image['entries'] = handler.load(f, pretty, no_payload) + + return image - return image def info(f): - res = {} + res = {} - m, handler = __rhandler(f) + m, handler = __rhandler(f) - res['magic'] = m - res['count'] = handler.count(f) + res['magic'] = m + res['count'] = handler.count(f) - return res + return res -def loads(s, pretty = False): - """ + +def loads(s, pretty=False): + """ Same as load(), but takes a string. """ - f = io.BytesIO(s) - return load(f, pretty) + f = io.BytesIO(s) + return load(f, pretty) + def dump(img, f): - """ + """ Convert criu image from dict(json) format to binary. Takes an image in dict(json) format and file-like object to write to. """ - m = img['magic'] - magic_val = magic.by_name[img['magic']] + m = img['magic'] + magic_val = magic.by_name[img['magic']] - # Images v1.1 NOTE: use "second" magic to identify what "first" - # should be written. - if m != 'INVENTORY': - if m in ('STATS', 'IRMAP_CACHE'): - f.write(struct.pack('i', magic.by_name['IMG_SERVICE'])) - else: - f.write(struct.pack('i', magic.by_name['IMG_COMMON'])) + # Images v1.1 NOTE: use "second" magic to identify what "first" + # should be written. + if m != 'INVENTORY': + if m in ('STATS', 'IRMAP_CACHE'): + f.write(struct.pack('i', magic.by_name['IMG_SERVICE'])) + else: + f.write(struct.pack('i', magic.by_name['IMG_COMMON'])) - f.write(struct.pack('i', magic_val)) + f.write(struct.pack('i', magic_val)) - try: - handler = handlers[m] - except: - raise Exception("No handler found for image with such magic") + try: + handler = handlers[m] + except: + raise Exception("No handler found for image with such magic") + + handler.dump(img['entries'], f) - handler.dump(img['entries'], f) def dumps(img): - """ + """ Same as dump(), but takes only an image and returns a string. """ - f = io.BytesIO(b'') - dump(img, f) - return f.getvalue() + f = io.BytesIO(b'') + dump(img, f) + return f.getvalue() diff --git a/lib/py/images/pb2dict.py b/lib/py/images/pb2dict.py index 6b4a772c7..daaa7297e 100644 --- a/lib/py/images/pb2dict.py +++ b/lib/py/images/pb2dict.py @@ -9,8 +9,8 @@ import base64 import quopri if "encodebytes" not in dir(base64): - base64.encodebytes = base64.encodestring - base64.decodebytes = base64.decodestring + base64.encodebytes = base64.encodestring + base64.decodebytes = base64.decodestring # pb2dict and dict2pb are methods to convert pb to/from dict. # Inspired by: @@ -29,350 +29,396 @@ if "encodebytes" not in dir(base64): # enums to string value too. (i.e. 
"march : x86_64" is better then # "march : 1"). - _basic_cast = { - FD.TYPE_FIXED64 : int, - FD.TYPE_FIXED32 : int, - FD.TYPE_SFIXED64 : int, - FD.TYPE_SFIXED32 : int, - - FD.TYPE_INT64 : int, - FD.TYPE_UINT64 : int, - FD.TYPE_SINT64 : int, - - FD.TYPE_INT32 : int, - FD.TYPE_UINT32 : int, - FD.TYPE_SINT32 : int, - - FD.TYPE_BOOL : bool, - - FD.TYPE_STRING : str + FD.TYPE_FIXED64: int, + FD.TYPE_FIXED32: int, + FD.TYPE_SFIXED64: int, + FD.TYPE_SFIXED32: int, + FD.TYPE_INT64: int, + FD.TYPE_UINT64: int, + FD.TYPE_SINT64: int, + FD.TYPE_INT32: int, + FD.TYPE_UINT32: int, + FD.TYPE_SINT32: int, + FD.TYPE_BOOL: bool, + FD.TYPE_STRING: str } + def _marked_as_hex(field): - return field.GetOptions().Extensions[opts_pb2.criu].hex + return field.GetOptions().Extensions[opts_pb2.criu].hex + def _marked_as_ip(field): - return field.GetOptions().Extensions[opts_pb2.criu].ipadd + return field.GetOptions().Extensions[opts_pb2.criu].ipadd + def _marked_as_flags(field): - return field.GetOptions().Extensions[opts_pb2.criu].flags + return field.GetOptions().Extensions[opts_pb2.criu].flags + def _marked_as_dev(field): - return field.GetOptions().Extensions[opts_pb2.criu].dev + return field.GetOptions().Extensions[opts_pb2.criu].dev + def _marked_as_odev(field): - return field.GetOptions().Extensions[opts_pb2.criu].odev + return field.GetOptions().Extensions[opts_pb2.criu].odev + def _marked_as_dict(field): - return field.GetOptions().Extensions[opts_pb2.criu].dict + return field.GetOptions().Extensions[opts_pb2.criu].dict + def _custom_conv(field): - return field.GetOptions().Extensions[opts_pb2.criu].conv + return field.GetOptions().Extensions[opts_pb2.criu].conv + mmap_prot_map = [ - ('PROT_READ', 0x1), - ('PROT_WRITE', 0x2), - ('PROT_EXEC', 0x4), + ('PROT_READ', 0x1), + ('PROT_WRITE', 0x2), + ('PROT_EXEC', 0x4), ] mmap_flags_map = [ - ('MAP_SHARED', 0x1), - ('MAP_PRIVATE', 0x2), - ('MAP_ANON', 0x20), - ('MAP_GROWSDOWN', 0x0100), + ('MAP_SHARED', 0x1), + ('MAP_PRIVATE', 0x2), + ('MAP_ANON', 0x20), + ('MAP_GROWSDOWN', 0x0100), ] mmap_status_map = [ - ('VMA_AREA_NONE', 0 << 0), - ('VMA_AREA_REGULAR', 1 << 0), - ('VMA_AREA_STACK', 1 << 1), - ('VMA_AREA_VSYSCALL', 1 << 2), - ('VMA_AREA_VDSO', 1 << 3), - ('VMA_AREA_HEAP', 1 << 5), - - ('VMA_FILE_PRIVATE', 1 << 6), - ('VMA_FILE_SHARED', 1 << 7), - ('VMA_ANON_SHARED', 1 << 8), - ('VMA_ANON_PRIVATE', 1 << 9), - - ('VMA_AREA_SYSVIPC', 1 << 10), - ('VMA_AREA_SOCKET', 1 << 11), - ('VMA_AREA_VVAR', 1 << 12), - ('VMA_AREA_AIORING', 1 << 13), - - ('VMA_UNSUPP', 1 << 31), + ('VMA_AREA_NONE', 0 << 0), + ('VMA_AREA_REGULAR', 1 << 0), + ('VMA_AREA_STACK', 1 << 1), + ('VMA_AREA_VSYSCALL', 1 << 2), + ('VMA_AREA_VDSO', 1 << 3), + ('VMA_AREA_HEAP', 1 << 5), + ('VMA_FILE_PRIVATE', 1 << 6), + ('VMA_FILE_SHARED', 1 << 7), + ('VMA_ANON_SHARED', 1 << 8), + ('VMA_ANON_PRIVATE', 1 << 9), + ('VMA_AREA_SYSVIPC', 1 << 10), + ('VMA_AREA_SOCKET', 1 << 11), + ('VMA_AREA_VVAR', 1 << 12), + ('VMA_AREA_AIORING', 1 << 13), + ('VMA_UNSUPP', 1 << 31), ] rfile_flags_map = [ - ('O_WRONLY', 0o1), - ('O_RDWR', 0o2), - ('O_APPEND', 0o2000), - ('O_DIRECT', 0o40000), - ('O_LARGEFILE', 0o100000), + ('O_WRONLY', 0o1), + ('O_RDWR', 0o2), + ('O_APPEND', 0o2000), + ('O_DIRECT', 0o40000), + ('O_LARGEFILE', 0o100000), ] pmap_flags_map = [ - ('PE_PARENT', 1 << 0), - ('PE_LAZY', 1 << 1), - ('PE_PRESENT', 1 << 2), + ('PE_PARENT', 1 << 0), + ('PE_LAZY', 1 << 1), + ('PE_PRESENT', 1 << 2), ] flags_maps = { - 'mmap.prot' : mmap_prot_map, - 'mmap.flags' : mmap_flags_map, - 'mmap.status' : mmap_status_map, - 
'rfile.flags' : rfile_flags_map, - 'pmap.flags' : pmap_flags_map, + 'mmap.prot': mmap_prot_map, + 'mmap.flags': mmap_flags_map, + 'mmap.status': mmap_status_map, + 'rfile.flags': rfile_flags_map, + 'pmap.flags': pmap_flags_map, } gen_maps = { - 'task_state' : { 1: 'Alive', 3: 'Zombie', 6: 'Stopped' }, + 'task_state': { + 1: 'Alive', + 3: 'Zombie', + 6: 'Stopped' + }, } sk_maps = { - 'family' : { 1: 'UNIX', - 2: 'INET', - 10: 'INET6', - 16: 'NETLINK', - 17: 'PACKET' }, - 'type' : { 1: 'STREAM', - 2: 'DGRAM', - 3: 'RAW', - 5: 'SEQPACKET', - 10: 'PACKET' }, - 'state' : { 1: 'ESTABLISHED', - 2: 'SYN_SENT', - 3: 'SYN_RECV', - 4: 'FIN_WAIT1', - 5: 'FIN_WAIT2', - 6: 'TIME_WAIT', - 7: 'CLOSE', - 8: 'CLOSE_WAIT', - 9: 'LAST_ACK', - 10: 'LISTEN' }, - 'proto' : { 0: 'IP', - 6: 'TCP', - 17: 'UDP', - 136: 'UDPLITE' }, + 'family': { + 1: 'UNIX', + 2: 'INET', + 10: 'INET6', + 16: 'NETLINK', + 17: 'PACKET' + }, + 'type': { + 1: 'STREAM', + 2: 'DGRAM', + 3: 'RAW', + 5: 'SEQPACKET', + 10: 'PACKET' + }, + 'state': { + 1: 'ESTABLISHED', + 2: 'SYN_SENT', + 3: 'SYN_RECV', + 4: 'FIN_WAIT1', + 5: 'FIN_WAIT2', + 6: 'TIME_WAIT', + 7: 'CLOSE', + 8: 'CLOSE_WAIT', + 9: 'LAST_ACK', + 10: 'LISTEN' + }, + 'proto': { + 0: 'IP', + 6: 'TCP', + 17: 'UDP', + 136: 'UDPLITE' + }, } -gen_rmaps = { k: {v2:k2 for k2,v2 in list(v.items())} for k,v in list(gen_maps.items()) } -sk_rmaps = { k: {v2:k2 for k2,v2 in list(v.items())} for k,v in list(sk_maps.items()) } +gen_rmaps = { + k: {v2: k2 + for k2, v2 in list(v.items())} + for k, v in list(gen_maps.items()) +} +sk_rmaps = { + k: {v2: k2 + for k2, v2 in list(v.items())} + for k, v in list(sk_maps.items()) +} dict_maps = { - 'gen' : ( gen_maps, gen_rmaps ), - 'sk' : ( sk_maps, sk_rmaps ), + 'gen': (gen_maps, gen_rmaps), + 'sk': (sk_maps, sk_rmaps), } + def map_flags(value, flags_map): - bs = [x[0] for x in [x for x in flags_map if value & x[1]]] - value &= ~sum([x[1] for x in flags_map]) - if value: - bs.append("0x%x" % value) - return " | ".join(bs) + bs = [x[0] for x in [x for x in flags_map if value & x[1]]] + value &= ~sum([x[1] for x in flags_map]) + if value: + bs.append("0x%x" % value) + return " | ".join(bs) + def unmap_flags(value, flags_map): - if value == '': - return 0 + if value == '': + return 0 - bd = dict(flags_map) - return sum([int(str(bd.get(x, x)), 0) for x in [x.strip() for x in value.split('|')]]) + bd = dict(flags_map) + return sum([ + int(str(bd.get(x, x)), 0) + for x in [x.strip() for x in value.split('|')] + ]) + + +kern_minorbits = 20 # This is how kernel encodes dev_t in new format -kern_minorbits = 20 # This is how kernel encodes dev_t in new format def decode_dev(field, value): - if _marked_as_odev(field): - return "%d:%d" % (os.major(value), os.minor(value)) - else: - return "%d:%d" % (value >> kern_minorbits, value & ((1 << kern_minorbits) - 1)) + if _marked_as_odev(field): + return "%d:%d" % (os.major(value), os.minor(value)) + else: + return "%d:%d" % (value >> kern_minorbits, + value & ((1 << kern_minorbits) - 1)) + def encode_dev(field, value): - dev = [int(x) for x in value.split(':')] - if _marked_as_odev(field): - return os.makedev(dev[0], dev[1]) - else: - return dev[0] << kern_minorbits | dev[1] + dev = [int(x) for x in value.split(':')] + if _marked_as_odev(field): + return os.makedev(dev[0], dev[1]) + else: + return dev[0] << kern_minorbits | dev[1] + def encode_base64(value): - return base64.encodebytes(value) + return base64.encodebytes(value) + + def decode_base64(value): - return base64.decodebytes(value) + return 
base64.decodebytes(value) + def encode_unix(value): - return quopri.encodestring(value) -def decode_unix(value): - return quopri.decodestring(value) + return quopri.encodestring(value) + + +def decode_unix(value): + return quopri.decodestring(value) + + +encode = {'unix_name': encode_unix} +decode = {'unix_name': decode_unix} -encode = { 'unix_name': encode_unix } -decode = { 'unix_name': decode_unix } def get_bytes_enc(field): - c = _custom_conv(field) - if c: - return encode[c] - else: - return encode_base64 + c = _custom_conv(field) + if c: + return encode[c] + else: + return encode_base64 + def get_bytes_dec(field): - c = _custom_conv(field) - if c: - return decode[c] - else: - return decode_base64 + c = _custom_conv(field) + if c: + return decode[c] + else: + return decode_base64 + def is_string(value): - # Python 3 compatibility - if "basestring" in __builtins__: - string_types = basestring # noqa: F821 - else: - string_types = (str, bytes) - return isinstance(value, string_types) + # Python 3 compatibility + if "basestring" in __builtins__: + string_types = basestring # noqa: F821 + else: + string_types = (str, bytes) + return isinstance(value, string_types) -def _pb2dict_cast(field, value, pretty = False, is_hex = False): - if not is_hex: - is_hex = _marked_as_hex(field) - if field.type == FD.TYPE_MESSAGE: - return pb2dict(value, pretty, is_hex) - elif field.type == FD.TYPE_BYTES: - return get_bytes_enc(field)(value) - elif field.type == FD.TYPE_ENUM: - return field.enum_type.values_by_number.get(value, None).name - elif field.type in _basic_cast: - cast = _basic_cast[field.type] - if pretty and (cast == int): - if is_hex: - # Fields that have (criu).hex = true option set - # should be stored in hex string format. - return "0x%x" % value +def _pb2dict_cast(field, value, pretty=False, is_hex=False): + if not is_hex: + is_hex = _marked_as_hex(field) - if _marked_as_dev(field): - return decode_dev(field, value) + if field.type == FD.TYPE_MESSAGE: + return pb2dict(value, pretty, is_hex) + elif field.type == FD.TYPE_BYTES: + return get_bytes_enc(field)(value) + elif field.type == FD.TYPE_ENUM: + return field.enum_type.values_by_number.get(value, None).name + elif field.type in _basic_cast: + cast = _basic_cast[field.type] + if pretty and (cast == int): + if is_hex: + # Fields that have (criu).hex = true option set + # should be stored in hex string format. + return "0x%x" % value - flags = _marked_as_flags(field) - if flags: - try: - flags_map = flags_maps[flags] - except: - return "0x%x" % value # flags are better seen as hex anyway - else: - return map_flags(value, flags_map) + if _marked_as_dev(field): + return decode_dev(field, value) - dct = _marked_as_dict(field) - if dct: - return dict_maps[dct][0][field.name].get(value, cast(value)) + flags = _marked_as_flags(field) + if flags: + try: + flags_map = flags_maps[flags] + except Exception: + return "0x%x" % value # flags are better seen as hex anyway + else: + return map_flags(value, flags_map) - return cast(value) - else: - raise Exception("Field(%s) has unsupported type %d" % (field.name, field.type)) + dct = _marked_as_dict(field) + if dct: + return dict_maps[dct][0][field.name].get(value, cast(value)) -def pb2dict(pb, pretty = False, is_hex = False): - """ - Convert protobuf msg to dictionary. - Takes a protobuf message and returns a dict. 
- """ - d = collections.OrderedDict() if pretty else {} - for field, value in pb.ListFields(): - if field.label == FD.LABEL_REPEATED: - d_val = [] - if pretty and _marked_as_ip(field): - if len(value) == 1: - v = socket.ntohl(value[0]) - addr = IPv4Address(v) - else: - v = 0 + (socket.ntohl(value[0]) << (32 * 3)) + \ - (socket.ntohl(value[1]) << (32 * 2)) + \ - (socket.ntohl(value[2]) << (32 * 1)) + \ - (socket.ntohl(value[3])) - addr = IPv6Address(v) + return cast(value) + else: + raise Exception("Field(%s) has unsupported type %d" % + (field.name, field.type)) - d_val.append(addr.compressed) - else: - for v in value: - d_val.append(_pb2dict_cast(field, v, pretty, is_hex)) - else: - d_val = _pb2dict_cast(field, value, pretty, is_hex) - d[field.name] = d_val - return d +def pb2dict(pb, pretty=False, is_hex=False): + """ + Convert protobuf msg to dictionary. + Takes a protobuf message and returns a dict. + """ + d = collections.OrderedDict() if pretty else {} + for field, value in pb.ListFields(): + if field.label == FD.LABEL_REPEATED: + d_val = [] + if pretty and _marked_as_ip(field): + if len(value) == 1: + v = socket.ntohl(value[0]) + addr = IPv4Address(v) + else: + v = 0 + (socket.ntohl(value[0]) << (32 * 3)) + \ + (socket.ntohl(value[1]) << (32 * 2)) + \ + (socket.ntohl(value[2]) << (32 * 1)) + \ + (socket.ntohl(value[3])) + addr = IPv6Address(v) + + d_val.append(addr.compressed) + else: + for v in value: + d_val.append(_pb2dict_cast(field, v, pretty, is_hex)) + else: + d_val = _pb2dict_cast(field, value, pretty, is_hex) + + d[field.name] = d_val + return d + def _dict2pb_cast(field, value): - # Not considering TYPE_MESSAGE here, as repeated - # and non-repeated messages need special treatment - # in this case, and are hadled separately. - if field.type == FD.TYPE_BYTES: - return get_bytes_dec(field)(value) - elif field.type == FD.TYPE_ENUM: - return field.enum_type.values_by_name.get(value, None).number - elif field.type in _basic_cast: - cast = _basic_cast[field.type] - if (cast == int) and is_string(value): - if _marked_as_dev(field): - return encode_dev(field, value) + # Not considering TYPE_MESSAGE here, as repeated + # and non-repeated messages need special treatment + # in this case, and are hadled separately. + if field.type == FD.TYPE_BYTES: + return get_bytes_dec(field)(value) + elif field.type == FD.TYPE_ENUM: + return field.enum_type.values_by_name.get(value, None).number + elif field.type in _basic_cast: + cast = _basic_cast[field.type] + if (cast == int) and is_string(value): + if _marked_as_dev(field): + return encode_dev(field, value) - flags = _marked_as_flags(field) - if flags: - try: - flags_map = flags_maps[flags] - except: - pass # Try to use plain string cast - else: - return unmap_flags(value, flags_map) + flags = _marked_as_flags(field) + if flags: + try: + flags_map = flags_maps[flags] + except Exception: + pass # Try to use plain string cast + else: + return unmap_flags(value, flags_map) - dct = _marked_as_dict(field) - if dct: - ret = dict_maps[dct][1][field.name].get(value, None) - if ret == None: - ret = cast(value, 0) - return ret + dct = _marked_as_dict(field) + if dct: + ret = dict_maps[dct][1][field.name].get(value, None) + if ret is None: + ret = cast(value, 0) + return ret + + # Some int or long fields might be stored as hex + # strings. See _pb2dict_cast. 
+ return cast(value, 0) + else: + return cast(value) + else: + raise Exception("Field(%s) has unsupported type %d" % + (field.name, field.type)) - # Some int or long fields might be stored as hex - # strings. See _pb2dict_cast. - return cast(value, 0) - else: - return cast(value) - else: - raise Exception("Field(%s) has unsupported type %d" % (field.name, field.type)) def dict2pb(d, pb): - """ - Convert dictionary to protobuf msg. - Takes dict and protobuf message to be merged into. - """ - for field in pb.DESCRIPTOR.fields: - if field.name not in d: - continue - value = d[field.name] - if field.label == FD.LABEL_REPEATED: - pb_val = getattr(pb, field.name, None) - if is_string(value[0]) and _marked_as_ip(field): - val = ip_address(value[0]) - if val.version == 4: - pb_val.append(socket.htonl(int(val))) - elif val.version == 6: - ival = int(val) - pb_val.append(socket.htonl((ival >> (32 * 3)) & 0xFFFFFFFF)) - pb_val.append(socket.htonl((ival >> (32 * 2)) & 0xFFFFFFFF)) - pb_val.append(socket.htonl((ival >> (32 * 1)) & 0xFFFFFFFF)) - pb_val.append(socket.htonl((ival >> (32 * 0)) & 0xFFFFFFFF)) - else: - raise Exception("Unknown IP address version %d" % val.version) - continue + """ + Convert dictionary to protobuf msg. + Takes dict and protobuf message to be merged into. + """ + for field in pb.DESCRIPTOR.fields: + if field.name not in d: + continue + value = d[field.name] + if field.label == FD.LABEL_REPEATED: + pb_val = getattr(pb, field.name, None) + if is_string(value[0]) and _marked_as_ip(field): + val = ip_address(value[0]) + if val.version == 4: + pb_val.append(socket.htonl(int(val))) + elif val.version == 6: + ival = int(val) + pb_val.append(socket.htonl((ival >> (32 * 3)) & 0xFFFFFFFF)) + pb_val.append(socket.htonl((ival >> (32 * 2)) & 0xFFFFFFFF)) + pb_val.append(socket.htonl((ival >> (32 * 1)) & 0xFFFFFFFF)) + pb_val.append(socket.htonl((ival >> (32 * 0)) & 0xFFFFFFFF)) + else: + raise Exception("Unknown IP address version %d" % + val.version) + continue - for v in value: - if field.type == FD.TYPE_MESSAGE: - dict2pb(v, pb_val.add()) - else: - pb_val.append(_dict2pb_cast(field, v)) - else: - if field.type == FD.TYPE_MESSAGE: - # SetInParent method acts just like has_* = true in C, - # and helps to properly treat cases when we have optional - # field with empty repeated inside. - getattr(pb, field.name).SetInParent() + for v in value: + if field.type == FD.TYPE_MESSAGE: + dict2pb(v, pb_val.add()) + else: + pb_val.append(_dict2pb_cast(field, v)) + else: + if field.type == FD.TYPE_MESSAGE: + # SetInParent method acts just like has_* = true in C, + # and helps to properly treat cases when we have optional + # field with empty repeated inside. 
+ getattr(pb, field.name).SetInParent() - dict2pb(value, getattr(pb, field.name, None)) - else: - setattr(pb, field.name, _dict2pb_cast(field, value)) - return pb + dict2pb(value, getattr(pb, field.name, None)) + else: + setattr(pb, field.name, _dict2pb_cast(field, value)) + return pb diff --git a/scripts/crit-setup.py b/scripts/crit-setup.py index f40588142..871e55921 100644 --- a/scripts/crit-setup.py +++ b/scripts/crit-setup.py @@ -1,12 +1,11 @@ from distutils.core import setup -setup(name = "crit", - version = "0.0.1", - description = "CRiu Image Tool", - author = "CRIU team", - author_email = "criu@openvz.org", - url = "https://github.com/checkpoint-restore/criu", - package_dir = {'pycriu': 'lib/py'}, - packages = ["pycriu", "pycriu.images"], - scripts = ["crit/crit"] - ) +setup(name="crit", + version="0.0.1", + description="CRiu Image Tool", + author="CRIU team", + author_email="criu@openvz.org", + url="https://github.com/checkpoint-restore/criu", + package_dir={'pycriu': 'lib/py'}, + packages=["pycriu", "pycriu.images"], + scripts=["crit/crit"]) diff --git a/scripts/magic-gen.py b/scripts/magic-gen.py index 7088f634d..3d9777735 100755 --- a/scripts/magic-gen.py +++ b/scripts/magic-gen.py @@ -1,61 +1,63 @@ #!/bin/env python2 import sys + # This program parses criu magic.h file and produces # magic.py with all *_MAGIC constants except RAW and V1. def main(argv): - if len(argv) != 3: - print("Usage: magic-gen.py path/to/image.h path/to/magic.py") - exit(1) + if len(argv) != 3: + print("Usage: magic-gen.py path/to/image.h path/to/magic.py") + exit(1) - magic_c_header = argv[1] - magic_py = argv[2] + magic_c_header = argv[1] + magic_py = argv[2] - out = open(magic_py, 'w+') + out = open(magic_py, 'w+') - # all_magic is used to parse constructions like: - # #define PAGEMAP_MAGIC 0x56084025 - # #define SHMEM_PAGEMAP_MAGIC PAGEMAP_MAGIC - all_magic = {} - # and magic is used to store only unique magic. - magic = {} + # all_magic is used to parse constructions like: + # #define PAGEMAP_MAGIC 0x56084025 + # #define SHMEM_PAGEMAP_MAGIC PAGEMAP_MAGIC + all_magic = {} + # and magic is used to store only unique magic. + magic = {} - f = open(magic_c_header, 'r') - for line in f: - split = line.split() + f = open(magic_c_header, 'r') + for line in f: + split = line.split() - if len(split) < 3: - continue + if len(split) < 3: + continue - if not '#define' in split[0]: - continue + if not '#define' in split[0]: + continue - key = split[1] - value = split[2] + key = split[1] + value = split[2] - if value in all_magic: - value = all_magic[value] - else: - magic[key] = value + if value in all_magic: + value = all_magic[value] + else: + magic[key] = value - all_magic[key] = value + all_magic[key] = value + + out.write('#Autogenerated. Do not edit!\n') + out.write('by_name = {}\n') + out.write('by_val = {}\n') + for k, v in list(magic.items()): + # We don't need RAW or V1 magic, because + # they can't be used to identify images. + if v == '0x0' or v == '1' or k == '0x0' or v == '1': + continue + if k.endswith("_MAGIC"): + # Just cutting _MAGIC suffix + k = k[:-6] + v = int(v, 16) + out.write("by_name['" + k + "'] = " + str(v) + "\n") + out.write("by_val[" + str(v) + "] = '" + k + "'\n") + f.close() + out.close() - out.write('#Autogenerated. Do not edit!\n') - out.write('by_name = {}\n') - out.write('by_val = {}\n') - for k,v in list(magic.items()): - # We don't need RAW or V1 magic, because - # they can't be used to identify images. 
- if v == '0x0' or v == '1' or k == '0x0' or v == '1': - continue - if k.endswith("_MAGIC"): - # Just cutting _MAGIC suffix - k = k[:-6] - v = int(v, 16) - out.write("by_name['"+ k +"'] = "+ str(v) +"\n") - out.write("by_val["+ str(v) +"] = '"+ k +"'\n") - f.close() - out.close() if __name__ == "__main__": - main(sys.argv) + main(sys.argv) diff --git a/soccr/test/run.py b/soccr/test/run.py index a25c29263..446584a71 100644 --- a/soccr/test/run.py +++ b/soccr/test/run.py @@ -13,17 +13,17 @@ sport = os.getenv("TCP_SPORT", "12345") dport = os.getenv("TCP_DPORT", "54321") print(sys.argv[1]) -args = [sys.argv[1], - "--addr", src, "--port", sport, "--seq", "555", - "--next", - "--addr", dst, "--port", dport, "--seq", "666", - "--reverse", "--", "./tcp-test.py"] +args = [ + sys.argv[1], "--addr", src, "--port", sport, "--seq", "555", "--next", + "--addr", dst, "--port", dport, "--seq", "666", "--reverse", "--", + "./tcp-test.py" +] -p1 = Popen(args + ["dst"], stdout = PIPE, stdin = PIPE) +p1 = Popen(args + ["dst"], stdout=PIPE, stdin=PIPE) -args.remove("--reverse"); +args.remove("--reverse") -p2 = Popen(args + ["src"], stdout = PIPE, stdin = PIPE) +p2 = Popen(args + ["src"], stdout=PIPE, stdin=PIPE) p1.stdout.read(5) p2.stdout.read(5) @@ -42,7 +42,7 @@ str2 = m.hexdigest() if str2 != eval(s): print("FAIL", repr(str2), repr(s)) - sys.exit(5); + sys.exit(5) s = p1.stdout.read() m = hashlib.md5() @@ -52,7 +52,7 @@ str1 = m.hexdigest() s = p2.stdout.read() if str1 != eval(s): print("FAIL", repr(str1), s) - sys.exit(5); + sys.exit(5) if p1.wait(): sys.exit(1) diff --git a/test/check_actions.py b/test/check_actions.py index 0e3daf178..ae909e668 100755 --- a/test/check_actions.py +++ b/test/check_actions.py @@ -4,37 +4,38 @@ import sys import os actions = set(['pre-dump', 'pre-restore', 'post-dump', 'setup-namespaces', \ - 'post-setup-namespaces', 'post-restore', 'post-resume', \ - 'network-lock', 'network-unlock' ]) + 'post-setup-namespaces', 'post-restore', 'post-resume', \ + 'network-lock', 'network-unlock' ]) errors = [] af = os.path.dirname(os.path.abspath(__file__)) + '/actions_called.txt' for act in open(af): - act = act.strip().split() - act.append('EMPTY') - act.append('EMPTY') + act = act.strip().split() + act.append('EMPTY') + act.append('EMPTY') - if act[0] == 'EMPTY': - raise Exception("Error in test, bogus actions line") + if act[0] == 'EMPTY': + raise Exception("Error in test, bogus actions line") - if act[1] == 'EMPTY': - errors.append('Action %s misses CRTOOLS_IMAGE_DIR' % act[0]) + if act[1] == 'EMPTY': + errors.append('Action %s misses CRTOOLS_IMAGE_DIR' % act[0]) - if act[0] in ('post-dump', 'setup-namespaces', 'post-setup-namespaces', \ - 'post-restore', 'post-resume', 'network-lock', 'network-unlock'): - if act[2] == 'EMPTY': - errors.append('Action %s misses CRTOOLS_INIT_PID' % act[0]) - elif not act[2].isdigit() or int(act[2]) == 0: - errors.append('Action %s PID is not number (%s)' % (act[0], act[2])) + if act[0] in ('post-dump', 'setup-namespaces', 'post-setup-namespaces', \ + 'post-restore', 'post-resume', 'network-lock', 'network-unlock'): + if act[2] == 'EMPTY': + errors.append('Action %s misses CRTOOLS_INIT_PID' % act[0]) + elif not act[2].isdigit() or int(act[2]) == 0: + errors.append('Action %s PID is not number (%s)' % + (act[0], act[2])) - actions -= set([act[0]]) + actions -= set([act[0]]) if actions: - errors.append('Not all actions called: %r' % actions) + errors.append('Not all actions called: %r' % actions) if errors: - for x in errors: - print(x) - sys.exit(1) + 
for x in errors: + print(x) + sys.exit(1) print('PASS') diff --git a/test/crit-recode.py b/test/crit-recode.py index 441f7757e..a7dcc7272 100755 --- a/test/crit-recode.py +++ b/test/crit-recode.py @@ -6,70 +6,72 @@ import sys import os import subprocess -find = subprocess.Popen(['find', 'test/dump/', '-size', '+0', '-name', '*.img'], - stdout = subprocess.PIPE) +find = subprocess.Popen( + ['find', 'test/dump/', '-size', '+0', '-name', '*.img'], + stdout=subprocess.PIPE) test_pass = True + def recode_and_check(imgf, o_img, pretty): - try: - pb = pycriu.images.loads(o_img, pretty) - except pycriu.images.MagicException as me: - print("%s magic %x error" % (imgf, me.magic)) - return False - except Exception as e: - print("%s %sdecode fails: %s" % (imgf, pretty and 'pretty ' or '', e)) - return False + try: + pb = pycriu.images.loads(o_img, pretty) + except pycriu.images.MagicException as me: + print("%s magic %x error" % (imgf, me.magic)) + return False + except Exception as e: + print("%s %sdecode fails: %s" % (imgf, pretty and 'pretty ' or '', e)) + return False - try: - r_img = pycriu.images.dumps(pb) - except Exception as e: - r_img = pycriu.images.dumps(pb) - print("%s %s encode fails: %s" % (imgf, pretty and 'pretty ' or '', e)) - return False + try: + r_img = pycriu.images.dumps(pb) + except Exception as e: + r_img = pycriu.images.dumps(pb) + print("%s %s encode fails: %s" % (imgf, pretty and 'pretty ' or '', e)) + return False - if o_img != r_img: - print("%s %s recode mismatch" % (imgf, pretty and 'pretty ' or '')) - return False + if o_img != r_img: + print("%s %s recode mismatch" % (imgf, pretty and 'pretty ' or '')) + return False - return True + return True for imgf in find.stdout.readlines(): - imgf = imgf.strip() - imgf_b = os.path.basename(imgf) + imgf = imgf.strip() + imgf_b = os.path.basename(imgf) - if imgf_b.startswith(b'pages-'): - continue - if imgf_b.startswith(b'iptables-'): - continue - if imgf_b.startswith(b'ip6tables-'): - continue - if imgf_b.startswith(b'route-'): - continue - if imgf_b.startswith(b'route6-'): - continue - if imgf_b.startswith(b'ifaddr-'): - continue - if imgf_b.startswith(b'tmpfs-'): - continue - if imgf_b.startswith(b'netns-ct-'): - continue - if imgf_b.startswith(b'netns-exp-'): - continue - if imgf_b.startswith(b'rule-'): - continue + if imgf_b.startswith(b'pages-'): + continue + if imgf_b.startswith(b'iptables-'): + continue + if imgf_b.startswith(b'ip6tables-'): + continue + if imgf_b.startswith(b'route-'): + continue + if imgf_b.startswith(b'route6-'): + continue + if imgf_b.startswith(b'ifaddr-'): + continue + if imgf_b.startswith(b'tmpfs-'): + continue + if imgf_b.startswith(b'netns-ct-'): + continue + if imgf_b.startswith(b'netns-exp-'): + continue + if imgf_b.startswith(b'rule-'): + continue - o_img = open(imgf.decode(), "rb").read() - if not recode_and_check(imgf, o_img, False): - test_pass = False - if not recode_and_check(imgf, o_img, True): - test_pass = False + o_img = open(imgf.decode(), "rb").read() + if not recode_and_check(imgf, o_img, False): + test_pass = False + if not recode_and_check(imgf, o_img, True): + test_pass = False find.wait() if not test_pass: - print("FAIL") - sys.exit(1) + print("FAIL") + sys.exit(1) print("PASS") diff --git a/test/exhaustive/pipe.py b/test/exhaustive/pipe.py index 17e065800..fdadc480c 100755 --- a/test/exhaustive/pipe.py +++ b/test/exhaustive/pipe.py @@ -8,125 +8,127 @@ import time import sys import subprocess -criu_bin='../../criu/criu' +criu_bin = '../../criu/criu' + def mix(nr_tasks, 
nr_pipes): - # Returned is the list of combinations. - # Each combination is the lists of pipe descriptors. - # Each pipe descriptor is a 2-elemtn tuple, that contains values - # for R and W ends of pipes, each being a bit-field denoting in - # which tasks the respective end should be opened or not. + # Returned is the list of combinations. + # Each combination is the lists of pipe descriptors. + # Each pipe descriptor is a 2-elemtn tuple, that contains values + # for R and W ends of pipes, each being a bit-field denoting in + # which tasks the respective end should be opened or not. - # First -- make a full set of combinations for a single pipe. - max_idx = 1 << nr_tasks - pipe_mix = [[(r, w)] for r in range(0, max_idx) for w in range(0, max_idx)] + # First -- make a full set of combinations for a single pipe. + max_idx = 1 << nr_tasks + pipe_mix = [[(r, w)] for r in range(0, max_idx) for w in range(0, max_idx)] - # Now, for every pipe throw another one into the game making - # all possible combinations of what was seen before with the - # newbie. - pipes_mix = pipe_mix - for t in range(1, nr_pipes): - pipes_mix = [ o + n for o in pipes_mix for n in pipe_mix ] + # Now, for every pipe throw another one into the game making + # all possible combinations of what was seen before with the + # newbie. + pipes_mix = pipe_mix + for t in range(1, nr_pipes): + pipes_mix = [o + n for o in pipes_mix for n in pipe_mix] - return pipes_mix + return pipes_mix # Called by a test sub-process. It just closes the not needed ends # of pipes and sleeps waiting for death. def make_pipes(task_nr, nr_pipes, pipes, comb, status_pipe): - print('\t\tMake pipes for %d' % task_nr) - # We need to make sure that pipes have their - # ends according to comb for task_nr + print('\t\tMake pipes for %d' % task_nr) + # We need to make sure that pipes have their + # ends according to comb for task_nr - for i in range(0, nr_pipes): - # Read end - if not (comb[i][0] & (1 << task_nr)): - os.close(pipes[i][0]) - # Write end - if not (comb[i][1] & (1 << task_nr)): - os.close(pipes[i][1]) + for i in range(0, nr_pipes): + # Read end + if not (comb[i][0] & (1 << task_nr)): + os.close(pipes[i][0]) + # Write end + if not (comb[i][1] & (1 << task_nr)): + os.close(pipes[i][1]) - os.write(status_pipe, '0') - os.close(status_pipe) - while True: - time.sleep(100) + os.write(status_pipe, '0') + os.close(status_pipe) + while True: + time.sleep(100) def get_pipe_ino(pid, fd): - try: - return os.stat('/proc/%d/fd/%d' % (pid, fd)).st_ino - except: - return None + try: + return os.stat('/proc/%d/fd/%d' % (pid, fd)).st_ino + except: + return None def get_pipe_rw(pid, fd): - for l in open('/proc/%d/fdinfo/%d' % (pid, fd)): - if l.startswith('flags:'): - f = l.split(None, 1)[1][-2] - if f == '0': - return 0 # Read - elif f == '1': - return 1 # Write - break + for l in open('/proc/%d/fdinfo/%d' % (pid, fd)): + if l.startswith('flags:'): + f = l.split(None, 1)[1][-2] + if f == '0': + return 0 # Read + elif f == '1': + return 1 # Write + break - raise Exception('Unexpected fdinfo contents') + raise Exception('Unexpected fdinfo contents') def check_pipe_y(pid, fd, rw, inos): - ino = get_pipe_ino(pid, fd) - if ino == None: - return 'missing ' - if not inos.has_key(fd): - inos[fd] = ino - elif inos[fd] != ino: - return 'wrong ' - mod = get_pipe_rw(pid, fd) - if mod != rw: - return 'badmode ' - return None + ino = get_pipe_ino(pid, fd) + if ino == None: + return 'missing ' + if not inos.has_key(fd): + inos[fd] = ino + elif inos[fd] != ino: + return 'wrong ' 
+ mod = get_pipe_rw(pid, fd) + if mod != rw: + return 'badmode ' + return None def check_pipe_n(pid, fd): - ino = get_pipe_ino(pid, fd) - if ino == None: - return None - else: - return 'present ' + ino = get_pipe_ino(pid, fd) + if ino == None: + return None + else: + return 'present ' def check_pipe_end(kids, fd, comb, rw, inos): - t_nr = 0 - for t_pid in kids: - if comb & (1 << t_nr): - res = check_pipe_y(t_pid, fd, rw, inos) - else: - res = check_pipe_n(t_pid, fd) - if res != None: - return res + 'kid(%d)' % t_nr - t_nr += 1 - return None + t_nr = 0 + for t_pid in kids: + if comb & (1 << t_nr): + res = check_pipe_y(t_pid, fd, rw, inos) + else: + res = check_pipe_n(t_pid, fd) + if res != None: + return res + 'kid(%d)' % t_nr + t_nr += 1 + return None def check_pipe(kids, fds, comb, inos): - for e in (0, 1): # 0 == R, 1 == W, see get_pipe_rw() - res = check_pipe_end(kids, fds[e], comb[e], e, inos) - if res != None: - return res + 'end(%d)' % e - return None + for e in (0, 1): # 0 == R, 1 == W, see get_pipe_rw() + res = check_pipe_end(kids, fds[e], comb[e], e, inos) + if res != None: + return res + 'end(%d)' % e + return None + def check_pipes(kids, pipes, comb): - # Kids contain pids - # Pipes contain pipe FDs - # Comb contain list of pairs of bits for RW ends - p_nr = 0 - p_inos = {} - for p_fds in pipes: - res = check_pipe(kids, p_fds, comb[p_nr], p_inos) - if res != None: - return res + 'pipe(%d)' % p_nr - p_nr += 1 + # Kids contain pids + # Pipes contain pipe FDs + # Comb contain list of pairs of bits for RW ends + p_nr = 0 + p_inos = {} + for p_fds in pipes: + res = check_pipe(kids, p_fds, comb[p_nr], p_inos) + if res != None: + return res + 'pipe(%d)' % p_nr + p_nr += 1 - return None + return None # Run by test main process. It opens pipes, then forks kids that @@ -134,128 +136,134 @@ def check_pipes(kids, pipes, comb): # and waits for a signal (unix socket message) to start checking # the kids' FD tables. 
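For illustration, a minimal sketch of the readiness handshake described in the comment above, reduced to a single child; the variable names are illustrative and not taken from the test. The child reports a status byte over a pipe, and the parent blocks on that byte before it would go on to drive the checkpoint/restore step.

import os

r, w = os.pipe()
pid = os.fork()
if pid == 0:
    os.close(r)
    # ... here the child would close the pipe ends it does not need ...
    os.write(w, b'0')           # tell the parent we are ready
    os.close(w)
    os._exit(0)                 # child exits without running parent cleanup

os.close(w)
status = os.read(r, 1)          # blocks until the child reports in
os.close(r)
os.waitpid(pid, 0)
assert status == b'0'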
def make_comb(comb, opts, status_pipe): - print('\tMake pipes') - # 1st -- make needed pipes - pipes = [] - for p in range(0, opts.pipes): - pipes.append(os.pipe()) + print('\tMake pipes') + # 1st -- make needed pipes + pipes = [] + for p in range(0, opts.pipes): + pipes.append(os.pipe()) - # Fork the kids that'll make pipes - kc_pipe = os.pipe() - kids = [] - for t in range(0, opts.tasks): - pid = os.fork() - if pid == 0: - os.close(status_pipe) - os.close(kc_pipe[0]) - make_pipes(t, opts.pipes, pipes, comb, kc_pipe[1]) - sys.exit(1) - kids.append(pid) + # Fork the kids that'll make pipes + kc_pipe = os.pipe() + kids = [] + for t in range(0, opts.tasks): + pid = os.fork() + if pid == 0: + os.close(status_pipe) + os.close(kc_pipe[0]) + make_pipes(t, opts.pipes, pipes, comb, kc_pipe[1]) + sys.exit(1) + kids.append(pid) - os.close(kc_pipe[1]) - for p in pipes: - os.close(p[0]) - os.close(p[1]) + os.close(kc_pipe[1]) + for p in pipes: + os.close(p[0]) + os.close(p[1]) - # Wait for kids to get ready - k_res = '' - while True: - v = os.read(kc_pipe[0], 16) - if v == '': - break - k_res += v - os.close(kc_pipe[0]) + # Wait for kids to get ready + k_res = '' + while True: + v = os.read(kc_pipe[0], 16) + if v == '': + break + k_res += v + os.close(kc_pipe[0]) - ex_code = 1 - if k_res == '0' * opts.tasks: - print('\tWait for C/R') - cmd_sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0) - cmd_sk.bind('\0CRIUPCSK') + ex_code = 1 + if k_res == '0' * opts.tasks: + print('\tWait for C/R') + cmd_sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0) + cmd_sk.bind('\0CRIUPCSK') - # Kids are ready, so is socket for kicking us. Notify the - # parent task that we are good to go. - os.write(status_pipe, '0') - os.close(status_pipe) - v = cmd_sk.recv(16) - if v == '0': - print('\tCheck pipes') - res = check_pipes(kids, pipes, comb) - if res == None: - ex_code = 0 - else: - print('\tFAIL %s' % res) + # Kids are ready, so is socket for kicking us. Notify the + # parent task that we are good to go. 
+ os.write(status_pipe, '0') + os.close(status_pipe) + v = cmd_sk.recv(16) + if v == '0': + print('\tCheck pipes') + res = check_pipes(kids, pipes, comb) + if res == None: + ex_code = 0 + else: + print('\tFAIL %s' % res) - # Just kill kids, all checks are done by us, we don't need'em any more - for t in kids: - os.kill(t, signal.SIGKILL) - os.waitpid(t, 0) + # Just kill kids, all checks are done by us, we don't need'em any more + for t in kids: + os.kill(t, signal.SIGKILL) + os.waitpid(t, 0) - return ex_code + return ex_code def cr_test(pid): - print('C/R test') - img_dir = 'pimg_%d' % pid - try: - os.mkdir(img_dir) - subprocess.check_call([criu_bin, 'dump', '-t', '%d' % pid, '-D', img_dir, '-o', 'dump.log', '-v4', '-j']) - except: - print('`- dump fail') - return False + print('C/R test') + img_dir = 'pimg_%d' % pid + try: + os.mkdir(img_dir) + subprocess.check_call([ + criu_bin, 'dump', '-t', + '%d' % pid, '-D', img_dir, '-o', 'dump.log', '-v4', '-j' + ]) + except: + print('`- dump fail') + return False - try: - os.waitpid(pid, 0) - subprocess.check_call([criu_bin, 'restore', '-D', img_dir, '-o', 'rst.log', '-v4', '-j', '-d', '-S']) - except: - print('`- restore fail') - return False + try: + os.waitpid(pid, 0) + subprocess.check_call([ + criu_bin, 'restore', '-D', img_dir, '-o', 'rst.log', '-v4', '-j', + '-d', '-S' + ]) + except: + print('`- restore fail') + return False - return True + return True def run(comb, opts): - print('Checking %r' % comb) - cpipe = os.pipe() - pid = os.fork() - if pid == 0: - os.close(cpipe[0]) - ret = make_comb(comb, opts, cpipe[1]) - sys.exit(ret) + print('Checking %r' % comb) + cpipe = os.pipe() + pid = os.fork() + if pid == 0: + os.close(cpipe[0]) + ret = make_comb(comb, opts, cpipe[1]) + sys.exit(ret) - # Wait for the main process to get ready - os.close(cpipe[1]) - res = os.read(cpipe[0], 16) - os.close(cpipe[0]) + # Wait for the main process to get ready + os.close(cpipe[1]) + res = os.read(cpipe[0], 16) + os.close(cpipe[0]) - if res == '0': - res = cr_test(pid) + if res == '0': + res = cr_test(pid) - print('Wake up test') - s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0) - if res: - res = '0' - else: - res = 'X' - try: - # Kick the test to check its state - s.sendto(res, '\0CRIUPCSK') - except: - # Restore might have failed or smth else happened - os.kill(pid, signal.SIGKILL) - s.close() + print('Wake up test') + s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0) + if res: + res = '0' + else: + res = 'X' + try: + # Kick the test to check its state + s.sendto(res, '\0CRIUPCSK') + except: + # Restore might have failed or smth else happened + os.kill(pid, signal.SIGKILL) + s.close() - # Wait for the guy to exit and get the result (PASS/FAIL) - p, st = os.waitpid(pid, 0) - if os.WIFEXITED(st): - st = os.WEXITSTATUS(st) + # Wait for the guy to exit and get the result (PASS/FAIL) + p, st = os.waitpid(pid, 0) + if os.WIFEXITED(st): + st = os.WEXITSTATUS(st) - print('Done (%d, pid == %d)' % (st, pid)) - return st == 0 + print('Done (%d, pid == %d)' % (st, pid)) + return st == 0 p = argparse.ArgumentParser("CRIU test suite") -p.add_argument("--tasks", help = "Number of tasks", default = '2') -p.add_argument("--pipes", help = "Number of pipes", default = '2') +p.add_argument("--tasks", help="Number of tasks", default='2') +p.add_argument("--pipes", help="Number of pipes", default='2') opts = p.parse_args() opts.tasks = int(opts.tasks) opts.pipes = int(opts.pipes) @@ -263,8 +271,8 @@ opts.pipes = int(opts.pipes) pipe_combs = mix(opts.tasks, 
opts.pipes) for comb in pipe_combs: - if not run(comb, opts): - print('FAIL') - break + if not run(comb, opts): + print('FAIL') + break else: - print('PASS') + print('PASS') diff --git a/test/exhaustive/unix.py b/test/exhaustive/unix.py index 41053bd0d..98dbbb7b0 100755 --- a/test/exhaustive/unix.py +++ b/test/exhaustive/unix.py @@ -9,11 +9,11 @@ import signal import fcntl import stat -criu_bin='../../criu/criu' +criu_bin = '../../criu/criu' sk_type_s = { - socket.SOCK_STREAM: "S", - socket.SOCK_DGRAM: "D", + socket.SOCK_STREAM: "S", + socket.SOCK_DGRAM: "D", } # Actions that can be done by test. Actions are not only syscall @@ -25,721 +25,739 @@ sk_type_s = { # - do() method, that actually does what's required # - show() method to return the string description of what's done + def mk_socket(st, typ): - st.sk_id += 1 - sk = sock(st.sk_id, typ) - st.add_socket(sk) - return sk + st.sk_id += 1 + sk = sock(st.sk_id, typ) + st.add_socket(sk) + return sk + class act_socket: - def __init__(self, typ): - self.typ = typ + def __init__(self, typ): + self.typ = typ - def act(self, st): - sk = mk_socket(st, self.typ) - self.sk_id = sk.sk_id + def act(self, st): + sk = mk_socket(st, self.typ) + self.sk_id = sk.sk_id - def do(self, st): - sk = socket.socket(socket.AF_UNIX, self.typ, 0) - st.real_sockets[self.sk_id] = sk + def do(self, st): + sk = socket.socket(socket.AF_UNIX, self.typ, 0) + st.real_sockets[self.sk_id] = sk - def show(self): - return 'socket(%s) = %d' % (sk_type_s[self.typ], self.sk_id) + def show(self): + return 'socket(%s) = %d' % (sk_type_s[self.typ], self.sk_id) class act_close: - def __init__(self, sk_id): - self.sk_id = sk_id + def __init__(self, sk_id): + self.sk_id = sk_id - def act(self, st): - sk = st.get_socket(self.sk_id) - st.del_socket(sk) - for ic in sk.icons: - sk = st.get_socket(ic) - st.del_socket(sk) + def act(self, st): + sk = st.get_socket(self.sk_id) + st.del_socket(sk) + for ic in sk.icons: + sk = st.get_socket(ic) + st.del_socket(sk) - def do(self, st): - sk = st.real_sockets.pop(self.sk_id) - sk.close() + def do(self, st): + sk = st.real_sockets.pop(self.sk_id) + sk.close() - def show(self): - return 'close(%d)' % self.sk_id + def show(self): + return 'close(%d)' % self.sk_id class act_listen: - def __init__(self, sk_id): - self.sk_id = sk_id + def __init__(self, sk_id): + self.sk_id = sk_id - def act(self, st): - sk = st.get_socket(self.sk_id) - sk.listen = True + def act(self, st): + sk = st.get_socket(self.sk_id) + sk.listen = True - def do(self, st): - sk = st.real_sockets[self.sk_id] - sk.listen(10) + def do(self, st): + sk = st.real_sockets[self.sk_id] + sk.listen(10) - def show(self): - return 'listen(%d)' % self.sk_id + def show(self): + return 'listen(%d)' % self.sk_id class act_bind: - def __init__(self, sk_id, name_id): - self.sk_id = sk_id - self.name_id = name_id + def __init__(self, sk_id, name_id): + self.sk_id = sk_id + self.name_id = name_id - def act(self, st): - sk = st.get_socket(self.sk_id) - sk.name = self.name_id + def act(self, st): + sk = st.get_socket(self.sk_id) + sk.name = self.name_id - def do(self, st): - sk = st.real_sockets[self.sk_id] - sk.bind(sock.real_name_for(self.name_id)) + def do(self, st): + sk = st.real_sockets[self.sk_id] + sk.bind(sock.real_name_for(self.name_id)) - def show(self): - return 'bind(%d, $name-%d)' % (self.sk_id, self.name_id) + def show(self): + return 'bind(%d, $name-%d)' % (self.sk_id, self.name_id) class act_connect: - def __init__(self, sk_id, listen_sk_id): - self.sk_id = sk_id - self.lsk_id = 
listen_sk_id + def __init__(self, sk_id, listen_sk_id): + self.sk_id = sk_id + self.lsk_id = listen_sk_id - def act(self, st): - sk = st.get_socket(self.sk_id) - if st.sk_type == socket.SOCK_STREAM: - lsk = st.get_socket(self.lsk_id) - psk = mk_socket(st, socket.SOCK_STREAM) - psk.visible = False - sk.peer = psk.sk_id - psk.peer = sk.sk_id - psk.name = lsk.name - lsk.icons.append(psk.sk_id) - lsk.icons_seq += 1 - else: - sk.peer = self.lsk_id - psk = st.get_socket(self.lsk_id) - psk.icons_seq += 1 + def act(self, st): + sk = st.get_socket(self.sk_id) + if st.sk_type == socket.SOCK_STREAM: + lsk = st.get_socket(self.lsk_id) + psk = mk_socket(st, socket.SOCK_STREAM) + psk.visible = False + sk.peer = psk.sk_id + psk.peer = sk.sk_id + psk.name = lsk.name + lsk.icons.append(psk.sk_id) + lsk.icons_seq += 1 + else: + sk.peer = self.lsk_id + psk = st.get_socket(self.lsk_id) + psk.icons_seq += 1 - def do(self, st): - sk = st.real_sockets[self.sk_id] - sk.connect(sock.real_name_for(self.lsk_id)) + def do(self, st): + sk = st.real_sockets[self.sk_id] + sk.connect(sock.real_name_for(self.lsk_id)) - def show(self): - return 'connect(%d, $name-%d)' % (self.sk_id, self.lsk_id) + def show(self): + return 'connect(%d, $name-%d)' % (self.sk_id, self.lsk_id) class act_accept: - def __init__(self, sk_id): - self.sk_id = sk_id + def __init__(self, sk_id): + self.sk_id = sk_id - def act(self, st): - lsk = st.get_socket(self.sk_id) - iid = lsk.icons.pop(0) - nsk = st.get_socket(iid) - nsk.visible = True - self.nsk_id = nsk.sk_id + def act(self, st): + lsk = st.get_socket(self.sk_id) + iid = lsk.icons.pop(0) + nsk = st.get_socket(iid) + nsk.visible = True + self.nsk_id = nsk.sk_id - def do(self, st): - sk = st.real_sockets[self.sk_id] - nsk, ai = sk.accept() - if self.nsk_id in st.real_sockets: - raise Exception("SK ID conflict") - st.real_sockets[self.nsk_id] = nsk + def do(self, st): + sk = st.real_sockets[self.sk_id] + nsk, ai = sk.accept() + if self.nsk_id in st.real_sockets: + raise Exception("SK ID conflict") + st.real_sockets[self.nsk_id] = nsk - def show(self): - return 'accept(%d) = %d' % (self.sk_id, self.nsk_id) + def show(self): + return 'accept(%d) = %d' % (self.sk_id, self.nsk_id) class act_sendmsg: - def __init__(self, sk_id, to_id): - self.sk_id = sk_id - self.to_id = to_id - self.direct_send = None + def __init__(self, sk_id, to_id): + self.sk_id = sk_id + self.to_id = to_id + self.direct_send = None - def act(self, st): - sk = st.get_socket(self.sk_id) - msg = (sk.sk_id, sk.outseq) - self.msg_id = sk.outseq - sk.outseq += 1 - psk = st.get_socket(self.to_id) - psk.inqueue.append(msg) - self.direct_send = (sk.peer == psk.sk_id) + def act(self, st): + sk = st.get_socket(self.sk_id) + msg = (sk.sk_id, sk.outseq) + self.msg_id = sk.outseq + sk.outseq += 1 + psk = st.get_socket(self.to_id) + psk.inqueue.append(msg) + self.direct_send = (sk.peer == psk.sk_id) - def do(self, st): - sk = st.real_sockets[self.sk_id] - msgv = act_sendmsg.msgval(self.msg_id) - if self.direct_send: - sk.send(msgv) - else: - sk.sendto(msgv, sock.real_name_for(self.to_id)) + def do(self, st): + sk = st.real_sockets[self.sk_id] + msgv = act_sendmsg.msgval(self.msg_id) + if self.direct_send: + sk.send(msgv) + else: + sk.sendto(msgv, sock.real_name_for(self.to_id)) - def show(self): - return 'send(%d, %d, $message-%d)' % (self.sk_id, self.to_id, self.msg_id) + def show(self): + return 'send(%d, %d, $message-%d)' % (self.sk_id, self.to_id, + self.msg_id) + + @staticmethod + def msgval(msgid, pref=''): + return '%sMSG%d' % (pref, 
msgid) - @staticmethod - def msgval(msgid, pref = ''): - return '%sMSG%d' % (pref, msgid) # # Description of a socket # class sock: - def __init__(self, sk_id, sock_type): - # ID of a socket. Since states and sockets are cloned - # while we scan the tree of states the only valid way - # to address a socket is to find one by ID. - self.sk_id = sk_id - # The socket.SOCK_FOO value - self.sk_type = sock_type - # Sockets that haven't yet been accept()-ed are in the - # state, but user cannot operate on them. Also this - # invisibility contributes to state description since - # connection to not accepted socket is not the same - # as connection to accepted one. - self.visible = True - # The listen() was called. - self.listen = False - # The bind() was called. Also set by accept(), the name - # inherits from listener. - self.name = None - # The connect() was called. Set on two sockets when the - # connect() is called. - self.peer = None - # Progress on accepting connections. Used to check when - # it's OK to close the socket (see comment below). - self.icons_seq = 0 - # List of IDs of sockets that can be accept()-ed - self.icons = [] - # Number to generate message contents. - self.outseq = 0 - # Incoming queue of messages. - self.inqueue = [] + def __init__(self, sk_id, sock_type): + # ID of a socket. Since states and sockets are cloned + # while we scan the tree of states the only valid way + # to address a socket is to find one by ID. + self.sk_id = sk_id + # The socket.SOCK_FOO value + self.sk_type = sock_type + # Sockets that haven't yet been accept()-ed are in the + # state, but user cannot operate on them. Also this + # invisibility contributes to state description since + # connection to not accepted socket is not the same + # as connection to accepted one. + self.visible = True + # The listen() was called. + self.listen = False + # The bind() was called. Also set by accept(), the name + # inherits from listener. + self.name = None + # The connect() was called. Set on two sockets when the + # connect() is called. + self.peer = None + # Progress on accepting connections. Used to check when + # it's OK to close the socket (see comment below). + self.icons_seq = 0 + # List of IDs of sockets that can be accept()-ed + self.icons = [] + # Number to generate message contents. + self.outseq = 0 + # Incoming queue of messages. 
+ self.inqueue = [] - def clone(self): - sk = sock(self.sk_id, self.sk_type) - sk.visible = self.visible - sk.listen = self.listen - sk.name = self.name - sk.peer = self.peer - sk.icons_seq = self.icons_seq - sk.icons = list(self.icons) - sk.outseq = self.outseq - sk.inqueue = list(self.inqueue) - return sk + def clone(self): + sk = sock(self.sk_id, self.sk_type) + sk.visible = self.visible + sk.listen = self.listen + sk.name = self.name + sk.peer = self.peer + sk.icons_seq = self.icons_seq + sk.icons = list(self.icons) + sk.outseq = self.outseq + sk.inqueue = list(self.inqueue) + return sk - def get_actions(self, st): - if not self.visible: - return [] + def get_actions(self, st): + if not self.visible: + return [] - if st.sk_type == socket.SOCK_STREAM: - return self.get_stream_actions(st) - else: - return self.get_dgram_actions(st) + if st.sk_type == socket.SOCK_STREAM: + return self.get_stream_actions(st) + else: + return self.get_dgram_actions(st) - def get_send_action(self, to, st): - # However, if peer has a message from us at - # the queue tail, sending a new one doesn't - # really make sense - want_msg = True - if len(to.inqueue) != 0: - lmsg = to.inqueue[-1] - if lmsg[0] == self.sk_id: - want_msg = False - if want_msg: - return [ act_sendmsg(self.sk_id, to.sk_id) ] - else: - return [ ] + def get_send_action(self, to, st): + # However, if peer has a message from us at + # the queue tail, sending a new one doesn't + # really make sense + want_msg = True + if len(to.inqueue) != 0: + lmsg = to.inqueue[-1] + if lmsg[0] == self.sk_id: + want_msg = False + if want_msg: + return [act_sendmsg(self.sk_id, to.sk_id)] + else: + return [] - def get_stream_actions(self, st): - act_list = [] + def get_stream_actions(self, st): + act_list = [] - # Any socket can be closed, but closing a socket - # that hasn't contributed to some new states is - # just waste of time, so we close only connected - # sockets or listeners that has at least one - # incoming connection pendig or served + # Any socket can be closed, but closing a socket + # that hasn't contributed to some new states is + # just waste of time, so we close only connected + # sockets or listeners that has at least one + # incoming connection pendig or served - if self.listen: - if self.icons: - act_list.append(act_accept(self.sk_id)) - if self.icons_seq: - act_list.append(act_close(self.sk_id)) - elif self.peer: - act_list.append(act_close(self.sk_id)) - # Connected sockets can send and receive messages - # But receiving seem not to produce any new states, - # so only sending - # Also sending to a closed socket doesn't work - psk = st.get_socket(self.peer, True) - if psk: - act_list += self.get_send_action(psk, st) - else: - for psk in st.sockets: - if psk.listen and psk.name: - act_list.append(act_connect(self.sk_id, psk.sk_id)) + if self.listen: + if self.icons: + act_list.append(act_accept(self.sk_id)) + if self.icons_seq: + act_list.append(act_close(self.sk_id)) + elif self.peer: + act_list.append(act_close(self.sk_id)) + # Connected sockets can send and receive messages + # But receiving seem not to produce any new states, + # so only sending + # Also sending to a closed socket doesn't work + psk = st.get_socket(self.peer, True) + if psk: + act_list += self.get_send_action(psk, st) + else: + for psk in st.sockets: + if psk.listen and psk.name: + act_list.append(act_connect(self.sk_id, psk.sk_id)) - # Listen on not-bound socket is prohibited as - # well as binding a listening socket - if not self.name: - # TODO: support for file paths 
(see real_name_for) - # TODO: these names can overlap each other - act_list.append(act_bind(self.sk_id, self.sk_id)) - else: - act_list.append(act_listen(self.sk_id)) + # Listen on not-bound socket is prohibited as + # well as binding a listening socket + if not self.name: + # TODO: support for file paths (see real_name_for) + # TODO: these names can overlap each other + act_list.append(act_bind(self.sk_id, self.sk_id)) + else: + act_list.append(act_listen(self.sk_id)) - return act_list + return act_list - def get_dgram_actions(self, st): - act_list = [] + def get_dgram_actions(self, st): + act_list = [] - # Dgram socket can bind at any time - if not self.name: - act_list.append(act_bind(self.sk_id, self.sk_id)) + # Dgram socket can bind at any time + if not self.name: + act_list.append(act_bind(self.sk_id, self.sk_id)) - # Can connect to peer-less sockets - for psk in st.sockets: - if psk == self: - continue - if psk.peer != None and psk.peer != self.sk_id: - # Peer by someone else, can do nothing - continue + # Can connect to peer-less sockets + for psk in st.sockets: + if psk == self: + continue + if psk.peer != None and psk.peer != self.sk_id: + # Peer by someone else, can do nothing + continue - # Peer-less psk or having us as peer - # We can connect to or send messages - if psk.name and self.peer != psk.sk_id: - act_list.append(act_connect(self.sk_id, psk.sk_id)) + # Peer-less psk or having us as peer + # We can connect to or send messages + if psk.name and self.peer != psk.sk_id: + act_list.append(act_connect(self.sk_id, psk.sk_id)) - if psk.name or self.peer == psk.sk_id: - act_list += self.get_send_action(psk, st) + if psk.name or self.peer == psk.sk_id: + act_list += self.get_send_action(psk, st) - if self.outseq != 0 or self.icons_seq != 0: - act_list.append(act_close(self.sk_id)) + if self.outseq != 0 or self.icons_seq != 0: + act_list.append(act_close(self.sk_id)) - return act_list + return act_list - @staticmethod - def name_of(sk): - if not sk: - return 'X' - elif not sk.visible: - return 'H' - elif sk.name: - return 'B' - else: - return 'A' + @staticmethod + def name_of(sk): + if not sk: + return 'X' + elif not sk.visible: + return 'H' + elif sk.name: + return 'B' + else: + return 'A' - @staticmethod - def real_name_for(sk_id): - return "\0" + "CRSK%d" % sk_id + @staticmethod + def real_name_for(sk_id): + return "\0" + "CRSK%d" % sk_id - # The describe() generates a string that represents - # a state of a socket. Called by state.describe(), see - # comment there about what description is. - def describe(self, st): - dsc = '%s' % sk_type_s[self.sk_type] - dsc += sock.name_of(self) + # The describe() generates a string that represents + # a state of a socket. Called by state.describe(), see + # comment there about what description is. 
+ def describe(self, st): + dsc = '%s' % sk_type_s[self.sk_type] + dsc += sock.name_of(self) - if self.listen: - dsc += 'L' - if self.peer: - psk = st.get_socket(self.peer, True) - dsc += '-C%s' % sock.name_of(psk) - if self.icons: - i_dsc = '' - for c in self.icons: - psk = st.get_socket(c) - psk = st.get_socket(psk.peer, True) - i_dsc += sock.name_of(psk) - dsc += '-I%s' % i_dsc - if self.inqueue: - froms = set() - for m in self.inqueue: - froms.add(m[0]) - q_dsc = '' - for f in froms: - fsk = st.get_socket(f, True) - q_dsc += sock.name_of(fsk) - dsc += '-M%s' % q_dsc - return dsc + if self.listen: + dsc += 'L' + if self.peer: + psk = st.get_socket(self.peer, True) + dsc += '-C%s' % sock.name_of(psk) + if self.icons: + i_dsc = '' + for c in self.icons: + psk = st.get_socket(c) + psk = st.get_socket(psk.peer, True) + i_dsc += sock.name_of(psk) + dsc += '-I%s' % i_dsc + if self.inqueue: + froms = set() + for m in self.inqueue: + froms.add(m[0]) + q_dsc = '' + for f in froms: + fsk = st.get_socket(f, True) + q_dsc += sock.name_of(fsk) + dsc += '-M%s' % q_dsc + return dsc class state: - def __init__(self, max_sockets, sk_type): - self.sockets = [] - self.sk_id = 0 - self.steps = [] - self.real_sockets = {} - self.sockets_left = max_sockets - self.sk_type = sk_type + def __init__(self, max_sockets, sk_type): + self.sockets = [] + self.sk_id = 0 + self.steps = [] + self.real_sockets = {} + self.sockets_left = max_sockets + self.sk_type = sk_type - def add_socket(self, sk): - self.sockets.append(sk) + def add_socket(self, sk): + self.sockets.append(sk) - def del_socket(self, sk): - self.sockets.remove(sk) + def del_socket(self, sk): + self.sockets.remove(sk) - def get_socket(self, sk_id, can_be_null = False): - for sk in self.sockets: - if sk.sk_id == sk_id: - return sk + def get_socket(self, sk_id, can_be_null=False): + for sk in self.sockets: + if sk.sk_id == sk_id: + return sk - if not can_be_null: - raise Exception("%d socket not in list" % sk_id) + if not can_be_null: + raise Exception("%d socket not in list" % sk_id) - return None + return None - def get_actions(self): - act_list = [] + def get_actions(self): + act_list = [] - # Any socket in the state we can change it - for sk in self.sockets: - act_list += sk.get_actions(self) + # Any socket in the state we can change it + for sk in self.sockets: + act_list += sk.get_actions(self) - if self.sockets_left > 0: - act_list.append(act_socket(self.sk_type)) - self.sockets_left -= 1 + if self.sockets_left > 0: + act_list.append(act_socket(self.sk_type)) + self.sockets_left -= 1 - return act_list + return act_list - def clone(self): - nst = state(self.sockets_left, self.sk_type) - for sk in self.sockets: - nst.sockets.append(sk.clone()) - nst.sk_id = self.sk_id - nst.steps = list(self.steps) - return nst + def clone(self): + nst = state(self.sockets_left, self.sk_type) + for sk in self.sockets: + nst.sockets.append(sk.clone()) + nst.sk_id = self.sk_id + nst.steps = list(self.steps) + return nst - # Generates textual description of a state. Different states - # may have same descriptions, e.g. if we have two sockets and - # only one of them is in listen state, we don't care which - # one in which. At the same time really different states - # shouldn't map to the same string. - def describe(self): - sks = [x.describe(self) for x in self.sockets] - sks = sorted(sks) - return '_'.join(sks) + # Generates textual description of a state. Different states + # may have same descriptions, e.g. 
if we have two sockets and + # only one of them is in listen state, we don't care which + # one in which. At the same time really different states + # shouldn't map to the same string. + def describe(self): + sks = [x.describe(self) for x in self.sockets] + sks = sorted(sks) + return '_'.join(sks) def set_nonblock(sk): - fd = sk.fileno() - flags = fcntl.fcntl(fd, fcntl.F_GETFL) - fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) + fd = sk.fileno() + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) -CHK_FAIL_UNKNOWN = 10 -CHK_FAIL_SOCKET = 11 -CHK_FAIL_STAT = 12 -CHK_FAIL_LISTEN = 13 -CHK_FAIL_NAME = 14 -CHK_FAIL_ACCEPT = 15 -CHK_FAIL_RECV_0 = 16 -CHK_FAIL_RECV_MIX = 17 -CHK_FAIL_CONNECT = 18 -CHK_FAIL_CONNECT2 = 19 -CHK_FAIL_KILLED = 20 -CHK_FAIL_DUMP = 21 -CHK_FAIL_RESTORE = 22 -CHK_PASS = 42 +CHK_FAIL_UNKNOWN = 10 +CHK_FAIL_SOCKET = 11 +CHK_FAIL_STAT = 12 +CHK_FAIL_LISTEN = 13 +CHK_FAIL_NAME = 14 +CHK_FAIL_ACCEPT = 15 +CHK_FAIL_RECV_0 = 16 +CHK_FAIL_RECV_MIX = 17 +CHK_FAIL_CONNECT = 18 +CHK_FAIL_CONNECT2 = 19 +CHK_FAIL_KILLED = 20 +CHK_FAIL_DUMP = 21 +CHK_FAIL_RESTORE = 22 + +CHK_PASS = 42 fail_desc = { - CHK_FAIL_UNKNOWN: 'Aliens invaded the test', - CHK_FAIL_LISTEN: 'Listen state lost on restore', - CHK_FAIL_NAME: 'Name lost on restore', - CHK_FAIL_ACCEPT: 'Incoming connection lost on restore', - CHK_FAIL_RECV_0: 'Message lost on restore', - CHK_FAIL_RECV_MIX: 'Message misorder on restore', - CHK_FAIL_CONNECT: 'Connectivity broken on restore', - CHK_FAIL_CONNECT2: 'Connectivity broken the hard way on restore', - CHK_FAIL_KILLED: 'Test process died unexpectedly', - CHK_FAIL_DUMP: 'Cannot dump', - CHK_FAIL_RESTORE: 'Cannot restore', + CHK_FAIL_UNKNOWN: 'Aliens invaded the test', + CHK_FAIL_LISTEN: 'Listen state lost on restore', + CHK_FAIL_NAME: 'Name lost on restore', + CHK_FAIL_ACCEPT: 'Incoming connection lost on restore', + CHK_FAIL_RECV_0: 'Message lost on restore', + CHK_FAIL_RECV_MIX: 'Message misorder on restore', + CHK_FAIL_CONNECT: 'Connectivity broken on restore', + CHK_FAIL_CONNECT2: 'Connectivity broken the hard way on restore', + CHK_FAIL_KILLED: 'Test process died unexpectedly', + CHK_FAIL_DUMP: 'Cannot dump', + CHK_FAIL_RESTORE: 'Cannot restore', } + def chk_real_state(st): - # Before enything else -- check that we still have - # all the sockets at hands - for sk in st.sockets: - if not sk.visible: - continue + # Before enything else -- check that we still have + # all the sockets at hands + for sk in st.sockets: + if not sk.visible: + continue - # In theory we can have key-not-found exception here, - # but this has nothing to do with sockets restore, - # since it's just bytes in memory, so ... we assume - # that we have object here and just check for it in - # the fdtable - rsk = st.real_sockets[sk.sk_id] - try: - s_st = os.fstat(rsk.fileno()) - except: - print('FAIL: Socket %d lost' % sk.sk_id) - return CHK_FAIL_SOCKET - if not stat.S_ISSOCK(s_st.st_mode): - print('FAIL: Not a socket %d at %d' % (sk.sk_id, rsk.fileno())) - return CHK_FAIL_STAT + # In theory we can have key-not-found exception here, + # but this has nothing to do with sockets restore, + # since it's just bytes in memory, so ... 
we assume + # that we have object here and just check for it in + # the fdtable + rsk = st.real_sockets[sk.sk_id] + try: + s_st = os.fstat(rsk.fileno()) + except: + print('FAIL: Socket %d lost' % sk.sk_id) + return CHK_FAIL_SOCKET + if not stat.S_ISSOCK(s_st.st_mode): + print('FAIL: Not a socket %d at %d' % (sk.sk_id, rsk.fileno())) + return CHK_FAIL_STAT - # First -- check the listen states and names - for sk in st.sockets: - if not sk.visible: - continue + # First -- check the listen states and names + for sk in st.sockets: + if not sk.visible: + continue - rsk = st.real_sockets[sk.sk_id] - r_listen = rsk.getsockopt(socket.SOL_SOCKET, socket.SO_ACCEPTCONN) - if (sk.listen and r_listen == 0) or (not sk.listen and r_listen == 1): - print("FAIL: Socket %d listen %d, expected %d" - % (sk.sk_id, r_listen, sk.listen and 1 or 0)) - return CHK_FAIL_LISTEN + rsk = st.real_sockets[sk.sk_id] + r_listen = rsk.getsockopt(socket.SOL_SOCKET, socket.SO_ACCEPTCONN) + if (sk.listen and r_listen == 0) or (not sk.listen and r_listen == 1): + print("FAIL: Socket %d listen %d, expected %d" % + (sk.sk_id, r_listen, sk.listen and 1 or 0)) + return CHK_FAIL_LISTEN - if sk.name: - r_name = rsk.getsockname() - w_name = sock.real_name_for(sk.name) - if r_name != w_name: - print('FAIL: Socket %d name mismatch [%s], want [%s]' - % (sk.sk_id, r_name, w_name)) - return CHK_FAIL_NAME + if sk.name: + r_name = rsk.getsockname() + w_name = sock.real_name_for(sk.name) + if r_name != w_name: + print('FAIL: Socket %d name mismatch [%s], want [%s]' % + (sk.sk_id, r_name, w_name)) + return CHK_FAIL_NAME - # Second -- check (accept) pending connections - for sk in st.sockets: - if not sk.listen: - continue + # Second -- check (accept) pending connections + for sk in st.sockets: + if not sk.listen: + continue - rsk = st.real_sockets[sk.sk_id] - set_nonblock(rsk) + rsk = st.real_sockets[sk.sk_id] + set_nonblock(rsk) - while sk.icons: - # Do act_accept to change the state properly - # and not write the code twice - acc = act_accept(sk.sk_id) - acc.act(st) - try: - acc.do(st) - except: - print('FAIL: Cannot accept pending connection for %d' % sk.sk_id) - return CHK_FAIL_ACCEPT + while sk.icons: + # Do act_accept to change the state properly + # and not write the code twice + acc = act_accept(sk.sk_id) + acc.act(st) + try: + acc.do(st) + except: + print('FAIL: Cannot accept pending connection for %d' % + sk.sk_id) + return CHK_FAIL_ACCEPT - print(' `- did %s' % acc.show()) + print(' `- did %s' % acc.show()) - # Third -- check inqueues - for sk in st.sockets: - if not sk.inqueue: - continue + # Third -- check inqueues + for sk in st.sockets: + if not sk.inqueue: + continue - rsk = st.real_sockets[sk.sk_id] - set_nonblock(rsk) + rsk = st.real_sockets[sk.sk_id] + set_nonblock(rsk) - while sk.inqueue: - msg = sk.inqueue.pop(0) - try: - r_msg, m_from = rsk.recvfrom(128) - except: - print('FAIL: No message in queue for %d' % sk.sk_id) - return CHK_FAIL_RECV_0 + while sk.inqueue: + msg = sk.inqueue.pop(0) + try: + r_msg, m_from = rsk.recvfrom(128) + except: + print('FAIL: No message in queue for %d' % sk.sk_id) + return CHK_FAIL_RECV_0 - w_msg = act_sendmsg.msgval(msg[1]) - if r_msg != w_msg: - print('FAIL: Message misorder: %s want %s (from %d)' - %(r_msg, w_msg, msg[0])) - return CHK_FAIL_RECV_MIX + w_msg = act_sendmsg.msgval(msg[1]) + if r_msg != w_msg: + print('FAIL: Message misorder: %s want %s (from %d)' % + (r_msg, w_msg, msg[0])) + return CHK_FAIL_RECV_MIX - # TODO -- check sender - print(' `- recvd %d.%d msg %s -> %d' - % (msg[0], 
msg[1], m_from, sk.sk_id)) + # TODO -- check sender + print(' `- recvd %d.%d msg %s -> %d' % + (msg[0], msg[1], m_from, sk.sk_id)) - # Finally, after all sockets are visible and all inqueues are - # drained -- check the sockets connectivity - for sk in st.sockets: - if not sk.peer: - continue + # Finally, after all sockets are visible and all inqueues are + # drained -- check the sockets connectivity + for sk in st.sockets: + if not sk.peer: + continue - # Closed connection with one peer alive. Cannot check. - if not sk.peer in st.real_sockets: - continue + # Closed connection with one peer alive. Cannot check. + if not sk.peer in st.real_sockets: + continue - rsk = st.real_sockets[sk.sk_id] - psk = st.real_sockets[sk.peer] - set_nonblock(psk) - msgv = act_sendmsg.msgval(3 * sk.sk_id + 5 * sk.peer, 'C') # just random + rsk = st.real_sockets[sk.sk_id] + psk = st.real_sockets[sk.peer] + set_nonblock(psk) + msgv = act_sendmsg.msgval(3 * sk.sk_id + 5 * sk.peer, + 'C') # just random - try: - rsk.send(msgv) - rmsg = psk.recv(128) - except: - print('FAIL: Connectivity %d -> %d lost' % (sk.sk_id, sk.peer)) - return CHK_FAIL_CONNECT + try: + rsk.send(msgv) + rmsg = psk.recv(128) + except: + print('FAIL: Connectivity %d -> %d lost' % (sk.sk_id, sk.peer)) + return CHK_FAIL_CONNECT - # If sockets are not connected the recv above - # would generate exception and the check would - # fail. But just in case we've screwed the queues - # the hard way -- also check for the message being - # delivered for real - if rmsg != msgv: - print('FAIL: Connectivity %d -> %d not verified' - % (sk.sk_id, sk.peer)) - return CHK_FAIL_CONNECT2 + # If sockets are not connected the recv above + # would generate exception and the check would + # fail. But just in case we've screwed the queues + # the hard way -- also check for the message being + # delivered for real + if rmsg != msgv: + print('FAIL: Connectivity %d -> %d not verified' % + (sk.sk_id, sk.peer)) + return CHK_FAIL_CONNECT2 - print(' `- checked %d -> %d with %s' % (sk.sk_id, sk.peer, msgv)) + print(' `- checked %d -> %d with %s' % (sk.sk_id, sk.peer, msgv)) - return CHK_PASS + return CHK_PASS def chk_state(st, opts): - print("Will check state") + print("Will check state") - sigsk_name = "\0" + "CRSIGSKC" - signal_sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0) - signal_sk.bind(sigsk_name) + sigsk_name = "\0" + "CRSIGSKC" + signal_sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0) + signal_sk.bind(sigsk_name) - # FIXME Ideally call to criu should be performed by the run_state's - # pid!=0 branch, but for simplicity we fork the kid which has the - # same set of sockets we do, then dump it. Then restore and notify - # via dgram socket to check its state. Current task still has all - # the same sockets :) so we close them not to produce bind() name - # conflicts on restore + # FIXME Ideally call to criu should be performed by the run_state's + # pid!=0 branch, but for simplicity we fork the kid which has the + # same set of sockets we do, then dump it. Then restore and notify + # via dgram socket to check its state. 
Current task still has all + # the same sockets :) so we close them not to produce bind() name + # conflicts on restore - pid = os.fork() - if pid == 0: - msg = signal_sk.recv(64) - ret = chk_real_state(st) - sys.exit(ret) + pid = os.fork() + if pid == 0: + msg = signal_sk.recv(64) + ret = chk_real_state(st) + sys.exit(ret) - signal_sk.close() - for rsk in st.real_sockets.values(): - rsk.close() + signal_sk.close() + for rsk in st.real_sockets.values(): + rsk.close() - print("`- dump") - img_path = "sti_" + st.describe() - try: - os.mkdir(img_path) - subprocess.check_call([criu_bin, "dump", "-t", "%d" % pid, "-D", img_path, "-v4", "-o", "dump.log", "-j"]) - except: - print("Dump failed") - os.kill(pid, signal.SIGKILL) - return CHK_FAIL_DUMP + print("`- dump") + img_path = "sti_" + st.describe() + try: + os.mkdir(img_path) + subprocess.check_call([ + criu_bin, "dump", "-t", + "%d" % pid, "-D", img_path, "-v4", "-o", "dump.log", "-j" + ]) + except: + print("Dump failed") + os.kill(pid, signal.SIGKILL) + return CHK_FAIL_DUMP - print("`- restore") - try: - os.waitpid(pid, 0) - subprocess.check_call([criu_bin, "restore", "-D", img_path, "-v4", "-o", "rst.log", "-j", "-d", "-S"]) - except: - print("Restore failed") - return CHK_FAIL_RESTORE + print("`- restore") + try: + os.waitpid(pid, 0) + subprocess.check_call([ + criu_bin, "restore", "-D", img_path, "-v4", "-o", "rst.log", "-j", + "-d", "-S" + ]) + except: + print("Restore failed") + return CHK_FAIL_RESTORE - print("`- check") - signal_sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0) - try: - signal_sk.sendto('check', sigsk_name) - except: - # Probably the peer has died before us or smth else went wrong - os.kill(pid, signal.SIGKILL) + print("`- check") + signal_sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0) + try: + signal_sk.sendto('check', sigsk_name) + except: + # Probably the peer has died before us or smth else went wrong + os.kill(pid, signal.SIGKILL) - wp, status = os.waitpid(pid, 0) - if os.WIFEXITED(status): - status = os.WEXITSTATUS(status) - if status != CHK_PASS: - print("`- exited with %d" % status) - return status - elif os.WIFSIGNALED(status): - status = os.WTERMSIG(status) - print("`- killed with %d" % status) - return CHK_FAIL_KILLED - else: - return CHK_FAIL_UNKNOWN + wp, status = os.waitpid(pid, 0) + if os.WIFEXITED(status): + status = os.WEXITSTATUS(status) + if status != CHK_PASS: + print("`- exited with %d" % status) + return status + elif os.WIFSIGNALED(status): + status = os.WTERMSIG(status) + print("`- killed with %d" % status) + return CHK_FAIL_KILLED + else: + return CHK_FAIL_UNKNOWN - return CHK_PASS + return CHK_PASS def run_state(st, opts): - print("Will run state") - pid = os.fork() - if pid != 0: - wpid, status = os.wait() - if os.WIFEXITED(status): - status = os.WEXITSTATUS(status) - elif os.WIFSIGNALED(status): - status = CHK_FAIL_KILLED - else: - status = CHK_FAIL_UNKNOWN - return status + print("Will run state") + pid = os.fork() + if pid != 0: + wpid, status = os.wait() + if os.WIFEXITED(status): + status = os.WEXITSTATUS(status) + elif os.WIFSIGNALED(status): + status = CHK_FAIL_KILLED + else: + status = CHK_FAIL_UNKNOWN + return status - # Try the states in subprocess so that once - # it exits the created sockets are removed - for step in st.steps: - step.do(st) + # Try the states in subprocess so that once + # it exits the created sockets are removed + for step in st.steps: + step.do(st) - if not opts.run: - ret = chk_state(st, opts) - else: - ret = chk_real_state(st) + if not 
opts.run: + ret = chk_state(st, opts) + else: + ret = chk_real_state(st) - sys.exit(ret) + sys.exit(ret) -def proceed(st, seen, failed, opts, depth = 0): - desc = st.describe() - if not desc: - pass - elif not desc in seen: - # When scanning the tree we run and try only states that - # differ, but don't stop tree traversal on them. This is - # because sometimes we can get into the already seen state - # using less steps and it's better to proceed as we have - # depth to move forward and generate more states. - seen[desc] = len(st.steps) - print('%s' % desc) - for s in st.steps: - print('\t%s' % s.show()) +def proceed(st, seen, failed, opts, depth=0): + desc = st.describe() + if not desc: + pass + elif not desc in seen: + # When scanning the tree we run and try only states that + # differ, but don't stop tree traversal on them. This is + # because sometimes we can get into the already seen state + # using less steps and it's better to proceed as we have + # depth to move forward and generate more states. + seen[desc] = len(st.steps) + print('%s' % desc) + for s in st.steps: + print('\t%s' % s.show()) - if not opts.gen: - ret = run_state(st, opts) - if ret != CHK_PASS: - failed.add((desc, ret)) - if not opts.keep: - return False - else: - # Don't even proceed with this state if we've already - # seen one but get there with less steps - seen_score = seen[desc] - if len(st.steps) > seen_score: - return True - else: - seen[desc] = len(st.steps) + if not opts.gen: + ret = run_state(st, opts) + if ret != CHK_PASS: + failed.add((desc, ret)) + if not opts.keep: + return False + else: + # Don't even proceed with this state if we've already + # seen one but get there with less steps + seen_score = seen[desc] + if len(st.steps) > seen_score: + return True + else: + seen[desc] = len(st.steps) - if depth >= opts.depth: - return True + if depth >= opts.depth: + return True - actions = st.get_actions() - for act in actions: - nst = st.clone() - act.act(nst) - nst.steps.append(act) - if not proceed(nst, seen, failed, opts, depth + 1): - return False + actions = st.get_actions() + for act in actions: + nst = st.clone() + act.act(nst) + nst.steps.append(act) + if not proceed(nst, seen, failed, opts, depth + 1): + return False - return True + return True p = argparse.ArgumentParser("CRIU test suite") -p.add_argument("--depth", help = "Depth of generated tree", default = '8') -p.add_argument("--sockets", help = "Maximum number of sockets", default = '1') -p.add_argument("--dgram", help = "Use SOCK_DGRAM sockets", action = 'store_true') -p.add_argument("--stream", help = "Use SOCK_STREAM sockets", action = 'store_true') -p.add_argument("--gen", help = "Only generate and show states", action = 'store_true') -p.add_argument("--run", help = "Run the states, but don't C/R", action = 'store_true') -p.add_argument("--keep", help = "Don't stop on error", action = 'store_true') +p.add_argument("--depth", help="Depth of generated tree", default='8') +p.add_argument("--sockets", help="Maximum number of sockets", default='1') +p.add_argument("--dgram", help="Use SOCK_DGRAM sockets", action='store_true') +p.add_argument("--stream", help="Use SOCK_STREAM sockets", action='store_true') +p.add_argument("--gen", + help="Only generate and show states", + action='store_true') +p.add_argument("--run", + help="Run the states, but don't C/R", + action='store_true') +p.add_argument("--keep", help="Don't stop on error", action='store_true') opts = p.parse_args() opts.depth = int(opts.depth) # XXX: does it make any sense to mix two 
types in one go? if opts.stream and opts.dgram: - print('Choose only one type') - sys.exit(1) + print('Choose only one type') + sys.exit(1) if opts.stream: - sk_type = socket.SOCK_STREAM + sk_type = socket.SOCK_STREAM elif opts.dgram: - sk_type = socket.SOCK_DGRAM + sk_type = socket.SOCK_DGRAM else: - print('Choose some type') - sys.exit(1) + print('Choose some type') + sys.exit(1) st = state(int(opts.sockets), sk_type) seen = {} @@ -747,8 +765,9 @@ failed = set() proceed(st, seen, failed, opts) if len(failed) == 0: - print('PASS (%d states)' % len(seen)) + print('PASS (%d states)' % len(seen)) else: - print('FAIL %d/%d' % (len(failed), len(seen))) - for f in failed: - print("\t%-50s: %s" % (f[0], fail_desc.get(f[1], 'unknown reason %d' % f[1]))) + print('FAIL %d/%d' % (len(failed), len(seen))) + for f in failed: + print("\t%-50s: %s" % + (f[0], fail_desc.get(f[1], 'unknown reason %d' % f[1]))) diff --git a/test/inhfd/fifo.py b/test/inhfd/fifo.py index 64e5f8f13..2d20e4dbf 100755 --- a/test/inhfd/fifo.py +++ b/test/inhfd/fifo.py @@ -5,35 +5,35 @@ id_str = "" def create_fds(): - tdir = tempfile.mkdtemp("zdtm.inhfd.XXXXXX") - if os.system("mount -t tmpfs zdtm.inhfd %s" % tdir) != 0: - raise Exception("Unable to mount tmpfs") - tfifo = os.path.join(tdir, "test_fifo") - os.mkfifo(tfifo) - fd2 = open(tfifo, "w+b", buffering=0) - fd1 = open(tfifo, "rb") - os.system("umount -l %s" % tdir) - os.rmdir(tdir) + tdir = tempfile.mkdtemp("zdtm.inhfd.XXXXXX") + if os.system("mount -t tmpfs zdtm.inhfd %s" % tdir) != 0: + raise Exception("Unable to mount tmpfs") + tfifo = os.path.join(tdir, "test_fifo") + os.mkfifo(tfifo) + fd2 = open(tfifo, "w+b", buffering=0) + fd1 = open(tfifo, "rb") + os.system("umount -l %s" % tdir) + os.rmdir(tdir) - mnt_id = -1 - with open("/proc/self/fdinfo/%d" % fd1.fileno()) as f: - for line in f: - line = line.split() - if line[0] == "mnt_id:": - mnt_id = int(line[1]) - break - else: - raise Exception("Unable to find mnt_id") + mnt_id = -1 + with open("/proc/self/fdinfo/%d" % fd1.fileno()) as f: + for line in f: + line = line.split() + if line[0] == "mnt_id:": + mnt_id = int(line[1]) + break + else: + raise Exception("Unable to find mnt_id") - global id_str - id_str = "file[%x:%x]" % (mnt_id, os.fstat(fd1.fileno()).st_ino) + global id_str + id_str = "file[%x:%x]" % (mnt_id, os.fstat(fd1.fileno()).st_ino) - return [(fd2, fd1)] + return [(fd2, fd1)] def filename(pipef): - return id_str + return id_str def dump_opts(sockf): - return ["--external", id_str] + return ["--external", id_str] diff --git a/test/inhfd/pipe.py b/test/inhfd/pipe.py index 318dc862d..8d8318d5b 100755 --- a/test/inhfd/pipe.py +++ b/test/inhfd/pipe.py @@ -2,16 +2,16 @@ import os def create_fds(): - pipes = [] - for i in range(10): - (fd1, fd2) = os.pipe() - pipes.append((os.fdopen(fd2, "wb"), os.fdopen(fd1, "rb"))) - return pipes + pipes = [] + for i in range(10): + (fd1, fd2) = os.pipe() + pipes.append((os.fdopen(fd2, "wb"), os.fdopen(fd1, "rb"))) + return pipes def filename(pipef): - return 'pipe:[%d]' % os.fstat(pipef.fileno()).st_ino + return 'pipe:[%d]' % os.fstat(pipef.fileno()).st_ino def dump_opts(sockf): - return [] + return [] diff --git a/test/inhfd/socket.py b/test/inhfd/socket.py index feba0e0c6..9cea16ffb 100755 --- a/test/inhfd/socket.py +++ b/test/inhfd/socket.py @@ -3,19 +3,19 @@ import os def create_fds(): - (sk1, sk2) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM) - (sk3, sk4) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM) - return [(sk1.makefile("wb"), sk2.makefile("rb")), - 
(sk3.makefile("wb"), sk4.makefile("rb"))] + (sk1, sk2) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM) + (sk3, sk4) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM) + return [(sk1.makefile("wb"), sk2.makefile("rb")), + (sk3.makefile("wb"), sk4.makefile("rb"))] def __sock_ino(sockf): - return os.fstat(sockf.fileno()).st_ino + return os.fstat(sockf.fileno()).st_ino def filename(sockf): - return 'socket:[%d]' % __sock_ino(sockf) + return 'socket:[%d]' % __sock_ino(sockf) def dump_opts(sockf): - return ['--external', 'unix[%d]' % __sock_ino(sockf)] + return ['--external', 'unix[%d]' % __sock_ino(sockf)] diff --git a/test/inhfd/tty.py b/test/inhfd/tty.py index ae76a96d4..c11a57117 100755 --- a/test/inhfd/tty.py +++ b/test/inhfd/tty.py @@ -4,34 +4,33 @@ import os import pty import termios - ctl = False def child_prep(fd): - global ctl - if ctl: - return - ctl = True - fcntl.ioctl(fd.fileno(), termios.TIOCSCTTY, 1) + global ctl + if ctl: + return + ctl = True + fcntl.ioctl(fd.fileno(), termios.TIOCSCTTY, 1) def create_fds(): - ttys = [] - for i in range(10): - (fd1, fd2) = pty.openpty() - newattr = termios.tcgetattr(fd1) - newattr[3] &= ~termios.ICANON & ~termios.ECHO - termios.tcsetattr(fd1, termios.TCSADRAIN, newattr) - ttys.append((os.fdopen(fd1, "wb"), os.fdopen(fd2, "rb"))) - return ttys + ttys = [] + for i in range(10): + (fd1, fd2) = pty.openpty() + newattr = termios.tcgetattr(fd1) + newattr[3] &= ~termios.ICANON & ~termios.ECHO + termios.tcsetattr(fd1, termios.TCSADRAIN, newattr) + ttys.append((os.fdopen(fd1, "wb"), os.fdopen(fd2, "rb"))) + return ttys def filename(pipef): - st = os.fstat(pipef.fileno()) - return 'tty[%x:%x]' % (st.st_rdev, st.st_dev) + st = os.fstat(pipef.fileno()) + return 'tty[%x:%x]' % (st.st_rdev, st.st_dev) def dump_opts(sockf): - st = os.fstat(sockf.fileno()) - return "--external", 'tty[%x:%x]' % (st.st_rdev, st.st_dev) + st = os.fstat(sockf.fileno()) + return "--external", 'tty[%x:%x]' % (st.st_rdev, st.st_dev) diff --git a/test/others/ext-tty/run.py b/test/others/ext-tty/run.py index f44b1d946..b1dcb4a5a 100755 --- a/test/others/ext-tty/run.py +++ b/test/others/ext-tty/run.py @@ -5,32 +5,41 @@ import os, sys, time, signal, pty master, slave = pty.openpty() p = subprocess.Popen(["setsid", "--ctty", "sleep", "10000"], - stdin = slave, stdout = slave, stderr = slave, close_fds = True) + stdin=slave, + stdout=slave, + stderr=slave, + close_fds=True) st = os.stat("/proc/self/fd/%d" % slave) ttyid = "tty[%x:%x]" % (st.st_rdev, st.st_dev) os.close(slave) time.sleep(1) -ret = subprocess.Popen(["../../../criu/criu", "dump", "-t", str(p.pid), "-v4", "--external", ttyid]).wait() +ret = subprocess.Popen([ + "../../../criu/criu", "dump", "-t", + str(p.pid), "-v4", "--external", ttyid +]).wait() if ret: - sys.exit(ret) + sys.exit(ret) p.wait() -new_master, slave = pty.openpty() # get another pty pair +new_master, slave = pty.openpty() # get another pty pair os.close(master) ttyid = "fd[%d]:tty[%x:%x]" % (slave, st.st_rdev, st.st_dev) -ret = subprocess.Popen(["../../../criu/criu", "restore", "-v4", "--inherit-fd", ttyid, "--restore-sibling", "--restore-detach"]).wait() +ret = subprocess.Popen([ + "../../../criu/criu", "restore", "-v4", "--inherit-fd", ttyid, + "--restore-sibling", "--restore-detach" +]).wait() if ret: - sys.exit(ret) + sys.exit(ret) os.close(slave) -os.waitpid(-1, os.WNOHANG) # is the process alive +os.waitpid(-1, os.WNOHANG) # is the process alive os.close(new_master) _, status = os.wait() if not os.WIFSIGNALED(status) or os.WTERMSIG(status) != 
signal.SIGHUP: - print(status) - sys.exit(1) + print(status) + sys.exit(1) print("PASS") diff --git a/test/others/mounts/mounts.py b/test/others/mounts/mounts.py index dc65ba45c..70b0be5fa 100755 --- a/test/others/mounts/mounts.py +++ b/test/others/mounts/mounts.py @@ -1,31 +1,36 @@ import os import tempfile, random + def mount(src, dst, shared, private, slave): - cmd = "mount" - if shared: - cmd += " --make-shared" - if private: - cmd += " --make-private" - if slave: - cmd += " --make-slave" - if src: - cmd += " --bind '%s' '%s'" % (src, dst) - else: - cmd += " -t tmpfs none '%s'" % (dst) + cmd = "mount" + if shared: + cmd += " --make-shared" + if private: + cmd += " --make-private" + if slave: + cmd += " --make-slave" + if src: + cmd += " --bind '%s' '%s'" % (src, dst) + else: + cmd += " -t tmpfs none '%s'" % (dst) - print(cmd) - ret = os.system(cmd) - if ret: - print("failed") + print(cmd) + ret = os.system(cmd) + if ret: + print("failed") -root = tempfile.mkdtemp(prefix = "root.mount", dir = "/tmp") + +root = tempfile.mkdtemp(prefix="root.mount", dir="/tmp") mount(None, root, 1, 0, 0) mounts = [root] for i in range(10): - dstdir = random.choice(mounts) - dst = tempfile.mkdtemp(prefix = "mount", dir = dstdir) - src = random.choice(mounts + [None]) - mount(src, dst, random.randint(0,100) > 50, random.randint(0,100) > 90, random.randint(0,100) > 50) - mounts.append(dst) + dstdir = random.choice(mounts) + dst = tempfile.mkdtemp(prefix="mount", dir=dstdir) + src = random.choice(mounts + [None]) + mount(src, dst, + random.randint(0, 100) > 50, + random.randint(0, 100) > 90, + random.randint(0, 100) > 50) + mounts.append(dst) diff --git a/test/others/rpc/config_file.py b/test/others/rpc/config_file.py index 23a06615f..3579ac76f 100755 --- a/test/others/rpc/config_file.py +++ b/test/others/rpc/config_file.py @@ -14,169 +14,174 @@ does_not_exist = 'does-not.exist' def setup_swrk(): - print('Connecting to CRIU in swrk mode.') - css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET) - swrk = subprocess.Popen(['./criu', "swrk", "%d" % css[0].fileno()]) - css[0].close() - return swrk, css[1] + print('Connecting to CRIU in swrk mode.') + css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET) + swrk = subprocess.Popen(['./criu', "swrk", "%d" % css[0].fileno()]) + css[0].close() + return swrk, css[1] def setup_config_file(content): - # Creating a temporary file which will be used as configuration file. - fd, path = mkstemp() + # Creating a temporary file which will be used as configuration file. + fd, path = mkstemp() - with os.fdopen(fd, 'w') as f: - f.write(content) + with os.fdopen(fd, 'w') as f: + f.write(content) - os.environ['CRIU_CONFIG_FILE'] = path + os.environ['CRIU_CONFIG_FILE'] = path - return path + return path def cleanup_config_file(path): - if os.environ.get('CRIU_CONFIG_FILE', None) is not None: - del os.environ['CRIU_CONFIG_FILE'] - os.unlink(path) + if os.environ.get('CRIU_CONFIG_FILE', None) is not None: + del os.environ['CRIU_CONFIG_FILE'] + os.unlink(path) def cleanup_output(path): - for f in (does_not_exist, log_file): - f = os.path.join(path, f) - if os.access(f, os.F_OK): - os.unlink(f) + for f in (does_not_exist, log_file): + f = os.path.join(path, f) + if os.access(f, os.F_OK): + os.unlink(f) def setup_criu_dump_request(): - # Create criu msg, set it's type to dump request - # and set dump options. 
Checkout more options in protobuf/rpc.proto - req = rpc.criu_req() - req.type = rpc.DUMP - req.opts.leave_running = True - req.opts.log_level = 4 - req.opts.log_file = log_file - req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY) - # Not necessary, just for testing - req.opts.tcp_established = True - req.opts.shell_job = True - return req + # Create criu msg, set it's type to dump request + # and set dump options. Checkout more options in protobuf/rpc.proto + req = rpc.criu_req() + req.type = rpc.DUMP + req.opts.leave_running = True + req.opts.log_level = 4 + req.opts.log_file = log_file + req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY) + # Not necessary, just for testing + req.opts.tcp_established = True + req.opts.shell_job = True + return req def do_rpc(s, req): - # Send request - s.send(req.SerializeToString()) + # Send request + s.send(req.SerializeToString()) - # Recv response - resp = rpc.criu_resp() - MAX_MSG_SIZE = 1024 - resp.ParseFromString(s.recv(MAX_MSG_SIZE)) + # Recv response + resp = rpc.criu_resp() + MAX_MSG_SIZE = 1024 + resp.ParseFromString(s.recv(MAX_MSG_SIZE)) - s.close() - return resp + s.close() + return resp def test_broken_configuration_file(): - # Testing RPC configuration file mode with a broken configuration file. - # This should fail - content = 'hopefully-this-option-will-never=exist' - path = setup_config_file(content) - swrk, s = setup_swrk() - s.close() - # This test is only about detecting wrong configuration files. - # If we do not sleep it might happen that we kill CRIU before - # it parses the configuration file. A short sleep makes sure - # that the configuration file has been parsed. Hopefully. - # (I am sure this will fail horribly at some point) - time.sleep(0.3) - swrk.kill() - return_code = swrk.wait() - # delete temporary file again - cleanup_config_file(path) - if return_code != 1: - print('FAIL: CRIU should have returned 1 instead of %d' % return_code) - sys.exit(-1) + # Testing RPC configuration file mode with a broken configuration file. + # This should fail + content = 'hopefully-this-option-will-never=exist' + path = setup_config_file(content) + swrk, s = setup_swrk() + s.close() + # This test is only about detecting wrong configuration files. + # If we do not sleep it might happen that we kill CRIU before + # it parses the configuration file. A short sleep makes sure + # that the configuration file has been parsed. Hopefully. 
+ # (I am sure this will fail horribly at some point) + time.sleep(0.3) + swrk.kill() + return_code = swrk.wait() + # delete temporary file again + cleanup_config_file(path) + if return_code != 1: + print('FAIL: CRIU should have returned 1 instead of %d' % return_code) + sys.exit(-1) def search_in_log_file(log, message): - with open(os.path.join(args['dir'], log)) as f: - if message not in f.read(): - print('FAIL: Missing the expected error message (%s) in the log file' % message) - sys.exit(-1) + with open(os.path.join(args['dir'], log)) as f: + if message not in f.read(): + print( + 'FAIL: Missing the expected error message (%s) in the log file' + % message) + sys.exit(-1) def check_results(resp, log): - # Check if the specified log file exists - if not os.path.isfile(os.path.join(args['dir'], log)): - print('FAIL: Expected log file %s does not exist' % log) - sys.exit(-1) - # Dump should have failed with: 'The criu itself is within dumped tree' - if resp.type != rpc.DUMP: - print('FAIL: Unexpected msg type %r' % resp.type) - sys.exit(-1) - if 'The criu itself is within dumped tree' not in resp.cr_errmsg: - print('FAIL: Missing the expected error message in RPC response') - sys.exit(-1) - # Look into the log file for the same message - search_in_log_file(log, 'The criu itself is within dumped tree') + # Check if the specified log file exists + if not os.path.isfile(os.path.join(args['dir'], log)): + print('FAIL: Expected log file %s does not exist' % log) + sys.exit(-1) + # Dump should have failed with: 'The criu itself is within dumped tree' + if resp.type != rpc.DUMP: + print('FAIL: Unexpected msg type %r' % resp.type) + sys.exit(-1) + if 'The criu itself is within dumped tree' not in resp.cr_errmsg: + print('FAIL: Missing the expected error message in RPC response') + sys.exit(-1) + # Look into the log file for the same message + search_in_log_file(log, 'The criu itself is within dumped tree') def test_rpc_without_configuration_file(): - # Testing without configuration file - # Just doing a dump and checking for the logfile - req = setup_criu_dump_request() - _, s = setup_swrk() - resp = do_rpc(s, req) - s.close() - check_results(resp, log_file) + # Testing without configuration file + # Just doing a dump and checking for the logfile + req = setup_criu_dump_request() + _, s = setup_swrk() + resp = do_rpc(s, req) + s.close() + check_results(resp, log_file) def test_rpc_with_configuration_file(): - # Testing with configuration file - # Just doing a dump and checking for the logfile + # Testing with configuration file + # Just doing a dump and checking for the logfile - # Setting a different log file via configuration file - # This should not work as RPC settings overwrite configuration - # file settings in the default configuration. - log = does_not_exist - content = 'log-file ' + log + '\n' - content += 'no-tcp-established\nno-shell-job' - path = setup_config_file(content) - req = setup_criu_dump_request() - _, s = setup_swrk() - do_rpc(s, req) - s.close() - cleanup_config_file(path) - # Check if the specified log file exists - # It should not as configuration files do not overwrite RPC values. - if os.path.isfile(os.path.join(args['dir'], log)): - print('FAIL: log file %s should not exist' % log) - sys.exit(-1) + # Setting a different log file via configuration file + # This should not work as RPC settings overwrite configuration + # file settings in the default configuration. 
+ log = does_not_exist + content = 'log-file ' + log + '\n' + content += 'no-tcp-established\nno-shell-job' + path = setup_config_file(content) + req = setup_criu_dump_request() + _, s = setup_swrk() + do_rpc(s, req) + s.close() + cleanup_config_file(path) + # Check if the specified log file exists + # It should not as configuration files do not overwrite RPC values. + if os.path.isfile(os.path.join(args['dir'], log)): + print('FAIL: log file %s should not exist' % log) + sys.exit(-1) def test_rpc_with_configuration_file_overwriting_rpc(): - # Testing with configuration file - # Just doing a dump and checking for the logfile + # Testing with configuration file + # Just doing a dump and checking for the logfile - # Setting a different log file via configuration file - # This should not work as RPC settings overwrite configuration - # file settings in the default configuration. - log = does_not_exist - content = 'log-file ' + log + '\n' - content += 'no-tcp-established\nno-shell-job' - path = setup_config_file(content) - # Only set the configuration file via RPC; - # not via environment variable - del os.environ['CRIU_CONFIG_FILE'] - req = setup_criu_dump_request() - req.opts.config_file = path - _, s = setup_swrk() - resp = do_rpc(s, req) - s.close() - cleanup_config_file(path) - check_results(resp, log) + # Setting a different log file via configuration file + # This should not work as RPC settings overwrite configuration + # file settings in the default configuration. + log = does_not_exist + content = 'log-file ' + log + '\n' + content += 'no-tcp-established\nno-shell-job' + path = setup_config_file(content) + # Only set the configuration file via RPC; + # not via environment variable + del os.environ['CRIU_CONFIG_FILE'] + req = setup_criu_dump_request() + req.opts.config_file = path + _, s = setup_swrk() + resp = do_rpc(s, req) + s.close() + cleanup_config_file(path) + check_results(resp, log) -parser = argparse.ArgumentParser(description="Test config files using CRIU RPC") -parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed") +parser = argparse.ArgumentParser( + description="Test config files using CRIU RPC") +parser.add_argument('dir', + type=str, + help="Directory where CRIU images should be placed") args = vars(parser.parse_args()) diff --git a/test/others/rpc/errno.py b/test/others/rpc/errno.py index ee9e90d8c..49cb622de 100755 --- a/test/others/rpc/errno.py +++ b/test/others/rpc/errno.py @@ -6,130 +6,136 @@ import rpc_pb2 as rpc import argparse parser = argparse.ArgumentParser(description="Test errno reported by CRIU RPC") -parser.add_argument('socket', type = str, help = "CRIU service socket") -parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed") +parser.add_argument('socket', type=str, help="CRIU service socket") +parser.add_argument('dir', + type=str, + help="Directory where CRIU images should be placed") args = vars(parser.parse_args()) + # Prepare dir for images class test: - def __init__(self): - self.imgs_fd = os.open(args['dir'], os.O_DIRECTORY) - self.s = -1 - self._MAX_MSG_SIZE = 1024 + def __init__(self): + self.imgs_fd = os.open(args['dir'], os.O_DIRECTORY) + self.s = -1 + self._MAX_MSG_SIZE = 1024 - def connect(self): - self.s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET) - self.s.connect(args['socket']) + def connect(self): + self.s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET) + self.s.connect(args['socket']) - def get_base_req(self): - req = rpc.criu_req() - 
req.opts.log_level = 4 - req.opts.images_dir_fd = self.imgs_fd - return req + def get_base_req(self): + req = rpc.criu_req() + req.opts.log_level = 4 + req.opts.images_dir_fd = self.imgs_fd + return req - def send_req(self, req): - self.connect() - self.s.send(req.SerializeToString()) + def send_req(self, req): + self.connect() + self.s.send(req.SerializeToString()) - def recv_resp(self): - resp = rpc.criu_resp() - resp.ParseFromString(self.s.recv(self._MAX_MSG_SIZE)) - return resp + def recv_resp(self): + resp = rpc.criu_resp() + resp.ParseFromString(self.s.recv(self._MAX_MSG_SIZE)) + return resp - def check_resp(self, resp, typ, err): - if resp.type != typ: - raise Exception('Unexpected responce type ' + str(resp.type)) + def check_resp(self, resp, typ, err): + if resp.type != typ: + raise Exception('Unexpected responce type ' + str(resp.type)) - if resp.success: - raise Exception('Unexpected success = True') + if resp.success: + raise Exception('Unexpected success = True') - if err and resp.cr_errno != err: - raise Exception('Unexpected cr_errno ' + str(resp.cr_errno)) + if err and resp.cr_errno != err: + raise Exception('Unexpected cr_errno ' + str(resp.cr_errno)) - def no_process(self): - print('Try to dump unexisting process') - # Get pid of non-existing process. - # Suppose max_pid is not taken by any process. - with open("/proc/sys/kernel/pid_max", "r") as f: - pid = int(f.readline()) - try: - os.kill(pid, 0) - except OSError: - pass - else: - raise Exception('max pid is taken') + def no_process(self): + print('Try to dump unexisting process') + # Get pid of non-existing process. + # Suppose max_pid is not taken by any process. + with open("/proc/sys/kernel/pid_max", "r") as f: + pid = int(f.readline()) + try: + os.kill(pid, 0) + except OSError: + pass + else: + raise Exception('max pid is taken') - # Ask criu to dump non-existing process. - req = self.get_base_req() - req.type = rpc.DUMP - req.opts.pid = pid + # Ask criu to dump non-existing process. 
+ req = self.get_base_req() + req.type = rpc.DUMP + req.opts.pid = pid - self.send_req(req) - resp = self.recv_resp() + self.send_req(req) + resp = self.recv_resp() - self.check_resp(resp, rpc.DUMP, errno.ESRCH) + self.check_resp(resp, rpc.DUMP, errno.ESRCH) - print('Success') + print('Success') - def process_exists(self): - print('Try to restore process which pid is already taken by other process') + def process_exists(self): + print( + 'Try to restore process which pid is already taken by other process' + ) - # Perform self-dump - req = self.get_base_req() - req.type = rpc.DUMP - req.opts.leave_running = True + # Perform self-dump + req = self.get_base_req() + req.type = rpc.DUMP + req.opts.leave_running = True - self.send_req(req) - resp = self.recv_resp() + self.send_req(req) + resp = self.recv_resp() - if resp.success != True: - raise Exception('Self-dump failed') + if resp.success != True: + raise Exception('Self-dump failed') - # Ask to restore process from images of ourselves - req = self.get_base_req() - req.type = rpc.RESTORE + # Ask to restore process from images of ourselves + req = self.get_base_req() + req.type = rpc.RESTORE - self.send_req(req) - resp = self.recv_resp() + self.send_req(req) + resp = self.recv_resp() - self.check_resp(resp, rpc.RESTORE, errno.EEXIST) + self.check_resp(resp, rpc.RESTORE, errno.EEXIST) - print('Success') + print('Success') - def bad_options(self): - print('Try to send criu invalid opts') + def bad_options(self): + print('Try to send criu invalid opts') - # Subdirs are not allowed in log_file - req = self.get_base_req() - req.type = rpc.DUMP - req.opts.log_file = "../file.log" + # Subdirs are not allowed in log_file + req = self.get_base_req() + req.type = rpc.DUMP + req.opts.log_file = "../file.log" - self.send_req(req) - resp = self.recv_resp() + self.send_req(req) + resp = self.recv_resp() - self.check_resp(resp, rpc.DUMP, errno.EBADRQC) + self.check_resp(resp, rpc.DUMP, errno.EBADRQC) - print('Success') + print('Success') - def bad_request(self): - print('Try to send criu invalid request type') + def bad_request(self): + print('Try to send criu invalid request type') - req = self.get_base_req() - req.type = rpc.NOTIFY + req = self.get_base_req() + req.type = rpc.NOTIFY - self.send_req(req) - resp = self.recv_resp() + self.send_req(req) + resp = self.recv_resp() - self.check_resp(resp, rpc.EMPTY, None) + self.check_resp(resp, rpc.EMPTY, None) - print('Success') + print('Success') + + def run(self): + self.no_process() + self.process_exists() + self.bad_options() + self.bad_request() - def run(self): - self.no_process() - self.process_exists() - self.bad_options() - self.bad_request() t = test() t.run() diff --git a/test/others/rpc/ps_test.py b/test/others/rpc/ps_test.py index 1872120fc..d16efd3f6 100755 --- a/test/others/rpc/ps_test.py +++ b/test/others/rpc/ps_test.py @@ -5,8 +5,10 @@ import rpc_pb2 as rpc import argparse parser = argparse.ArgumentParser(description="Test page-server using CRIU RPC") -parser.add_argument('socket', type = str, help = "CRIU service socket") -parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed") +parser.add_argument('socket', type=str, help="CRIU service socket") +parser.add_argument('dir', + type=str, + help="Directory where CRIU images should be placed") args = vars(parser.parse_args()) @@ -16,45 +18,45 @@ s.connect(args['socket']) # Start page-server print('Starting page-server') -req = rpc.criu_req() -req.type = rpc.PAGE_SERVER -req.opts.log_file = 'page-server.log' 
-req.opts.log_level = 4 -req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY) +req = rpc.criu_req() +req.type = rpc.PAGE_SERVER +req.opts.log_file = 'page-server.log' +req.opts.log_level = 4 +req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY) s.send(req.SerializeToString()) -resp = rpc.criu_resp() +resp = rpc.criu_resp() MAX_MSG_SIZE = 1024 resp.ParseFromString(s.recv(MAX_MSG_SIZE)) if resp.type != rpc.PAGE_SERVER: - print('Unexpected msg type') - sys.exit(1) + print('Unexpected msg type') + sys.exit(1) else: - if resp.success: - # check if pid even exists - try: - os.kill(resp.ps.pid, 0) - except OSError as err: - if err.errno == errno.ESRCH: - print('No process with page-server pid %d' %(resp.ps.pid)) - else: - print('Can\'t check that process %d exists' %(resp.ps.pid)) - sys.exit(1) - print('Success, page-server pid %d started on port %u' %(resp.ps.pid, resp.ps.port)) - else: - print('Failed to start page-server') - sys.exit(1) - + if resp.success: + # check if pid even exists + try: + os.kill(resp.ps.pid, 0) + except OSError as err: + if err.errno == errno.ESRCH: + print('No process with page-server pid %d' % (resp.ps.pid)) + else: + print('Can\'t check that process %d exists' % (resp.ps.pid)) + sys.exit(1) + print('Success, page-server pid %d started on port %u' % + (resp.ps.pid, resp.ps.port)) + else: + print('Failed to start page-server') + sys.exit(1) # Perform self-dump print('Dumping myself using page-server') -req.type = rpc.DUMP -req.opts.ps.port = resp.ps.port -req.opts.ps.address = "127.0.0.1" -req.opts.log_file = 'dump.log' -req.opts.leave_running = True +req.type = rpc.DUMP +req.opts.ps.port = resp.ps.port +req.opts.ps.address = "127.0.0.1" +req.opts.log_file = 'dump.log' +req.opts.leave_running = True s.close() s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET) @@ -64,11 +66,11 @@ s.send(req.SerializeToString()) resp.ParseFromString(s.recv(MAX_MSG_SIZE)) if resp.type != rpc.DUMP: - print('Unexpected msg type') - sys.exit(1) + print('Unexpected msg type') + sys.exit(1) else: - if resp.success: - print('Success') - else: - print('Fail') - sys.exit(1) + if resp.success: + print('Success') + else: + print('Fail') + sys.exit(1) diff --git a/test/others/rpc/read.py b/test/others/rpc/read.py index bbf69b6cb..ff7e5c1a0 100644 --- a/test/others/rpc/read.py +++ b/test/others/rpc/read.py @@ -12,6 +12,6 @@ r = f.read(1) f.close() if r == '\0': - sys.exit(0) + sys.exit(0) sys.exit(-1) diff --git a/test/others/rpc/restore-loop.py b/test/others/rpc/restore-loop.py index ce5786a56..c81567426 100755 --- a/test/others/rpc/restore-loop.py +++ b/test/others/rpc/restore-loop.py @@ -4,9 +4,12 @@ import socket, os, sys import rpc_pb2 as rpc import argparse -parser = argparse.ArgumentParser(description="Test ability to restore a process from images using CRIU RPC") -parser.add_argument('socket', type = str, help = "CRIU service socket") -parser.add_argument('dir', type = str, help = "Directory where CRIU images could be found") +parser = argparse.ArgumentParser( + description="Test ability to restore a process from images using CRIU RPC") +parser.add_argument('socket', type=str, help="CRIU service socket") +parser.add_argument('dir', + type=str, + help="Directory where CRIU images could be found") args = vars(parser.parse_args()) @@ -16,30 +19,30 @@ s.connect(args['socket']) # Create criu msg, set it's type to dump request # and set dump options. 
Checkout more options in protobuf/rpc.proto -req = rpc.criu_req() -req.type = rpc.RESTORE -req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY) +req = rpc.criu_req() +req.type = rpc.RESTORE +req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY) # As the dumped process is running with setsid this should not # be necessary. There seems to be a problem for this testcase # in combination with alpine's setsid. # The dump is now done with -j and the restore also. -req.opts.shell_job = True +req.opts.shell_job = True # Send request s.send(req.SerializeToString()) # Recv response -resp = rpc.criu_resp() -MAX_MSG_SIZE = 1024 +resp = rpc.criu_resp() +MAX_MSG_SIZE = 1024 resp.ParseFromString(s.recv(MAX_MSG_SIZE)) if resp.type != rpc.RESTORE: - print('Unexpected msg type') - sys.exit(-1) + print('Unexpected msg type') + sys.exit(-1) else: - if resp.success: - print('Restore success') - else: - print('Restore fail') - sys.exit(-1) - print("PID of the restored program is %d\n" %(resp.restore.pid)) + if resp.success: + print('Restore success') + else: + print('Restore fail') + sys.exit(-1) + print("PID of the restored program is %d\n" % (resp.restore.pid)) diff --git a/test/others/rpc/test.py b/test/others/rpc/test.py index 0addbaedc..9a35e0e97 100755 --- a/test/others/rpc/test.py +++ b/test/others/rpc/test.py @@ -4,9 +4,12 @@ import socket, os, sys import rpc_pb2 as rpc import argparse -parser = argparse.ArgumentParser(description="Test dump/restore using CRIU RPC") -parser.add_argument('socket', type = str, help = "CRIU service socket") -parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed") +parser = argparse.ArgumentParser( + description="Test dump/restore using CRIU RPC") +parser.add_argument('socket', type=str, help="CRIU service socket") +parser.add_argument('dir', + type=str, + help="Directory where CRIU images should be placed") args = vars(parser.parse_args()) @@ -16,32 +19,32 @@ s.connect(args['socket']) # Create criu msg, set it's type to dump request # and set dump options. 
Checkout more options in protobuf/rpc.proto -req = rpc.criu_req() -req.type = rpc.DUMP -req.opts.leave_running = True -req.opts.log_level = 4 -req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY) +req = rpc.criu_req() +req.type = rpc.DUMP +req.opts.leave_running = True +req.opts.log_level = 4 +req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY) # Send request s.send(req.SerializeToString()) # Recv response -resp = rpc.criu_resp() -MAX_MSG_SIZE = 1024 +resp = rpc.criu_resp() +MAX_MSG_SIZE = 1024 resp.ParseFromString(s.recv(MAX_MSG_SIZE)) if resp.type != rpc.DUMP: - print('Unexpected msg type') - sys.exit(-1) + print('Unexpected msg type') + sys.exit(-1) else: - if resp.success: - print('Success') - else: - print('Fail') - sys.exit(-1) + if resp.success: + print('Success') + else: + print('Fail') + sys.exit(-1) - if resp.dump.restored: - print('Restored') + if resp.dump.restored: + print('Restored') # Connect to service socket s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET) @@ -61,21 +64,21 @@ MAX_MSG_SIZE = 1024 resp.ParseFromString(s.recv(MAX_MSG_SIZE)) if resp.type != rpc.VERSION: - print('RPC: Unexpected msg type') - sys.exit(-1) + print('RPC: Unexpected msg type') + sys.exit(-1) else: - if resp.success: - print('RPC: Success') - print('CRIU major %d' % resp.version.major_number) - print('CRIU minor %d' % resp.version.minor_number) - if resp.version.HasField('gitid'): - print('CRIU gitid %s' % resp.version.gitid) - if resp.version.HasField('sublevel'): - print('CRIU sublevel %s' % resp.version.sublevel) - if resp.version.HasField('extra'): - print('CRIU extra %s' % resp.version.extra) - if resp.version.HasField('name'): - print('CRIU name %s' % resp.version.name) - else: - print('Fail') - sys.exit(-1) + if resp.success: + print('RPC: Success') + print('CRIU major %d' % resp.version.major_number) + print('CRIU minor %d' % resp.version.minor_number) + if resp.version.HasField('gitid'): + print('CRIU gitid %s' % resp.version.gitid) + if resp.version.HasField('sublevel'): + print('CRIU sublevel %s' % resp.version.sublevel) + if resp.version.HasField('extra'): + print('CRIU extra %s' % resp.version.extra) + if resp.version.HasField('name'): + print('CRIU name %s' % resp.version.name) + else: + print('Fail') + sys.exit(-1) diff --git a/test/others/rpc/version.py b/test/others/rpc/version.py index 247bc466d..f978c6c37 100755 --- a/test/others/rpc/version.py +++ b/test/others/rpc/version.py @@ -27,21 +27,21 @@ MAX_MSG_SIZE = 1024 resp.ParseFromString(s.recv(MAX_MSG_SIZE)) if resp.type != rpc.VERSION: - print('RPC: Unexpected msg type') - sys.exit(-1) + print('RPC: Unexpected msg type') + sys.exit(-1) else: - if resp.success: - print('RPC: Success') - print('CRIU major %d' % resp.version.major_number) - print('CRIU minor %d' % resp.version.minor_number) - if resp.version.HasField('gitid'): - print('CRIU gitid %s' % resp.version.gitid) - if resp.version.HasField('sublevel'): - print('CRIU sublevel %s' % resp.version.sublevel) - if resp.version.HasField('extra'): - print('CRIU extra %s' % resp.version.extra) - if resp.version.HasField('name'): - print('CRIU name %s' % resp.version.name) - else: - print('Fail') - sys.exit(-1) + if resp.success: + print('RPC: Success') + print('CRIU major %d' % resp.version.major_number) + print('CRIU minor %d' % resp.version.minor_number) + if resp.version.HasField('gitid'): + print('CRIU gitid %s' % resp.version.gitid) + if resp.version.HasField('sublevel'): + print('CRIU sublevel %s' % resp.version.sublevel) + if 
resp.version.HasField('extra'): + print('CRIU extra %s' % resp.version.extra) + if resp.version.HasField('name'): + print('CRIU name %s' % resp.version.name) + else: + print('Fail') + sys.exit(-1) diff --git a/test/others/shell-job/run.py b/test/others/shell-job/run.py index 4f4dfadef..bd5c42509 100755 --- a/test/others/shell-job/run.py +++ b/test/others/shell-job/run.py @@ -6,15 +6,17 @@ cr_bin = "../../../criu/criu" os.chdir(os.getcwd()) + def create_pty(): - (fd1, fd2) = pty.openpty() - return (os.fdopen(fd1, "w+"), os.fdopen(fd2, "w+")) + (fd1, fd2) = pty.openpty() + return (os.fdopen(fd1, "w+"), os.fdopen(fd2, "w+")) + if not os.access("work", os.X_OK): os.mkdir("work", 0755) open("running", "w").close() -m,s = create_pty() +m, s = create_pty() p = os.pipe() pr = os.fdopen(p[0], "r") pw = os.fdopen(p[1], "w") @@ -46,14 +48,15 @@ if ret != 0: os.wait() os.unlink("running") -m,s = create_pty() +m, s = create_pty() cpid = os.fork() if cpid == 0: os.setsid() fcntl.ioctl(m.fileno(), termios.TIOCSCTTY, 1) cmd = [cr_bin, "restore", "-j", "-D", "work", "-v"] print("Run: %s" % " ".join(cmd)) - ret = subprocess.Popen([cr_bin, "restore", "-j", "-D", "work", "-v"]).wait() + ret = subprocess.Popen([cr_bin, "restore", "-j", "-D", "work", + "-v"]).wait() if ret != 0: sys.exit(1) sys.exit(0) diff --git a/test/zdtm.py b/test/zdtm.py index c52964528..0153c6058 100755 --- a/test/zdtm.py +++ b/test/zdtm.py @@ -32,26 +32,26 @@ prev_line = None def alarm(*args): - print("==== ALARM ====") + print("==== ALARM ====") signal.signal(signal.SIGALRM, alarm) def traceit(f, e, a): - if e == "line": - lineno = f.f_lineno - fil = f.f_globals["__file__"] - if fil.endswith("zdtm.py"): - global prev_line - line = linecache.getline(fil, lineno) - if line == prev_line: - print(" ...") - else: - prev_line = line - print("+%4d: %s" % (lineno, line.rstrip())) + if e == "line": + lineno = f.f_lineno + fil = f.f_globals["__file__"] + if fil.endswith("zdtm.py"): + global prev_line + line = linecache.getline(fil, lineno) + if line == prev_line: + print(" ...") + else: + prev_line = line + print("+%4d: %s" % (lineno, line.rstrip())) - return traceit + return traceit # Root dir for ns and uns flavors. 
All tests @@ -60,17 +60,17 @@ tests_root = None def clean_tests_root(): - global tests_root - if tests_root and tests_root[0] == os.getpid(): - os.rmdir(tests_root[1]) + global tests_root + if tests_root and tests_root[0] == os.getpid(): + os.rmdir(tests_root[1]) def make_tests_root(): - global tests_root - if not tests_root: - tests_root = (os.getpid(), tempfile.mkdtemp("", "criu-root-", "/tmp")) - atexit.register(clean_tests_root) - return tests_root[1] + global tests_root + if not tests_root: + tests_root = (os.getpid(), tempfile.mkdtemp("", "criu-root-", "/tmp")) + atexit.register(clean_tests_root) + return tests_root[1] # Report generation @@ -79,60 +79,61 @@ report_dir = None def init_report(path): - global report_dir - report_dir = path - if not os.access(report_dir, os.F_OK): - os.makedirs(report_dir) + global report_dir + report_dir = path + if not os.access(report_dir, os.F_OK): + os.makedirs(report_dir) def add_to_report(path, tgt_name): - global report_dir - if report_dir: - tgt_path = os.path.join(report_dir, tgt_name) - att = 0 - while os.access(tgt_path, os.F_OK): - tgt_path = os.path.join(report_dir, tgt_name + ".%d" % att) - att += 1 + global report_dir + if report_dir: + tgt_path = os.path.join(report_dir, tgt_name) + att = 0 + while os.access(tgt_path, os.F_OK): + tgt_path = os.path.join(report_dir, tgt_name + ".%d" % att) + att += 1 - ignore = shutil.ignore_patterns('*.socket') - if os.path.isdir(path): - shutil.copytree(path, tgt_path, ignore = ignore) - else: - if not os.path.exists(os.path.dirname(tgt_path)): - os.mkdir(os.path.dirname(tgt_path)) - shutil.copy2(path, tgt_path) + ignore = shutil.ignore_patterns('*.socket') + if os.path.isdir(path): + shutil.copytree(path, tgt_path, ignore=ignore) + else: + if not os.path.exists(os.path.dirname(tgt_path)): + os.mkdir(os.path.dirname(tgt_path)) + shutil.copy2(path, tgt_path) def add_to_output(path): - global report_dir - if not report_dir: - return + global report_dir + if not report_dir: + return - output_path = os.path.join(report_dir, "output") - with open(path, "r") as fdi, open(output_path, "a") as fdo: - for line in fdi: - fdo.write(line) + output_path = os.path.join(report_dir, "output") + with open(path, "r") as fdi, open(output_path, "a") as fdo: + for line in fdi: + fdo.write(line) prev_crash_reports = set(glob.glob("/tmp/zdtm-core-*.txt")) def check_core_files(): - reports = set(glob.glob("/tmp/zdtm-core-*.txt")) - prev_crash_reports - if not reports: - return False + reports = set(glob.glob("/tmp/zdtm-core-*.txt")) - prev_crash_reports + if not reports: + return False - while subprocess.Popen(r"ps axf | grep 'abrt\.sh'", shell = True).wait() == 0: - time.sleep(1) + while subprocess.Popen(r"ps axf | grep 'abrt\.sh'", + shell=True).wait() == 0: + time.sleep(1) - for i in reports: - add_to_report(i, os.path.basename(i)) - print_sep(i) - with open(i, "r") as report: - print(report.read()) - print_sep(i) + for i in reports: + add_to_report(i, os.path.basename(i)) + print_sep(i) + with open(i, "r") as report: + print(report.read()) + print_sep(i) - return True + return True # Arch we run on @@ -147,148 +148,161 @@ arch = os.uname()[4] class host_flavor: - def __init__(self, opts): - self.name = "host" - self.ns = False - self.root = None + def __init__(self, opts): + self.name = "host" + self.ns = False + self.root = None - def init(self, l_bins, x_bins): - pass + def init(self, l_bins, x_bins): + pass - def fini(self): - pass + def fini(self): + pass - @staticmethod - def clean(): - pass + @staticmethod + def 
clean(): + pass class ns_flavor: - __root_dirs = ["/bin", "/sbin", "/etc", "/lib", "/lib64", "/dev", "/dev/pts", "/dev/net", "/tmp", "/usr", "/proc", "/run"] + __root_dirs = [ + "/bin", "/sbin", "/etc", "/lib", "/lib64", "/dev", "/dev/pts", + "/dev/net", "/tmp", "/usr", "/proc", "/run" + ] - def __init__(self, opts): - self.name = "ns" - self.ns = True - self.uns = False - self.root = make_tests_root() - self.root_mounted = False + def __init__(self, opts): + self.name = "ns" + self.ns = True + self.uns = False + self.root = make_tests_root() + self.root_mounted = False - def __copy_one(self, fname): - tfname = self.root + fname - if not os.access(tfname, os.F_OK): - # Copying should be atomic as tests can be - # run in parallel - try: - os.makedirs(self.root + os.path.dirname(fname)) - except OSError as e: - if e.errno != errno.EEXIST: - raise - dst = tempfile.mktemp(".tso", "", self.root + os.path.dirname(fname)) - shutil.copy2(fname, dst) - os.rename(dst, tfname) + def __copy_one(self, fname): + tfname = self.root + fname + if not os.access(tfname, os.F_OK): + # Copying should be atomic as tests can be + # run in parallel + try: + os.makedirs(self.root + os.path.dirname(fname)) + except OSError as e: + if e.errno != errno.EEXIST: + raise + dst = tempfile.mktemp(".tso", "", + self.root + os.path.dirname(fname)) + shutil.copy2(fname, dst) + os.rename(dst, tfname) - def __copy_libs(self, binary): - ldd = subprocess.Popen(["ldd", binary], stdout = subprocess.PIPE) - xl = re.compile(r'^(linux-gate.so|linux-vdso(64)?.so|not a dynamic|.*\s*ldd\s)') + def __copy_libs(self, binary): + ldd = subprocess.Popen(["ldd", binary], stdout=subprocess.PIPE) + xl = re.compile( + r'^(linux-gate.so|linux-vdso(64)?.so|not a dynamic|.*\s*ldd\s)') - # This Mayakovsky-style code gets list of libraries a binary - # needs minus vdso and gate .so-s - libs = map(lambda x: x[1] == '=>' and x[2] or x[0], - map(lambda x: str(x).split(), - filter(lambda x: not xl.match(x), - map(lambda x: str(x).strip(), - filter(lambda x: str(x).startswith('\t'), ldd.stdout.read().decode('ascii').splitlines()))))) + # This Mayakovsky-style code gets list of libraries a binary + # needs minus vdso and gate .so-s + libs = map( + lambda x: x[1] == '=>' and x[2] or x[0], + map( + lambda x: str(x).split(), + filter( + lambda x: not xl.match(x), + map( + lambda x: str(x).strip(), + filter(lambda x: str(x).startswith('\t'), + ldd.stdout.read().decode( + 'ascii').splitlines()))))) - ldd.wait() + ldd.wait() - for lib in libs: - if not os.access(lib, os.F_OK): - raise test_fail_exc("Can't find lib %s required by %s" % (lib, binary)) - self.__copy_one(lib) + for lib in libs: + if not os.access(lib, os.F_OK): + raise test_fail_exc("Can't find lib %s required by %s" % + (lib, binary)) + self.__copy_one(lib) - def __mknod(self, name, rdev = None): - name = "/dev/" + name - if not rdev: - if not os.access(name, os.F_OK): - print("Skipping %s at root" % name) - return - else: - rdev = os.stat(name).st_rdev + def __mknod(self, name, rdev=None): + name = "/dev/" + name + if not rdev: + if not os.access(name, os.F_OK): + print("Skipping %s at root" % name) + return + else: + rdev = os.stat(name).st_rdev - name = self.root + name - os.mknod(name, stat.S_IFCHR, rdev) - os.chmod(name, 0o666) + name = self.root + name + os.mknod(name, stat.S_IFCHR, rdev) + os.chmod(name, 0o666) - def __construct_root(self): - for dir in self.__root_dirs: - os.mkdir(self.root + dir) - os.chmod(self.root + dir, 0o777) + def __construct_root(self): + for dir in 
self.__root_dirs: + os.mkdir(self.root + dir) + os.chmod(self.root + dir, 0o777) - for ldir in ["/bin", "/sbin", "/lib", "/lib64"]: - os.symlink(".." + ldir, self.root + "/usr" + ldir) + for ldir in ["/bin", "/sbin", "/lib", "/lib64"]: + os.symlink(".." + ldir, self.root + "/usr" + ldir) - self.__mknod("tty", os.makedev(5, 0)) - self.__mknod("null", os.makedev(1, 3)) - self.__mknod("net/tun") - self.__mknod("rtc") - self.__mknod("autofs", os.makedev(10, 235)) + self.__mknod("tty", os.makedev(5, 0)) + self.__mknod("null", os.makedev(1, 3)) + self.__mknod("net/tun") + self.__mknod("rtc") + self.__mknod("autofs", os.makedev(10, 235)) - def __copy_deps(self, deps): - for d in deps.split('|'): - if os.access(d, os.F_OK): - self.__copy_one(d) - self.__copy_libs(d) - return - raise test_fail_exc("Deps check %s failed" % deps) + def __copy_deps(self, deps): + for d in deps.split('|'): + if os.access(d, os.F_OK): + self.__copy_one(d) + self.__copy_libs(d) + return + raise test_fail_exc("Deps check %s failed" % deps) - def init(self, l_bins, x_bins): - subprocess.check_call(["mount", "--make-slave", "--bind", ".", self.root]) - self.root_mounted = True + def init(self, l_bins, x_bins): + subprocess.check_call( + ["mount", "--make-slave", "--bind", ".", self.root]) + self.root_mounted = True - if not os.access(self.root + "/.constructed", os.F_OK): - with open(os.path.abspath(__file__)) as o: - fcntl.flock(o, fcntl.LOCK_EX) - if not os.access(self.root + "/.constructed", os.F_OK): - print("Construct root for %s" % l_bins[0]) - self.__construct_root() - os.mknod(self.root + "/.constructed", stat.S_IFREG | 0o600) + if not os.access(self.root + "/.constructed", os.F_OK): + with open(os.path.abspath(__file__)) as o: + fcntl.flock(o, fcntl.LOCK_EX) + if not os.access(self.root + "/.constructed", os.F_OK): + print("Construct root for %s" % l_bins[0]) + self.__construct_root() + os.mknod(self.root + "/.constructed", stat.S_IFREG | 0o600) - for b in l_bins: - self.__copy_libs(b) - for b in x_bins: - self.__copy_deps(b) + for b in l_bins: + self.__copy_libs(b) + for b in x_bins: + self.__copy_deps(b) - def fini(self): - if self.root_mounted: - subprocess.check_call(["./umount2", self.root]) - self.root_mounted = False + def fini(self): + if self.root_mounted: + subprocess.check_call(["./umount2", self.root]) + self.root_mounted = False - @staticmethod - def clean(): - for d in ns_flavor.__root_dirs: - p = './' + d - print('Remove %s' % p) - if os.access(p, os.F_OK): - shutil.rmtree('./' + d) + @staticmethod + def clean(): + for d in ns_flavor.__root_dirs: + p = './' + d + print('Remove %s' % p) + if os.access(p, os.F_OK): + shutil.rmtree('./' + d) - if os.access('./.constructed', os.F_OK): - os.unlink('./.constructed') + if os.access('./.constructed', os.F_OK): + os.unlink('./.constructed') class userns_flavor(ns_flavor): - def __init__(self, opts): - ns_flavor.__init__(self, opts) - self.name = "userns" - self.uns = True + def __init__(self, opts): + ns_flavor.__init__(self, opts) + self.name = "userns" + self.uns = True - def init(self, l_bins, x_bins): - # To be able to create roots_yard in CRIU - os.chmod(".", os.stat(".").st_mode | 0o077) - ns_flavor.init(self, l_bins, x_bins) + def init(self, l_bins, x_bins): + # To be able to create roots_yard in CRIU + os.chmod(".", os.stat(".").st_mode | 0o077) + ns_flavor.init(self, l_bins, x_bins) - @staticmethod - def clean(): - pass + @staticmethod + def clean(): + pass flavors = {'h': host_flavor, 'ns': ns_flavor, 'uns': userns_flavor} @@ -300,47 +314,47 @@ 
flavors_codes = dict(zip(range(len(flavors)), sorted(flavors.keys()))) def encode_flav(f): - return sorted(flavors.keys()).index(f) + 128 + return sorted(flavors.keys()).index(f) + 128 def decode_flav(i): - return flavors_codes.get(i - 128, "unknown") + return flavors_codes.get(i - 128, "unknown") def tail(path): - p = subprocess.Popen(['tail', '-n1', path], - stdout = subprocess.PIPE) - out = p.stdout.readline() - p.wait() - return out.decode() + p = subprocess.Popen(['tail', '-n1', path], stdout=subprocess.PIPE) + out = p.stdout.readline() + p.wait() + return out.decode() def rpidfile(path): - with open(path) as fd: - return fd.readline().strip() + with open(path) as fd: + return fd.readline().strip() -def wait_pid_die(pid, who, tmo = 30): - stime = 0.1 - while stime < tmo: - try: - os.kill(int(pid), 0) - except OSError as e: - if e.errno != errno.ESRCH: - print(e) - break +def wait_pid_die(pid, who, tmo=30): + stime = 0.1 + while stime < tmo: + try: + os.kill(int(pid), 0) + except OSError as e: + if e.errno != errno.ESRCH: + print(e) + break - print("Wait for %s(%d) to die for %f" % (who, pid, stime)) - time.sleep(stime) - stime *= 2 - else: - subprocess.Popen(["ps", "-p", str(pid)]).wait() - subprocess.Popen(["ps", "axf", str(pid)]).wait() - raise test_fail_exc("%s die" % who) + print("Wait for %s(%d) to die for %f" % (who, pid, stime)) + time.sleep(stime) + stime *= 2 + else: + subprocess.Popen(["ps", "-p", str(pid)]).wait() + subprocess.Popen(["ps", "axf", str(pid)]).wait() + raise test_fail_exc("%s die" % who) def test_flag(tdesc, flag): - return flag in tdesc.get('flags', '').split() + return flag in tdesc.get('flags', '').split() + # # Exception thrown when something inside the test goes wrong, @@ -350,16 +364,17 @@ def test_flag(tdesc, flag): class test_fail_exc(Exception): - def __init__(self, step): - self.step = step + def __init__(self, step): + self.step = step - def __str__(self): - return str(self.step) + def __str__(self): + return str(self.step) class test_fail_expected_exc(Exception): - def __init__(self, cr_action): - self.cr_action = cr_action + def __init__(self, cr_action): + self.cr_action = cr_action + # # A test from zdtm/ directory. @@ -367,418 +382,440 @@ class test_fail_expected_exc(Exception): class zdtm_test: - def __init__(self, name, desc, flavor, freezer): - self.__name = name - self.__desc = desc - self.__freezer = None - self.__make_action('cleanout') - self.__pid = 0 - self.__flavor = flavor - self.__freezer = freezer - self._bins = [name] - self._env = {} - self._deps = desc.get('deps', []) - self.auto_reap = True - self.__timeout = int(self.__desc.get('timeout') or 30) + def __init__(self, name, desc, flavor, freezer): + self.__name = name + self.__desc = desc + self.__freezer = None + self.__make_action('cleanout') + self.__pid = 0 + self.__flavor = flavor + self.__freezer = freezer + self._bins = [name] + self._env = {} + self._deps = desc.get('deps', []) + self.auto_reap = True + self.__timeout = int(self.__desc.get('timeout') or 30) - def __make_action(self, act, env = None, root = None): - sys.stdout.flush() # Not to let make's messages appear before ours - tpath = self.__name + '.' + act - s_args = ['make', '--no-print-directory', - '-C', os.path.dirname(tpath), - os.path.basename(tpath)] + def __make_action(self, act, env=None, root=None): + sys.stdout.flush() # Not to let make's messages appear before ours + tpath = self.__name + '.' 
+ act + s_args = [ + 'make', '--no-print-directory', '-C', + os.path.dirname(tpath), + os.path.basename(tpath) + ] - if env: - env = dict(os.environ, **env) + if env: + env = dict(os.environ, **env) - s = subprocess.Popen(s_args, env = env, cwd = root, close_fds = True, - preexec_fn = self.__freezer and self.__freezer.attach or None) - if act == "pid": - try_run_hook(self, ["--post-start"]) - if s.wait(): - raise test_fail_exc(str(s_args)) + s = subprocess.Popen( + s_args, + env=env, + cwd=root, + close_fds=True, + preexec_fn=self.__freezer and self.__freezer.attach or None) + if act == "pid": + try_run_hook(self, ["--post-start"]) + if s.wait(): + raise test_fail_exc(str(s_args)) - if self.__freezer: - self.__freezer.freeze() + if self.__freezer: + self.__freezer.freeze() - def __pidfile(self): - return self.__name + '.pid' + def __pidfile(self): + return self.__name + '.pid' - def __wait_task_die(self): - wait_pid_die(int(self.__pid), self.__name, self.__timeout) + def __wait_task_die(self): + wait_pid_die(int(self.__pid), self.__name, self.__timeout) - def __add_wperms(self): - # Add write perms for .out and .pid files - for b in self._bins: - p = os.path.dirname(b) - os.chmod(p, os.stat(p).st_mode | 0o222) + def __add_wperms(self): + # Add write perms for .out and .pid files + for b in self._bins: + p = os.path.dirname(b) + os.chmod(p, os.stat(p).st_mode | 0o222) - def start(self): - self.__flavor.init(self._bins, self._deps) + def start(self): + self.__flavor.init(self._bins, self._deps) - print("Start test") + print("Start test") - env = self._env - if not self.__freezer.kernel: - env['ZDTM_THREAD_BOMB'] = "5" + env = self._env + if not self.__freezer.kernel: + env['ZDTM_THREAD_BOMB'] = "5" - if test_flag(self.__desc, 'pre-dump-notify'): - env['ZDTM_NOTIFY_FDIN'] = "100" - env['ZDTM_NOTIFY_FDOUT'] = "101" + if test_flag(self.__desc, 'pre-dump-notify'): + env['ZDTM_NOTIFY_FDIN'] = "100" + env['ZDTM_NOTIFY_FDOUT'] = "101" - if not test_flag(self.__desc, 'suid'): - # Numbers should match those in criu - env['ZDTM_UID'] = "18943" - env['ZDTM_GID'] = "58467" - env['ZDTM_GROUPS'] = "27495 48244" - self.__add_wperms() - else: - print("Test is SUID") + if not test_flag(self.__desc, 'suid'): + # Numbers should match those in criu + env['ZDTM_UID'] = "18943" + env['ZDTM_GID'] = "58467" + env['ZDTM_GROUPS'] = "27495 48244" + self.__add_wperms() + else: + print("Test is SUID") - if self.__flavor.ns: - env['ZDTM_NEWNS'] = "1" - env['ZDTM_ROOT'] = self.__flavor.root - env['PATH'] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + if self.__flavor.ns: + env['ZDTM_NEWNS'] = "1" + env['ZDTM_ROOT'] = self.__flavor.root + env['PATH'] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - if self.__flavor.uns: - env['ZDTM_USERNS'] = "1" - self.__add_wperms() - if os.getenv("GCOV"): - criu_dir = os.path.dirname(os.getcwd()) - criu_dir_r = "%s%s" % (self.__flavor.root, criu_dir) + if self.__flavor.uns: + env['ZDTM_USERNS'] = "1" + self.__add_wperms() + if os.getenv("GCOV"): + criu_dir = os.path.dirname(os.getcwd()) + criu_dir_r = "%s%s" % (self.__flavor.root, criu_dir) - env['ZDTM_CRIU'] = os.path.dirname(os.getcwd()) - subprocess.check_call(["mkdir", "-p", criu_dir_r]) + env['ZDTM_CRIU'] = os.path.dirname(os.getcwd()) + subprocess.check_call(["mkdir", "-p", criu_dir_r]) - self.__make_action('pid', env, self.__flavor.root) + self.__make_action('pid', env, self.__flavor.root) - try: - os.kill(int(self.getpid()), 0) - except Exception as e: - raise test_fail_exc("start: %s" % e) 
+ try: + os.kill(int(self.getpid()), 0) + except Exception as e: + raise test_fail_exc("start: %s" % e) - if not self.static(): - # Wait less than a second to give the test chance to - # move into some semi-random state - time.sleep(random.random()) + if not self.static(): + # Wait less than a second to give the test chance to + # move into some semi-random state + time.sleep(random.random()) - def kill(self, sig = signal.SIGKILL): - self.__freezer.thaw() - if self.__pid: - print("Send the %d signal to %s" % (sig, self.__pid)) - os.kill(int(self.__pid), sig) - self.gone(sig == signal.SIGKILL) + def kill(self, sig=signal.SIGKILL): + self.__freezer.thaw() + if self.__pid: + print("Send the %d signal to %s" % (sig, self.__pid)) + os.kill(int(self.__pid), sig) + self.gone(sig == signal.SIGKILL) - self.__flavor.fini() + self.__flavor.fini() - def pre_dump_notify(self): - env = self._env + def pre_dump_notify(self): + env = self._env - if 'ZDTM_NOTIFY_FDIN' not in env: - return + if 'ZDTM_NOTIFY_FDIN' not in env: + return - if self.__pid == 0: - self.getpid() + if self.__pid == 0: + self.getpid() - notify_fdout_path = "/proc/%s/fd/%s" % (self.__pid, env['ZDTM_NOTIFY_FDOUT']) - notify_fdin_path = "/proc/%s/fd/%s" % (self.__pid, env['ZDTM_NOTIFY_FDIN']) + notify_fdout_path = "/proc/%s/fd/%s" % (self.__pid, + env['ZDTM_NOTIFY_FDOUT']) + notify_fdin_path = "/proc/%s/fd/%s" % (self.__pid, + env['ZDTM_NOTIFY_FDIN']) - print("Send pre-dump notify to %s" % (self.__pid)) - with open(notify_fdout_path, "rb") as fdout: - with open(notify_fdin_path, "wb") as fdin: - fdin.write(struct.pack("i", 0)) - fdin.flush() - print("Wait pre-dump notify reply") - ret = struct.unpack('i', fdout.read(4)) - print("Completed pre-dump notify with %d" % (ret)) + print("Send pre-dump notify to %s" % (self.__pid)) + with open(notify_fdout_path, "rb") as fdout: + with open(notify_fdin_path, "wb") as fdin: + fdin.write(struct.pack("i", 0)) + fdin.flush() + print("Wait pre-dump notify reply") + ret = struct.unpack('i', fdout.read(4)) + print("Completed pre-dump notify with %d" % (ret)) - def stop(self): - self.__freezer.thaw() - self.getpid() # Read the pid from pidfile back - self.kill(signal.SIGTERM) + def stop(self): + self.__freezer.thaw() + self.getpid() # Read the pid from pidfile back + self.kill(signal.SIGTERM) - res = tail(self.__name + '.out') - if 'PASS' not in list(map(lambda s: s.strip(), res.split())): - if os.access(self.__name + '.out.inprogress', os.F_OK): - print_sep(self.__name + '.out.inprogress') - with open(self.__name + '.out.inprogress') as fd: - print(fd.read()) - print_sep(self.__name + '.out.inprogress') - raise test_fail_exc("result check") + res = tail(self.__name + '.out') + if 'PASS' not in list(map(lambda s: s.strip(), res.split())): + if os.access(self.__name + '.out.inprogress', os.F_OK): + print_sep(self.__name + '.out.inprogress') + with open(self.__name + '.out.inprogress') as fd: + print(fd.read()) + print_sep(self.__name + '.out.inprogress') + raise test_fail_exc("result check") - def getpid(self): - if self.__pid == 0: - self.__pid = rpidfile(self.__pidfile()) + def getpid(self): + if self.__pid == 0: + self.__pid = rpidfile(self.__pidfile()) - return self.__pid + return self.__pid - def getname(self): - return self.__name + def getname(self): + return self.__name - def __getcropts(self): - opts = self.__desc.get('opts', '').split() + ["--pidfile", os.path.realpath(self.__pidfile())] - if self.__flavor.ns: - opts += ["--root", self.__flavor.root] - if test_flag(self.__desc, 'crlib'): - opts 
+= ["-L", os.path.dirname(os.path.realpath(self.__name)) + '/lib'] - return opts + def __getcropts(self): + opts = self.__desc.get('opts', '').split() + [ + "--pidfile", os.path.realpath(self.__pidfile()) + ] + if self.__flavor.ns: + opts += ["--root", self.__flavor.root] + if test_flag(self.__desc, 'crlib'): + opts += [ + "-L", + os.path.dirname(os.path.realpath(self.__name)) + '/lib' + ] + return opts - def getdopts(self): - return self.__getcropts() + self.__freezer.getdopts() + self.__desc.get('dopts', '').split() + def getdopts(self): + return self.__getcropts() + self.__freezer.getdopts( + ) + self.__desc.get('dopts', '').split() - def getropts(self): - return self.__getcropts() + self.__freezer.getropts() + self.__desc.get('ropts', '').split() + def getropts(self): + return self.__getcropts() + self.__freezer.getropts( + ) + self.__desc.get('ropts', '').split() - def unlink_pidfile(self): - self.__pid = 0 - os.unlink(self.__pidfile()) + def unlink_pidfile(self): + self.__pid = 0 + os.unlink(self.__pidfile()) - def gone(self, force = True): - if not self.auto_reap: - pid, status = os.waitpid(int(self.__pid), 0) - if pid != int(self.__pid): - raise test_fail_exc("kill pid mess") + def gone(self, force=True): + if not self.auto_reap: + pid, status = os.waitpid(int(self.__pid), 0) + if pid != int(self.__pid): + raise test_fail_exc("kill pid mess") - self.__wait_task_die() - self.__pid = 0 - if force: - os.unlink(self.__pidfile()) + self.__wait_task_die() + self.__pid = 0 + if force: + os.unlink(self.__pidfile()) - def print_output(self): - if os.access(self.__name + '.out', os.R_OK): - print("Test output: " + "=" * 32) - with open(self.__name + '.out') as output: - print(output.read()) - print(" <<< " + "=" * 32) + def print_output(self): + if os.access(self.__name + '.out', os.R_OK): + print("Test output: " + "=" * 32) + with open(self.__name + '.out') as output: + print(output.read()) + print(" <<< " + "=" * 32) - def static(self): - return self.__name.split('/')[1] == 'static' + def static(self): + return self.__name.split('/')[1] == 'static' - def ns(self): - return self.__flavor.ns + def ns(self): + return self.__flavor.ns - def blocking(self): - return test_flag(self.__desc, 'crfail') + def blocking(self): + return test_flag(self.__desc, 'crfail') - @staticmethod - def available(): - if not os.access("umount2", os.X_OK): - subprocess.check_call(["make", "umount2"]) - if not os.access("zdtm_ct", os.X_OK): - subprocess.check_call(["make", "zdtm_ct"]) - if not os.access("zdtm/lib/libzdtmtst.a", os.F_OK): - subprocess.check_call(["make", "-C", "zdtm/"]) - subprocess.check_call(["flock", "zdtm_mount_cgroups.lock", "./zdtm_mount_cgroups"]) + @staticmethod + def available(): + if not os.access("umount2", os.X_OK): + subprocess.check_call(["make", "umount2"]) + if not os.access("zdtm_ct", os.X_OK): + subprocess.check_call(["make", "zdtm_ct"]) + if not os.access("zdtm/lib/libzdtmtst.a", os.F_OK): + subprocess.check_call(["make", "-C", "zdtm/"]) + subprocess.check_call( + ["flock", "zdtm_mount_cgroups.lock", "./zdtm_mount_cgroups"]) - @staticmethod - def cleanup(): - subprocess.check_call(["flock", "zdtm_mount_cgroups.lock", "./zdtm_umount_cgroups"]) + @staticmethod + def cleanup(): + subprocess.check_call( + ["flock", "zdtm_mount_cgroups.lock", "./zdtm_umount_cgroups"]) def load_module_from_file(name, path): - if sys.version_info[0] == 3 and sys.version_info[1] >= 5: - import importlib.util - spec = importlib.util.spec_from_file_location(name, path) - mod = 
importlib.util.module_from_spec(spec) - spec.loader.exec_module(mod) - else: - import imp - mod = imp.load_source(name, path) - return mod + if sys.version_info[0] == 3 and sys.version_info[1] >= 5: + import importlib.util + spec = importlib.util.spec_from_file_location(name, path) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + else: + import imp + mod = imp.load_source(name, path) + return mod class inhfd_test: - def __init__(self, name, desc, flavor, freezer): - self.__name = os.path.basename(name) - print("Load %s" % name) - self.__fdtyp = load_module_from_file(self.__name, name) - self.__peer_pid = 0 - self.__files = None - self.__peer_file_names = [] - self.__dump_opts = [] - self.__messages = {} + def __init__(self, name, desc, flavor, freezer): + self.__name = os.path.basename(name) + print("Load %s" % name) + self.__fdtyp = load_module_from_file(self.__name, name) + self.__peer_pid = 0 + self.__files = None + self.__peer_file_names = [] + self.__dump_opts = [] + self.__messages = {} - def __get_message(self, i): - m = self.__messages.get(i, None) - if not m: - m = b"".join([random.choice(string.ascii_letters).encode() for _ in range(10)]) + b"%06d" % i - self.__messages[i] = m - return m + def __get_message(self, i): + m = self.__messages.get(i, None) + if not m: + m = b"".join([ + random.choice(string.ascii_letters).encode() for _ in range(10) + ]) + b"%06d" % i + self.__messages[i] = m + return m - def start(self): - self.__files = self.__fdtyp.create_fds() + def start(self): + self.__files = self.__fdtyp.create_fds() - # Check FDs returned for inter-connection - i = 0 - for my_file, peer_file in self.__files: - msg = self.__get_message(i) - my_file.write(msg) - my_file.flush() - data = peer_file.read(len(msg)) - if data != msg: - raise test_fail_exc("FDs screwup: %r %r" % (msg, data)) - i += 1 + # Check FDs returned for inter-connection + i = 0 + for my_file, peer_file in self.__files: + msg = self.__get_message(i) + my_file.write(msg) + my_file.flush() + data = peer_file.read(len(msg)) + if data != msg: + raise test_fail_exc("FDs screwup: %r %r" % (msg, data)) + i += 1 - start_pipe = os.pipe() - self.__peer_pid = os.fork() - if self.__peer_pid == 0: - os.setsid() + start_pipe = os.pipe() + self.__peer_pid = os.fork() + if self.__peer_pid == 0: + os.setsid() - for _, peer_file in self.__files: - getattr(self.__fdtyp, "child_prep", lambda fd: None)(peer_file) + for _, peer_file in self.__files: + getattr(self.__fdtyp, "child_prep", lambda fd: None)(peer_file) - try: - os.unlink(self.__name + ".out") - except Exception as e: - print(e) - fd = os.open(self.__name + ".out", os.O_WRONLY | os.O_APPEND | os.O_CREAT) - os.dup2(fd, 1) - os.dup2(fd, 2) - os.close(fd) - fd = os.open("/dev/null", os.O_RDONLY) - os.dup2(fd, 0) - for my_file, _ in self.__files: - my_file.close() - os.close(start_pipe[0]) - os.close(start_pipe[1]) - i = 0 - for _, peer_file in self.__files: - msg = self.__get_message(i) - my_file.close() - try: - data = peer_file.read(16) - except Exception as e: - print("Unable to read a peer file: %s" % e) - sys.exit(1) + try: + os.unlink(self.__name + ".out") + except Exception as e: + print(e) + fd = os.open(self.__name + ".out", + os.O_WRONLY | os.O_APPEND | os.O_CREAT) + os.dup2(fd, 1) + os.dup2(fd, 2) + os.close(fd) + fd = os.open("/dev/null", os.O_RDONLY) + os.dup2(fd, 0) + for my_file, _ in self.__files: + my_file.close() + os.close(start_pipe[0]) + os.close(start_pipe[1]) + i = 0 + for _, peer_file in self.__files: + msg = 
self.__get_message(i) + my_file.close() + try: + data = peer_file.read(16) + except Exception as e: + print("Unable to read a peer file: %s" % e) + sys.exit(1) - if data != msg: - print("%r %r" % (data, msg)) - i += 1 - sys.exit(data == msg and 42 or 2) + if data != msg: + print("%r %r" % (data, msg)) + i += 1 + sys.exit(data == msg and 42 or 2) - os.close(start_pipe[1]) - os.read(start_pipe[0], 12) - os.close(start_pipe[0]) + os.close(start_pipe[1]) + os.read(start_pipe[0], 12) + os.close(start_pipe[0]) - for _, peer_file in self.__files: - self.__peer_file_names.append(self.__fdtyp.filename(peer_file)) - self.__dump_opts += self.__fdtyp.dump_opts(peer_file) + for _, peer_file in self.__files: + self.__peer_file_names.append(self.__fdtyp.filename(peer_file)) + self.__dump_opts += self.__fdtyp.dump_opts(peer_file) - self.__fds = set(os.listdir("/proc/%s/fd" % self.__peer_pid)) + self.__fds = set(os.listdir("/proc/%s/fd" % self.__peer_pid)) - def stop(self): - fds = set(os.listdir("/proc/%s/fd" % self.__peer_pid)) - if fds != self.__fds: - raise test_fail_exc("File descriptors mismatch: %s %s" % (fds, self.__fds)) - i = 0 - for my_file, _ in self.__files: - msg = self.__get_message(i) - my_file.write(msg) - my_file.flush() - i += 1 - pid, status = os.waitpid(self.__peer_pid, 0) - with open(self.__name + ".out") as output: - print(output.read()) - self.__peer_pid = 0 - if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 42: - raise test_fail_exc("test failed with %d" % status) + def stop(self): + fds = set(os.listdir("/proc/%s/fd" % self.__peer_pid)) + if fds != self.__fds: + raise test_fail_exc("File descriptors mismatch: %s %s" % + (fds, self.__fds)) + i = 0 + for my_file, _ in self.__files: + msg = self.__get_message(i) + my_file.write(msg) + my_file.flush() + i += 1 + pid, status = os.waitpid(self.__peer_pid, 0) + with open(self.__name + ".out") as output: + print(output.read()) + self.__peer_pid = 0 + if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 42: + raise test_fail_exc("test failed with %d" % status) - def kill(self): - if self.__peer_pid: - os.kill(self.__peer_pid, signal.SIGKILL) + def kill(self): + if self.__peer_pid: + os.kill(self.__peer_pid, signal.SIGKILL) - def getname(self): - return self.__name + def getname(self): + return self.__name - def getpid(self): - return "%s" % self.__peer_pid + def getpid(self): + return "%s" % self.__peer_pid - def gone(self, force = True): - os.waitpid(self.__peer_pid, 0) - wait_pid_die(self.__peer_pid, self.__name) - self.__files = None + def gone(self, force=True): + os.waitpid(self.__peer_pid, 0) + wait_pid_die(self.__peer_pid, self.__name) + self.__files = None - def getdopts(self): - return self.__dump_opts + def getdopts(self): + return self.__dump_opts - def getropts(self): - self.__files = self.__fdtyp.create_fds() - ropts = ["--restore-sibling"] - for i in range(len(self.__files)): - my_file, peer_file = self.__files[i] - fd = peer_file.fileno() - fdflags = fcntl.fcntl(fd, fcntl.F_GETFD) & ~fcntl.FD_CLOEXEC - fcntl.fcntl(fd, fcntl.F_SETFD, fdflags) - peer_file_name = self.__peer_file_names[i] - ropts.extend(["--inherit-fd", "fd[%d]:%s" % (fd, peer_file_name)]) - return ropts + def getropts(self): + self.__files = self.__fdtyp.create_fds() + ropts = ["--restore-sibling"] + for i in range(len(self.__files)): + my_file, peer_file = self.__files[i] + fd = peer_file.fileno() + fdflags = fcntl.fcntl(fd, fcntl.F_GETFD) & ~fcntl.FD_CLOEXEC + fcntl.fcntl(fd, fcntl.F_SETFD, fdflags) + peer_file_name = self.__peer_file_names[i] + 
ropts.extend(["--inherit-fd", "fd[%d]:%s" % (fd, peer_file_name)]) + return ropts - def print_output(self): - pass + def print_output(self): + pass - def static(self): - return True + def static(self): + return True - def blocking(self): - return False + def blocking(self): + return False - @staticmethod - def available(): - pass + @staticmethod + def available(): + pass - @staticmethod - def cleanup(): - pass + @staticmethod + def cleanup(): + pass class groups_test(zdtm_test): - def __init__(self, name, desc, flavor, freezer): - zdtm_test.__init__(self, 'zdtm/lib/groups', desc, flavor, freezer) - if flavor.ns: - self.__real_name = name - with open(name) as fd: - self.__subs = map(lambda x: x.strip(), fd.readlines()) - print("Subs:\n%s" % '\n'.join(self.__subs)) - else: - self.__real_name = '' - self.__subs = [] + def __init__(self, name, desc, flavor, freezer): + zdtm_test.__init__(self, 'zdtm/lib/groups', desc, flavor, freezer) + if flavor.ns: + self.__real_name = name + with open(name) as fd: + self.__subs = map(lambda x: x.strip(), fd.readlines()) + print("Subs:\n%s" % '\n'.join(self.__subs)) + else: + self.__real_name = '' + self.__subs = [] - self._bins += self.__subs - self._deps += get_test_desc('zdtm/lib/groups')['deps'] - self._env = {'ZDTM_TESTS': self.__real_name} + self._bins += self.__subs + self._deps += get_test_desc('zdtm/lib/groups')['deps'] + self._env = {'ZDTM_TESTS': self.__real_name} - def __get_start_cmd(self, name): - tdir = os.path.dirname(name) - tname = os.path.basename(name) + def __get_start_cmd(self, name): + tdir = os.path.dirname(name) + tname = os.path.basename(name) - s_args = ['make', '--no-print-directory', '-C', tdir] - subprocess.check_call(s_args + [tname + '.cleanout']) - s = subprocess.Popen(s_args + ['--dry-run', tname + '.pid'], stdout = subprocess.PIPE) - cmd = s.stdout.readlines().pop().strip() - s.wait() + s_args = ['make', '--no-print-directory', '-C', tdir] + subprocess.check_call(s_args + [tname + '.cleanout']) + s = subprocess.Popen(s_args + ['--dry-run', tname + '.pid'], + stdout=subprocess.PIPE) + cmd = s.stdout.readlines().pop().strip() + s.wait() - return 'cd /' + tdir + ' && ' + cmd + return 'cd /' + tdir + ' && ' + cmd - def start(self): - if (self.__subs): - with open(self.__real_name + '.start', 'w') as f: - for test in self.__subs: - cmd = self.__get_start_cmd(test) - f.write(cmd + '\n') + def start(self): + if (self.__subs): + with open(self.__real_name + '.start', 'w') as f: + for test in self.__subs: + cmd = self.__get_start_cmd(test) + f.write(cmd + '\n') - with open(self.__real_name + '.stop', 'w') as f: - for test in self.__subs: - f.write('kill -TERM `cat /%s.pid`\n' % test) + with open(self.__real_name + '.stop', 'w') as f: + for test in self.__subs: + f.write('kill -TERM `cat /%s.pid`\n' % test) - zdtm_test.start(self) + zdtm_test.start(self) - def stop(self): - zdtm_test.stop(self) + def stop(self): + zdtm_test.stop(self) - for test in self.__subs: - res = tail(test + '.out') - if 'PASS' not in res.split(): - raise test_fail_exc("sub %s result check" % test) + for test in self.__subs: + res = tail(test + '.out') + if 'PASS' not in res.split(): + raise test_fail_exc("sub %s result check" % test) test_classes = {'zdtm': zdtm_test, 'inhfd': inhfd_test, 'groups': groups_test} @@ -791,495 +828,543 @@ join_ns_file = '/run/netns/zdtm_netns' class criu_cli: - @staticmethod - def run(action, args, criu_bin, fault = None, strace = [], preexec = None, nowait = False): - env = dict(os.environ, ASAN_OPTIONS = 
"log_path=asan.log:disable_coredump=0:detect_leaks=0") + @staticmethod + def run(action, + args, + criu_bin, + fault=None, + strace=[], + preexec=None, + nowait=False): + env = dict( + os.environ, + ASAN_OPTIONS="log_path=asan.log:disable_coredump=0:detect_leaks=0") - if fault: - print("Forcing %s fault" % fault) - env['CRIU_FAULT'] = fault + if fault: + print("Forcing %s fault" % fault) + env['CRIU_FAULT'] = fault - cr = subprocess.Popen(strace + [criu_bin, action, "--no-default-config"] + args, - env = env, close_fds = False, preexec_fn = preexec) - if nowait: - return cr - return cr.wait() + cr = subprocess.Popen(strace + + [criu_bin, action, "--no-default-config"] + args, + env=env, + close_fds=False, + preexec_fn=preexec) + if nowait: + return cr + return cr.wait() class criu_rpc_process: - def wait(self): - return self.criu.wait_pid(self.pid) + def wait(self): + return self.criu.wait_pid(self.pid) - def terminate(self): - os.kill(self.pid, signal.SIGTERM) + def terminate(self): + os.kill(self.pid, signal.SIGTERM) class criu_rpc: - @staticmethod - def __set_opts(criu, args, ctx): - while len(args) != 0: - arg = args.pop(0) - if arg == '-v4': - criu.opts.log_level = 4 - continue - if arg == '-o': - criu.opts.log_file = args.pop(0) - continue - if arg == '-D': - criu.opts.images_dir_fd = os.open(args.pop(0), os.O_DIRECTORY) - ctx['imgd'] = criu.opts.images_dir_fd - continue - if arg == '-t': - criu.opts.pid = int(args.pop(0)) - continue - if arg == '--pidfile': - ctx['pidf'] = args.pop(0) - continue - if arg == '--timeout': - criu.opts.timeout = int(args.pop(0)) - continue - if arg == '--restore-detached': - # Set by service by default - ctx['rd'] = True - continue - if arg == '--root': - criu.opts.root = args.pop(0) - continue - if arg == '--external': - criu.opts.external.append(args.pop(0)) - continue - if arg == '--status-fd': - fd = int(args.pop(0)) - os.write(fd, b"\0") - fcntl.fcntl(fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC) - continue - if arg == '--port': - criu.opts.ps.port = int(args.pop(0)) - continue - if arg == '--address': - criu.opts.ps.address = args.pop(0) - continue - if arg == '--page-server': - continue - if arg == '--prev-images-dir': - criu.opts.parent_img = args.pop(0) - continue - if arg == '--track-mem': - criu.opts.track_mem = True - continue - if arg == '--tcp-established': - criu.opts.tcp_established = True - continue - if arg == '--restore-sibling': - criu.opts.rst_sibling = True - continue - if arg == "--inherit-fd": - inhfd = criu.opts.inherit_fd.add() - key = args.pop(0) - fd, key = key.split(":", 1) - inhfd.fd = int(fd[3:-1]) - inhfd.key = key - continue + @staticmethod + def __set_opts(criu, args, ctx): + while len(args) != 0: + arg = args.pop(0) + if arg == '-v4': + criu.opts.log_level = 4 + continue + if arg == '-o': + criu.opts.log_file = args.pop(0) + continue + if arg == '-D': + criu.opts.images_dir_fd = os.open(args.pop(0), os.O_DIRECTORY) + ctx['imgd'] = criu.opts.images_dir_fd + continue + if arg == '-t': + criu.opts.pid = int(args.pop(0)) + continue + if arg == '--pidfile': + ctx['pidf'] = args.pop(0) + continue + if arg == '--timeout': + criu.opts.timeout = int(args.pop(0)) + continue + if arg == '--restore-detached': + # Set by service by default + ctx['rd'] = True + continue + if arg == '--root': + criu.opts.root = args.pop(0) + continue + if arg == '--external': + criu.opts.external.append(args.pop(0)) + continue + if arg == '--status-fd': + fd = int(args.pop(0)) + os.write(fd, b"\0") + fcntl.fcntl(fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC) + continue 
+ if arg == '--port': + criu.opts.ps.port = int(args.pop(0)) + continue + if arg == '--address': + criu.opts.ps.address = args.pop(0) + continue + if arg == '--page-server': + continue + if arg == '--prev-images-dir': + criu.opts.parent_img = args.pop(0) + continue + if arg == '--track-mem': + criu.opts.track_mem = True + continue + if arg == '--tcp-established': + criu.opts.tcp_established = True + continue + if arg == '--restore-sibling': + criu.opts.rst_sibling = True + continue + if arg == "--inherit-fd": + inhfd = criu.opts.inherit_fd.add() + key = args.pop(0) + fd, key = key.split(":", 1) + inhfd.fd = int(fd[3:-1]) + inhfd.key = key + continue - raise test_fail_exc('RPC for %s required' % arg) + raise test_fail_exc('RPC for %s required' % arg) - @staticmethod - def run(action, args, criu_bin, fault = None, strace = [], preexec = None, nowait = False): - if fault: - raise test_fail_exc('RPC and FAULT not supported') - if strace: - raise test_fail_exc('RPC and SAT not supported') - if preexec: - raise test_fail_exc('RPC and PREEXEC not supported') + @staticmethod + def run(action, + args, + criu_bin, + fault=None, + strace=[], + preexec=None, + nowait=False): + if fault: + raise test_fail_exc('RPC and FAULT not supported') + if strace: + raise test_fail_exc('RPC and SAT not supported') + if preexec: + raise test_fail_exc('RPC and PREEXEC not supported') - ctx = {} # Object used to keep info untill action is done - criu = crpc.criu() - criu.use_binary(criu_bin) - criu_rpc.__set_opts(criu, args, ctx) - p = None + ctx = {} # Object used to keep info untill action is done + criu = crpc.criu() + criu.use_binary(criu_bin) + criu_rpc.__set_opts(criu, args, ctx) + p = None - try: - if action == 'dump': - criu.dump() - elif action == 'pre-dump': - criu.pre_dump() - elif action == 'restore': - if 'rd' not in ctx: - raise test_fail_exc('RPC Non-detached restore is impossible') + try: + if action == 'dump': + criu.dump() + elif action == 'pre-dump': + criu.pre_dump() + elif action == 'restore': + if 'rd' not in ctx: + raise test_fail_exc( + 'RPC Non-detached restore is impossible') - res = criu.restore() - pidf = ctx.get('pidf') - if pidf: - with open(pidf, 'w') as fd: - fd.write('%d\n' % res.pid) - elif action == "page-server": - res = criu.page_server_chld() - p = criu_rpc_process() - p.pid = res.pid - p.criu = criu - else: - raise test_fail_exc('RPC for %s required' % action) - except crpc.CRIUExceptionExternal as e: - print("Fail", e) - ret = -1 - else: - ret = 0 + res = criu.restore() + pidf = ctx.get('pidf') + if pidf: + with open(pidf, 'w') as fd: + fd.write('%d\n' % res.pid) + elif action == "page-server": + res = criu.page_server_chld() + p = criu_rpc_process() + p.pid = res.pid + p.criu = criu + else: + raise test_fail_exc('RPC for %s required' % action) + except crpc.CRIUExceptionExternal as e: + print("Fail", e) + ret = -1 + else: + ret = 0 - imgd = ctx.get('imgd') - if imgd: - os.close(imgd) + imgd = ctx.get('imgd') + if imgd: + os.close(imgd) - if nowait and ret == 0: - return p + if nowait and ret == 0: + return p - return ret + return ret class criu: - def __init__(self, opts): - self.__test = None - self.__dump_path = None - self.__iter = 0 - self.__prev_dump_iter = None - self.__page_server = bool(opts['page_server']) - self.__remote_lazy_pages = bool(opts['remote_lazy_pages']) - self.__lazy_pages = (self.__remote_lazy_pages or - bool(opts['lazy_pages'])) - self.__lazy_migrate = bool(opts['lazy_migrate']) - self.__restore_sibling = bool(opts['sibling']) - self.__join_ns = 
bool(opts['join_ns']) - self.__empty_ns = bool(opts['empty_ns']) - self.__fault = opts['fault'] - self.__script = opts['script'] - self.__sat = bool(opts['sat']) - self.__dedup = bool(opts['dedup']) - self.__mdedup = bool(opts['noauto_dedup']) - self.__user = bool(opts['user']) - self.__leave_stopped = bool(opts['stop']) - self.__criu = (opts['rpc'] and criu_rpc or criu_cli) - self.__show_stats = bool(opts['show_stats']) - self.__lazy_pages_p = None - self.__page_server_p = None - self.__dump_process = None - self.__tls = self.__tls_options() if opts['tls'] else [] - self.__criu_bin = opts['criu_bin'] - self.__crit_bin = opts['crit_bin'] + def __init__(self, opts): + self.__test = None + self.__dump_path = None + self.__iter = 0 + self.__prev_dump_iter = None + self.__page_server = bool(opts['page_server']) + self.__remote_lazy_pages = bool(opts['remote_lazy_pages']) + self.__lazy_pages = (self.__remote_lazy_pages or + bool(opts['lazy_pages'])) + self.__lazy_migrate = bool(opts['lazy_migrate']) + self.__restore_sibling = bool(opts['sibling']) + self.__join_ns = bool(opts['join_ns']) + self.__empty_ns = bool(opts['empty_ns']) + self.__fault = opts['fault'] + self.__script = opts['script'] + self.__sat = bool(opts['sat']) + self.__dedup = bool(opts['dedup']) + self.__mdedup = bool(opts['noauto_dedup']) + self.__user = bool(opts['user']) + self.__leave_stopped = bool(opts['stop']) + self.__criu = (opts['rpc'] and criu_rpc or criu_cli) + self.__show_stats = bool(opts['show_stats']) + self.__lazy_pages_p = None + self.__page_server_p = None + self.__dump_process = None + self.__tls = self.__tls_options() if opts['tls'] else [] + self.__criu_bin = opts['criu_bin'] + self.__crit_bin = opts['crit_bin'] - def fini(self): - if self.__lazy_migrate: - ret = self.__dump_process.wait() - if self.__lazy_pages_p: - ret = self.__lazy_pages_p.wait() - grep_errors(os.path.join(self.__ddir(), "lazy-pages.log")) - self.__lazy_pages_p = None - if ret: - raise test_fail_exc("criu lazy-pages exited with %s" % ret) - if self.__page_server_p: - ret = self.__page_server_p.wait() - grep_errors(os.path.join(self.__ddir(), "page-server.log")) - self.__page_server_p = None - if ret: - raise test_fail_exc("criu page-server exited with %s" % ret) - if self.__dump_process: - ret = self.__dump_process.wait() - grep_errors(os.path.join(self.__ddir(), "dump.log")) - self.__dump_process = None - if ret: - raise test_fail_exc("criu dump exited with %s" % ret) - return + def fini(self): + if self.__lazy_migrate: + ret = self.__dump_process.wait() + if self.__lazy_pages_p: + ret = self.__lazy_pages_p.wait() + grep_errors(os.path.join(self.__ddir(), "lazy-pages.log")) + self.__lazy_pages_p = None + if ret: + raise test_fail_exc("criu lazy-pages exited with %s" % ret) + if self.__page_server_p: + ret = self.__page_server_p.wait() + grep_errors(os.path.join(self.__ddir(), "page-server.log")) + self.__page_server_p = None + if ret: + raise test_fail_exc("criu page-server exited with %s" % ret) + if self.__dump_process: + ret = self.__dump_process.wait() + grep_errors(os.path.join(self.__ddir(), "dump.log")) + self.__dump_process = None + if ret: + raise test_fail_exc("criu dump exited with %s" % ret) + return - def logs(self): - return self.__dump_path + def logs(self): + return self.__dump_path - def set_test(self, test): - self.__test = test - self.__dump_path = "dump/" + test.getname() + "/" + test.getpid() - if os.path.exists(self.__dump_path): - for i in range(100): - newpath = self.__dump_path + "." 
+ str(i) - if not os.path.exists(newpath): - os.rename(self.__dump_path, newpath) - break - else: - raise test_fail_exc("couldn't find dump dir %s" % self.__dump_path) + def set_test(self, test): + self.__test = test + self.__dump_path = "dump/" + test.getname() + "/" + test.getpid() + if os.path.exists(self.__dump_path): + for i in range(100): + newpath = self.__dump_path + "." + str(i) + if not os.path.exists(newpath): + os.rename(self.__dump_path, newpath) + break + else: + raise test_fail_exc("couldn't find dump dir %s" % + self.__dump_path) - os.makedirs(self.__dump_path) + os.makedirs(self.__dump_path) - def cleanup(self): - if self.__dump_path: - print("Removing %s" % self.__dump_path) - shutil.rmtree(self.__dump_path) + def cleanup(self): + if self.__dump_path: + print("Removing %s" % self.__dump_path) + shutil.rmtree(self.__dump_path) - def __tls_options(self): - pki_dir = os.path.dirname(os.path.abspath(__file__)) + "/pki" - return ["--tls", "--tls-no-cn-verify", - "--tls-key", pki_dir + "/key.pem", - "--tls-cert", pki_dir + "/cert.pem", - "--tls-cacert", pki_dir + "/cacert.pem"] + def __tls_options(self): + pki_dir = os.path.dirname(os.path.abspath(__file__)) + "/pki" + return [ + "--tls", "--tls-no-cn-verify", "--tls-key", pki_dir + "/key.pem", + "--tls-cert", pki_dir + "/cert.pem", "--tls-cacert", + pki_dir + "/cacert.pem" + ] - def __ddir(self): - return os.path.join(self.__dump_path, "%d" % self.__iter) + def __ddir(self): + return os.path.join(self.__dump_path, "%d" % self.__iter) - def set_user_id(self): - # Numbers should match those in zdtm_test - os.setresgid(58467, 58467, 58467) - os.setresuid(18943, 18943, 18943) + def set_user_id(self): + # Numbers should match those in zdtm_test + os.setresgid(58467, 58467, 58467) + os.setresuid(18943, 18943, 18943) - def __criu_act(self, action, opts = [], log = None, nowait = False): - if not log: - log = action + ".log" + def __criu_act(self, action, opts=[], log=None, nowait=False): + if not log: + log = action + ".log" - s_args = ["-o", log, "-D", self.__ddir(), "-v4"] + opts + s_args = ["-o", log, "-D", self.__ddir(), "-v4"] + opts - with open(os.path.join(self.__ddir(), action + '.cropt'), 'w') as f: - f.write(' '.join(s_args) + '\n') + with open(os.path.join(self.__ddir(), action + '.cropt'), 'w') as f: + f.write(' '.join(s_args) + '\n') - print("Run criu " + action) + print("Run criu " + action) - strace = [] - if self.__sat: - fname = os.path.join(self.__ddir(), action + '.strace') - print_fname(fname, 'strace') - strace = ["strace", "-o", fname, '-T'] - if action == 'restore': - strace += ['-f'] - s_args += ['--action-script', os.getcwd() + '/../scripts/fake-restore.sh'] + strace = [] + if self.__sat: + fname = os.path.join(self.__ddir(), action + '.strace') + print_fname(fname, 'strace') + strace = ["strace", "-o", fname, '-T'] + if action == 'restore': + strace += ['-f'] + s_args += [ + '--action-script', + os.getcwd() + '/../scripts/fake-restore.sh' + ] - if self.__script: - s_args += ['--action-script', self.__script] + if self.__script: + s_args += ['--action-script', self.__script] - if action == "restore": - preexec = None - else: - preexec = self.__user and self.set_user_id or None + if action == "restore": + preexec = None + else: + preexec = self.__user and self.set_user_id or None - __ddir = self.__ddir() + __ddir = self.__ddir() - status_fds = None - if nowait: - status_fds = os.pipe() - fd = status_fds[1] - fdflags = fcntl.fcntl(fd, fcntl.F_GETFD) - fcntl.fcntl(fd, fcntl.F_SETFD, fdflags & ~fcntl.FD_CLOEXEC) 
- s_args += ["--status-fd", str(fd)] + status_fds = None + if nowait: + status_fds = os.pipe() + fd = status_fds[1] + fdflags = fcntl.fcntl(fd, fcntl.F_GETFD) + fcntl.fcntl(fd, fcntl.F_SETFD, fdflags & ~fcntl.FD_CLOEXEC) + s_args += ["--status-fd", str(fd)] - with open("/proc/sys/kernel/ns_last_pid") as ns_last_pid_fd: - ns_last_pid = ns_last_pid_fd.read() + with open("/proc/sys/kernel/ns_last_pid") as ns_last_pid_fd: + ns_last_pid = ns_last_pid_fd.read() - ret = self.__criu.run(action, s_args, self.__criu_bin, self.__fault, strace, preexec, nowait) + ret = self.__criu.run(action, s_args, self.__criu_bin, self.__fault, + strace, preexec, nowait) - if nowait: - os.close(status_fds[1]) - if os.read(status_fds[0], 1) != b'\0': - ret = ret.wait() - if self.__test.blocking(): - raise test_fail_expected_exc(action) - else: - raise test_fail_exc("criu %s exited with %s" % (action, ret)) - os.close(status_fds[0]) - return ret + if nowait: + os.close(status_fds[1]) + if os.read(status_fds[0], 1) != b'\0': + ret = ret.wait() + if self.__test.blocking(): + raise test_fail_expected_exc(action) + else: + raise test_fail_exc("criu %s exited with %s" % + (action, ret)) + os.close(status_fds[0]) + return ret - grep_errors(os.path.join(__ddir, log)) - if ret != 0: - if self.__fault and int(self.__fault) < 128: - try_run_hook(self.__test, ["--fault", action]) - if action == "dump": - # create a clean directory for images - os.rename(__ddir, __ddir + ".fail") - os.mkdir(__ddir) - os.chmod(__ddir, 0o777) - else: - # on restore we move only a log file, because we need images - os.rename(os.path.join(__ddir, log), os.path.join(__ddir, log + ".fail")) - # restore ns_last_pid to avoid a case when criu gets - # PID of one of restored processes. - with open("/proc/sys/kernel/ns_last_pid", "w+") as fd: - fd.write(ns_last_pid) - # try again without faults - print("Run criu " + action) - ret = self.__criu.run(action, s_args, self.__criu_bin, False, strace, preexec) - grep_errors(os.path.join(__ddir, log)) - if ret == 0: - return - rst_succeeded = os.access(os.path.join(__ddir, "restore-succeeded"), os.F_OK) - if self.__test.blocking() or (self.__sat and action == 'restore' and rst_succeeded): - raise test_fail_expected_exc(action) - else: - raise test_fail_exc("CRIU %s" % action) + grep_errors(os.path.join(__ddir, log)) + if ret != 0: + if self.__fault and int(self.__fault) < 128: + try_run_hook(self.__test, ["--fault", action]) + if action == "dump": + # create a clean directory for images + os.rename(__ddir, __ddir + ".fail") + os.mkdir(__ddir) + os.chmod(__ddir, 0o777) + else: + # on restore we move only a log file, because we need images + os.rename(os.path.join(__ddir, log), + os.path.join(__ddir, log + ".fail")) + # restore ns_last_pid to avoid a case when criu gets + # PID of one of restored processes. 
+ with open("/proc/sys/kernel/ns_last_pid", "w+") as fd: + fd.write(ns_last_pid) + # try again without faults + print("Run criu " + action) + ret = self.__criu.run(action, s_args, self.__criu_bin, False, + strace, preexec) + grep_errors(os.path.join(__ddir, log)) + if ret == 0: + return + rst_succeeded = os.access( + os.path.join(__ddir, "restore-succeeded"), os.F_OK) + if self.__test.blocking() or (self.__sat and action == 'restore' and + rst_succeeded): + raise test_fail_expected_exc(action) + else: + raise test_fail_exc("CRIU %s" % action) - def __stats_file(self, action): - return os.path.join(self.__ddir(), "stats-%s" % action) + def __stats_file(self, action): + return os.path.join(self.__ddir(), "stats-%s" % action) - def show_stats(self, action): - if not self.__show_stats: - return + def show_stats(self, action): + if not self.__show_stats: + return - subprocess.Popen([self.__crit_bin, "show", self.__stats_file(action)]).wait() + subprocess.Popen([self.__crit_bin, "show", + self.__stats_file(action)]).wait() - def check_pages_counts(self): - if not os.access(self.__stats_file("dump"), os.R_OK): - return + def check_pages_counts(self): + if not os.access(self.__stats_file("dump"), os.R_OK): + return - stats_written = -1 - with open(self.__stats_file("dump"), 'rb') as stfile: - stats = crpc.images.load(stfile) - stent = stats['entries'][0]['dump'] - stats_written = int(stent['shpages_written']) + int(stent['pages_written']) + stats_written = -1 + with open(self.__stats_file("dump"), 'rb') as stfile: + stats = crpc.images.load(stfile) + stent = stats['entries'][0]['dump'] + stats_written = int(stent['shpages_written']) + int( + stent['pages_written']) - real_written = 0 - for f in os.listdir(self.__ddir()): - if f.startswith('pages-'): - real_written += os.path.getsize(os.path.join(self.__ddir(), f)) + real_written = 0 + for f in os.listdir(self.__ddir()): + if f.startswith('pages-'): + real_written += os.path.getsize(os.path.join(self.__ddir(), f)) - r_pages = real_written / mmap.PAGESIZE - r_off = real_written % mmap.PAGESIZE - if (stats_written != r_pages) or (r_off != 0): - print("ERROR: bad page counts, stats = %d real = %d(%d)" % (stats_written, r_pages, r_off)) - raise test_fail_exc("page counts mismatch") + r_pages = real_written / mmap.PAGESIZE + r_off = real_written % mmap.PAGESIZE + if (stats_written != r_pages) or (r_off != 0): + print("ERROR: bad page counts, stats = %d real = %d(%d)" % + (stats_written, r_pages, r_off)) + raise test_fail_exc("page counts mismatch") - def dump(self, action, opts = []): - self.__iter += 1 - os.mkdir(self.__ddir()) - os.chmod(self.__ddir(), 0o777) + def dump(self, action, opts=[]): + self.__iter += 1 + os.mkdir(self.__ddir()) + os.chmod(self.__ddir(), 0o777) - a_opts = ["-t", self.__test.getpid()] - if self.__prev_dump_iter: - a_opts += ["--prev-images-dir", "../%d" % self.__prev_dump_iter, "--track-mem"] - self.__prev_dump_iter = self.__iter + a_opts = ["-t", self.__test.getpid()] + if self.__prev_dump_iter: + a_opts += [ + "--prev-images-dir", + "../%d" % self.__prev_dump_iter, "--track-mem" + ] + self.__prev_dump_iter = self.__iter - if self.__page_server: - print("Adding page server") + if self.__page_server: + print("Adding page server") - ps_opts = ["--port", "12345"] + self.__tls - if self.__dedup: - ps_opts += ["--auto-dedup"] + ps_opts = ["--port", "12345"] + self.__tls + if self.__dedup: + ps_opts += ["--auto-dedup"] - self.__page_server_p = self.__criu_act("page-server", opts = ps_opts, nowait = True) - a_opts += 
["--page-server", "--address", "127.0.0.1", "--port", "12345"] + self.__tls + self.__page_server_p = self.__criu_act("page-server", + opts=ps_opts, + nowait=True) + a_opts += [ + "--page-server", "--address", "127.0.0.1", "--port", "12345" + ] + self.__tls - a_opts += self.__test.getdopts() + a_opts += self.__test.getdopts() - if self.__dedup: - a_opts += ["--auto-dedup"] + if self.__dedup: + a_opts += ["--auto-dedup"] - a_opts += ["--timeout", "10"] + a_opts += ["--timeout", "10"] - criu_dir = os.path.dirname(os.getcwd()) - if os.getenv("GCOV"): - a_opts.append('--external') - a_opts.append('mnt[%s]:zdtm' % criu_dir) + criu_dir = os.path.dirname(os.getcwd()) + if os.getenv("GCOV"): + a_opts.append('--external') + a_opts.append('mnt[%s]:zdtm' % criu_dir) - if self.__leave_stopped: - a_opts += ['--leave-stopped'] - if self.__empty_ns: - a_opts += ['--empty-ns', 'net'] + if self.__leave_stopped: + a_opts += ['--leave-stopped'] + if self.__empty_ns: + a_opts += ['--empty-ns', 'net'] - nowait = False - if self.__lazy_migrate and action == "dump": - a_opts += ["--lazy-pages", "--port", "12345"] + self.__tls - nowait = True - self.__dump_process = self.__criu_act(action, opts = a_opts + opts, nowait = nowait) - if self.__mdedup and self.__iter > 1: - self.__criu_act("dedup", opts = []) + nowait = False + if self.__lazy_migrate and action == "dump": + a_opts += ["--lazy-pages", "--port", "12345"] + self.__tls + nowait = True + self.__dump_process = self.__criu_act(action, + opts=a_opts + opts, + nowait=nowait) + if self.__mdedup and self.__iter > 1: + self.__criu_act("dedup", opts=[]) - self.show_stats("dump") - self.check_pages_counts() + self.show_stats("dump") + self.check_pages_counts() - if self.__leave_stopped: - pstree_check_stopped(self.__test.getpid()) - pstree_signal(self.__test.getpid(), signal.SIGKILL) + if self.__leave_stopped: + pstree_check_stopped(self.__test.getpid()) + pstree_signal(self.__test.getpid(), signal.SIGKILL) - if self.__page_server_p: - ret = self.__page_server_p.wait() - grep_errors(os.path.join(self.__ddir(), "page-server.log")) - self.__page_server_p = None - if ret: - raise test_fail_exc("criu page-server exited with %d" % ret) + if self.__page_server_p: + ret = self.__page_server_p.wait() + grep_errors(os.path.join(self.__ddir(), "page-server.log")) + self.__page_server_p = None + if ret: + raise test_fail_exc("criu page-server exited with %d" % ret) - def restore(self): - r_opts = [] - if self.__restore_sibling: - r_opts = ["--restore-sibling"] - self.__test.auto_reap = False - r_opts += self.__test.getropts() - if self.__join_ns: - r_opts.append("--join-ns") - r_opts.append("net:%s" % join_ns_file) - if self.__empty_ns: - r_opts += ['--empty-ns', 'net'] - r_opts += ['--action-script', os.getcwd() + '/empty-netns-prep.sh'] + def restore(self): + r_opts = [] + if self.__restore_sibling: + r_opts = ["--restore-sibling"] + self.__test.auto_reap = False + r_opts += self.__test.getropts() + if self.__join_ns: + r_opts.append("--join-ns") + r_opts.append("net:%s" % join_ns_file) + if self.__empty_ns: + r_opts += ['--empty-ns', 'net'] + r_opts += ['--action-script', os.getcwd() + '/empty-netns-prep.sh'] - if self.__dedup: - r_opts += ["--auto-dedup"] + if self.__dedup: + r_opts += ["--auto-dedup"] - if self.__dedup: - r_opts += ["--auto-dedup"] + self.__prev_dump_iter = None + criu_dir = os.path.dirname(os.getcwd()) + if os.getenv("GCOV"): + r_opts.append('--external') + r_opts.append('mnt[zdtm]:%s' % criu_dir) - self.__prev_dump_iter = None - criu_dir = 
os.path.dirname(os.getcwd()) - if os.getenv("GCOV"): - r_opts.append('--external') - r_opts.append('mnt[zdtm]:%s' % criu_dir) + if self.__lazy_pages or self.__lazy_migrate: + lp_opts = [] + if self.__remote_lazy_pages or self.__lazy_migrate: + lp_opts += [ + "--page-server", "--port", "12345", "--address", + "127.0.0.1" + ] + self.__tls - if self.__lazy_pages or self.__lazy_migrate: - lp_opts = [] - if self.__remote_lazy_pages or self.__lazy_migrate: - lp_opts += ["--page-server", "--port", "12345", - "--address", "127.0.0.1"] + self.__tls + if self.__remote_lazy_pages: + ps_opts = [ + "--pidfile", "ps.pid", "--port", "12345", "--lazy-pages" + ] + self.__tls + self.__page_server_p = self.__criu_act("page-server", + opts=ps_opts, + nowait=True) + self.__lazy_pages_p = self.__criu_act("lazy-pages", + opts=lp_opts, + nowait=True) + r_opts += ["--lazy-pages"] - if self.__remote_lazy_pages: - ps_opts = ["--pidfile", "ps.pid", - "--port", "12345", "--lazy-pages"] + self.__tls - self.__page_server_p = self.__criu_act("page-server", opts = ps_opts, nowait = True) - self.__lazy_pages_p = self.__criu_act("lazy-pages", opts = lp_opts, nowait = True) - r_opts += ["--lazy-pages"] + if self.__leave_stopped: + r_opts += ['--leave-stopped'] - if self.__leave_stopped: - r_opts += ['--leave-stopped'] + self.__criu_act("restore", opts=r_opts + ["--restore-detached"]) + self.show_stats("restore") - self.__criu_act("restore", opts = r_opts + ["--restore-detached"]) - self.show_stats("restore") + if self.__leave_stopped: + pstree_check_stopped(self.__test.getpid()) + pstree_signal(self.__test.getpid(), signal.SIGCONT) - if self.__leave_stopped: - pstree_check_stopped(self.__test.getpid()) - pstree_signal(self.__test.getpid(), signal.SIGCONT) + @staticmethod + def check(feature): + return criu_cli.run( + "check", ["--no-default-config", "-v0", "--feature", feature], + opts['criu_bin']) == 0 - @staticmethod - def check(feature): - return criu_cli.run("check", ["--no-default-config", "-v0", - "--feature", feature], opts['criu_bin']) == 0 + @staticmethod + def available(): + if not os.access(opts['criu_bin'], os.X_OK): + print("CRIU binary not found at %s" % opts['criu_bin']) + sys.exit(1) - @staticmethod - def available(): - if not os.access(opts['criu_bin'], os.X_OK): - print("CRIU binary not found at %s" % opts['criu_bin']) - sys.exit(1) - - def kill(self): - if self.__lazy_pages_p: - self.__lazy_pages_p.terminate() - print("criu lazy-pages exited with %s" % self.__lazy_pages_p.wait()) - grep_errors(os.path.join(self.__ddir(), "lazy-pages.log")) - self.__lazy_pages_p = None - if self.__page_server_p: - self.__page_server_p.terminate() - print("criu page-server exited with %s" % self.__page_server_p.wait()) - grep_errors(os.path.join(self.__ddir(), "page-server.log")) - self.__page_server_p = None - if self.__dump_process: - self.__dump_process.terminate() - print("criu dump exited with %s" % self.__dump_process.wait()) - grep_errors(os.path.join(self.__ddir(), "dump.log")) - self.__dump_process = None + def kill(self): + if self.__lazy_pages_p: + self.__lazy_pages_p.terminate() + print("criu lazy-pages exited with %s" % + self.__lazy_pages_p.wait()) + grep_errors(os.path.join(self.__ddir(), "lazy-pages.log")) + self.__lazy_pages_p = None + if self.__page_server_p: + self.__page_server_p.terminate() + print("criu page-server exited with %s" % + self.__page_server_p.wait()) + grep_errors(os.path.join(self.__ddir(), "page-server.log")) + self.__page_server_p = None + if self.__dump_process: + 
self.__dump_process.terminate() + print("criu dump exited with %s" % self.__dump_process.wait()) + grep_errors(os.path.join(self.__ddir(), "dump.log")) + self.__dump_process = None def try_run_hook(test, args): - hname = test.getname() + '.hook' - if os.access(hname, os.X_OK): - print("Running %s(%s)" % (hname, ', '.join(args))) - hook = subprocess.Popen([hname] + args) - if hook.wait() != 0: - raise test_fail_exc("hook " + " ".join(args)) + hname = test.getname() + '.hook' + if os.access(hname, os.X_OK): + print("Running %s(%s)" % (hname, ', '.join(args))) + hook = subprocess.Popen([hname] + args) + if hook.wait() != 0: + raise test_fail_exc("hook " + " ".join(args)) # @@ -1290,583 +1375,615 @@ do_sbs = False def init_sbs(): - if sys.stdout.isatty(): - global do_sbs - do_sbs = True - else: - print("Can't do step-by-step in this runtime") + if sys.stdout.isatty(): + global do_sbs + do_sbs = True + else: + print("Can't do step-by-step in this runtime") def sbs(what): - if do_sbs: - input("Pause at %s. Press Enter to continue." % what) + if do_sbs: + input("Pause at %s. Press Enter to continue." % what) # # Main testing entity -- dump (probably with pre-dumps) and restore # def iter_parm(opt, dflt): - x = ((opt or str(dflt)) + ":0").split(':') - return (range(0, int(x[0])), float(x[1])) + x = ((opt or str(dflt)) + ":0").split(':') + return (range(0, int(x[0])), float(x[1])) def cr(cr_api, test, opts): - if opts['nocr']: - return + if opts['nocr']: + return - cr_api.set_test(test) + cr_api.set_test(test) - iters = iter_parm(opts['iters'], 1) - for i in iters[0]: - pres = iter_parm(opts['pre'], 0) - for p in pres[0]: - if opts['snaps']: - cr_api.dump("dump", opts = ["--leave-running", "--track-mem"]) - else: - cr_api.dump("pre-dump") - try_run_hook(test, ["--post-pre-dump"]) - test.pre_dump_notify() - time.sleep(pres[1]) + iters = iter_parm(opts['iters'], 1) + for i in iters[0]: + pres = iter_parm(opts['pre'], 0) + for p in pres[0]: + if opts['snaps']: + cr_api.dump("dump", opts=["--leave-running", "--track-mem"]) + else: + cr_api.dump("pre-dump") + try_run_hook(test, ["--post-pre-dump"]) + test.pre_dump_notify() + time.sleep(pres[1]) - sbs('pre-dump') + sbs('pre-dump') - os.environ["ZDTM_TEST_PID"] = str(test.getpid()) - if opts['norst']: - try_run_hook(test, ["--pre-dump"]) - cr_api.dump("dump", opts = ["--leave-running"]) - else: - try_run_hook(test, ["--pre-dump"]) - cr_api.dump("dump") - if not opts['lazy_migrate']: - test.gone() - else: - test.unlink_pidfile() - sbs('pre-restore') - try_run_hook(test, ["--pre-restore"]) - cr_api.restore() - os.environ["ZDTM_TEST_PID"] = str(test.getpid()) - os.environ["ZDTM_IMG_DIR"] = cr_api.logs() - try_run_hook(test, ["--post-restore"]) - sbs('post-restore') + os.environ["ZDTM_TEST_PID"] = str(test.getpid()) + if opts['norst']: + try_run_hook(test, ["--pre-dump"]) + cr_api.dump("dump", opts=["--leave-running"]) + else: + try_run_hook(test, ["--pre-dump"]) + cr_api.dump("dump") + if not opts['lazy_migrate']: + test.gone() + else: + test.unlink_pidfile() + sbs('pre-restore') + try_run_hook(test, ["--pre-restore"]) + cr_api.restore() + os.environ["ZDTM_TEST_PID"] = str(test.getpid()) + os.environ["ZDTM_IMG_DIR"] = cr_api.logs() + try_run_hook(test, ["--post-restore"]) + sbs('post-restore') - time.sleep(iters[1]) + time.sleep(iters[1]) # Additional checks that can be done outside of test process + def get_visible_state(test): - maps = {} - files = {} - mounts = {} + maps = {} + files = {} + mounts = {} - if not getattr(test, "static", lambda: False)() or 
\ - not getattr(test, "ns", lambda: False)(): - return ({}, {}, {}) + if not getattr(test, "static", lambda: False)() or \ + not getattr(test, "ns", lambda: False)(): + return ({}, {}, {}) - r = re.compile('^[0-9]+$') - pids = filter(lambda p: r.match(p), os.listdir("/proc/%s/root/proc/" % test.getpid())) - for pid in pids: - files[pid] = set(os.listdir("/proc/%s/root/proc/%s/fd" % (test.getpid(), pid))) + r = re.compile('^[0-9]+$') + pids = filter(lambda p: r.match(p), + os.listdir("/proc/%s/root/proc/" % test.getpid())) + for pid in pids: + files[pid] = set( + os.listdir("/proc/%s/root/proc/%s/fd" % (test.getpid(), pid))) - cmaps = [[0, 0, ""]] - last = 0 - mapsfd = open("/proc/%s/root/proc/%s/maps" % (test.getpid(), pid)) - for mp in mapsfd: - m = list(map(lambda x: int('0x' + x, 0), mp.split()[0].split('-'))) + cmaps = [[0, 0, ""]] + last = 0 + mapsfd = open("/proc/%s/root/proc/%s/maps" % (test.getpid(), pid)) + for mp in mapsfd: + m = list(map(lambda x: int('0x' + x, 0), mp.split()[0].split('-'))) - m.append(mp.split()[1]) + m.append(mp.split()[1]) - f = "/proc/%s/root/proc/%s/map_files/%s" % (test.getpid(), pid, mp.split()[0]) - if os.access(f, os.F_OK): - st = os.lstat(f) - m.append(oct(st.st_mode)) + f = "/proc/%s/root/proc/%s/map_files/%s" % (test.getpid(), pid, + mp.split()[0]) + if os.access(f, os.F_OK): + st = os.lstat(f) + m.append(oct(st.st_mode)) - if cmaps[last][1] == m[0] and cmaps[last][2] == m[2]: - cmaps[last][1] = m[1] - else: - cmaps.append(m) - last += 1 - mapsfd.close() + if cmaps[last][1] == m[0] and cmaps[last][2] == m[2]: + cmaps[last][1] = m[1] + else: + cmaps.append(m) + last += 1 + mapsfd.close() - maps[pid] = set(map(lambda x: '%x-%x %s' % (x[0], x[1], " ".join(x[2:])), cmaps)) + maps[pid] = set( + map(lambda x: '%x-%x %s' % (x[0], x[1], " ".join(x[2:])), cmaps)) - cmounts = [] - try: - r = re.compile(r"^\S+\s\S+\s\S+\s(\S+)\s(\S+)\s(\S+)\s[^-]*?(shared)?[^-]*?(master)?[^-]*?-") - with open("/proc/%s/root/proc/%s/mountinfo" % (test.getpid(), pid)) as mountinfo: - for m in mountinfo: - cmounts.append(r.match(m).groups()) - except IOError as e: - if e.errno != errno.EINVAL: - raise e - mounts[pid] = cmounts - return files, maps, mounts + cmounts = [] + try: + r = re.compile( + r"^\S+\s\S+\s\S+\s(\S+)\s(\S+)\s(\S+)\s[^-]*?(shared)?[^-]*?(master)?[^-]*?-" + ) + with open("/proc/%s/root/proc/%s/mountinfo" % + (test.getpid(), pid)) as mountinfo: + for m in mountinfo: + cmounts.append(r.match(m).groups()) + except IOError as e: + if e.errno != errno.EINVAL: + raise e + mounts[pid] = cmounts + return files, maps, mounts def check_visible_state(test, state, opts): - new = get_visible_state(test) + new = get_visible_state(test) - for pid in state[0].keys(): - fnew = new[0][pid] - fold = state[0][pid] - if fnew != fold: - print("%s: Old files lost: %s" % (pid, fold - fnew)) - print("%s: New files appeared: %s" % (pid, fnew - fold)) - raise test_fail_exc("fds compare") + for pid in state[0].keys(): + fnew = new[0][pid] + fold = state[0][pid] + if fnew != fold: + print("%s: Old files lost: %s" % (pid, fold - fnew)) + print("%s: New files appeared: %s" % (pid, fnew - fold)) + raise test_fail_exc("fds compare") - old_maps = state[1][pid] - new_maps = new[1][pid] - if os.getenv("COMPAT_TEST"): - # the vsyscall vma isn't unmapped from x32 processes - vsyscall = u"ffffffffff600000-ffffffffff601000 r-xp" - if vsyscall in new_maps and vsyscall not in old_maps: - new_maps.remove(vsyscall) - if old_maps != new_maps: - print("%s: Old maps lost: %s" % (pid, old_maps - new_maps)) - 
print("%s: New maps appeared: %s" % (pid, new_maps - old_maps)) - if not opts['fault']: # skip parasite blob - raise test_fail_exc("maps compare") + old_maps = state[1][pid] + new_maps = new[1][pid] + if os.getenv("COMPAT_TEST"): + # the vsyscall vma isn't unmapped from x32 processes + vsyscall = u"ffffffffff600000-ffffffffff601000 r-xp" + if vsyscall in new_maps and vsyscall not in old_maps: + new_maps.remove(vsyscall) + if old_maps != new_maps: + print("%s: Old maps lost: %s" % (pid, old_maps - new_maps)) + print("%s: New maps appeared: %s" % (pid, new_maps - old_maps)) + if not opts['fault']: # skip parasite blob + raise test_fail_exc("maps compare") - old_mounts = state[2][pid] - new_mounts = new[2][pid] - for i in range(len(old_mounts)): - m = old_mounts.pop(0) - if m in new_mounts: - new_mounts.remove(m) - else: - old_mounts.append(m) - if old_mounts or new_mounts: - print("%s: Old mounts lost: %s" % (pid, old_mounts)) - print("%s: New mounts appeared: %s" % (pid, new_mounts)) - raise test_fail_exc("mounts compare") + old_mounts = state[2][pid] + new_mounts = new[2][pid] + for i in range(len(old_mounts)): + m = old_mounts.pop(0) + if m in new_mounts: + new_mounts.remove(m) + else: + old_mounts.append(m) + if old_mounts or new_mounts: + print("%s: Old mounts lost: %s" % (pid, old_mounts)) + print("%s: New mounts appeared: %s" % (pid, new_mounts)) + raise test_fail_exc("mounts compare") - if '--link-remap' in test.getdopts(): - import glob - link_remap_list = glob.glob(os.path.dirname(test.getname()) + '/link_remap*') - if link_remap_list: - print("%s: link-remap files left: %s" % (test.getname(), link_remap_list)) - raise test_fail_exc("link remaps left") + if '--link-remap' in test.getdopts(): + import glob + link_remap_list = glob.glob( + os.path.dirname(test.getname()) + '/link_remap*') + if link_remap_list: + print("%s: link-remap files left: %s" % + (test.getname(), link_remap_list)) + raise test_fail_exc("link remaps left") class noop_freezer: - def __init__(self): - self.kernel = False + def __init__(self): + self.kernel = False - def attach(self): - pass + def attach(self): + pass - def freeze(self): - pass + def freeze(self): + pass - def thaw(self): - pass + def thaw(self): + pass - def getdopts(self): - return [] + def getdopts(self): + return [] - def getropts(self): - return [] + def getropts(self): + return [] class cg_freezer: - def __init__(self, path, state): - self.__path = '/sys/fs/cgroup/freezer/' + path - self.__state = state - self.kernel = True + def __init__(self, path, state): + self.__path = '/sys/fs/cgroup/freezer/' + path + self.__state = state + self.kernel = True - def attach(self): - if not os.access(self.__path, os.F_OK): - os.makedirs(self.__path) - with open(self.__path + '/tasks', 'w') as f: - f.write('0') + def attach(self): + if not os.access(self.__path, os.F_OK): + os.makedirs(self.__path) + with open(self.__path + '/tasks', 'w') as f: + f.write('0') - def __set_state(self, state): - with open(self.__path + '/freezer.state', 'w') as f: - f.write(state) + def __set_state(self, state): + with open(self.__path + '/freezer.state', 'w') as f: + f.write(state) - def freeze(self): - if self.__state.startswith('f'): - self.__set_state('FROZEN') + def freeze(self): + if self.__state.startswith('f'): + self.__set_state('FROZEN') - def thaw(self): - if self.__state.startswith('f'): - self.__set_state('THAWED') + def thaw(self): + if self.__state.startswith('f'): + self.__set_state('THAWED') - def getdopts(self): - return ['--freeze-cgroup', self.__path, 
'--manage-cgroups'] + def getdopts(self): + return ['--freeze-cgroup', self.__path, '--manage-cgroups'] - def getropts(self): - return ['--manage-cgroups'] + def getropts(self): + return ['--manage-cgroups'] def get_freezer(desc): - if not desc: - return noop_freezer() + if not desc: + return noop_freezer() - fd = desc.split(':') - fr = cg_freezer(path = fd[0], state = fd[1]) - return fr + fd = desc.split(':') + fr = cg_freezer(path=fd[0], state=fd[1]) + return fr def cmp_ns(ns1, match, ns2, msg): - ns1_ino = os.stat(ns1).st_ino - ns2_ino = os.stat(ns2).st_ino - if eval("%r %s %r" % (ns1_ino, match, ns2_ino)): - print("%s match (%r %s %r) fail" % (msg, ns1_ino, match, ns2_ino)) - raise test_fail_exc("%s compare" % msg) + ns1_ino = os.stat(ns1).st_ino + ns2_ino = os.stat(ns2).st_ino + if eval("%r %s %r" % (ns1_ino, match, ns2_ino)): + print("%s match (%r %s %r) fail" % (msg, ns1_ino, match, ns2_ino)) + raise test_fail_exc("%s compare" % msg) def check_joinns_state(t): - cmp_ns("/proc/%s/ns/net" % t.getpid(), "!=", join_ns_file, "join-ns") + cmp_ns("/proc/%s/ns/net" % t.getpid(), "!=", join_ns_file, "join-ns") def pstree_each_pid(root_pid): - f_children_path = "/proc/{0}/task/{0}/children".format(root_pid) - child_pids = [] - try: - with open(f_children_path, "r") as f_children: - pid_line = f_children.readline().strip(" \n") - if pid_line: - child_pids += pid_line.split(" ") - except Exception as e: - print("Unable to read /proc/*/children: %s" % e) - return # process is dead + f_children_path = "/proc/{0}/task/{0}/children".format(root_pid) + child_pids = [] + try: + with open(f_children_path, "r") as f_children: + pid_line = f_children.readline().strip(" \n") + if pid_line: + child_pids += pid_line.split(" ") + except Exception as e: + print("Unable to read /proc/*/children: %s" % e) + return # process is dead - yield root_pid - for child_pid in child_pids: - for pid in pstree_each_pid(child_pid): - yield pid + yield root_pid + for child_pid in child_pids: + for pid in pstree_each_pid(child_pid): + yield pid def is_proc_stopped(pid): - def get_thread_status(thread_dir): - try: - with open(os.path.join(thread_dir, "status")) as f_status: - for line in f_status.readlines(): - if line.startswith("State:"): - return line.split(":", 1)[1].strip().split(" ")[0] - except Exception as e: - print("Unable to read a thread status: %s" % e) - pass # process is dead - return None + def get_thread_status(thread_dir): + try: + with open(os.path.join(thread_dir, "status")) as f_status: + for line in f_status.readlines(): + if line.startswith("State:"): + return line.split(":", 1)[1].strip().split(" ")[0] + except Exception as e: + print("Unable to read a thread status: %s" % e) + pass # process is dead + return None - def is_thread_stopped(status): - return (status is None) or (status == "T") or (status == "Z") + def is_thread_stopped(status): + return (status is None) or (status == "T") or (status == "Z") - tasks_dir = "/proc/%s/task" % pid - thread_dirs = [] - try: - thread_dirs = os.listdir(tasks_dir) - except Exception as e: - print("Unable to read threads: %s" % e) - pass # process is dead + tasks_dir = "/proc/%s/task" % pid + thread_dirs = [] + try: + thread_dirs = os.listdir(tasks_dir) + except Exception as e: + print("Unable to read threads: %s" % e) + pass # process is dead - for thread_dir in thread_dirs: - thread_status = get_thread_status(os.path.join(tasks_dir, thread_dir)) - if not is_thread_stopped(thread_status): - return False + for thread_dir in thread_dirs: + thread_status = 
get_thread_status(os.path.join(tasks_dir, thread_dir)) + if not is_thread_stopped(thread_status): + return False - if not is_thread_stopped(get_thread_status("/proc/%s" % pid)): - return False + if not is_thread_stopped(get_thread_status("/proc/%s" % pid)): + return False - return True + return True def pstree_check_stopped(root_pid): - for pid in pstree_each_pid(root_pid): - if not is_proc_stopped(pid): - raise test_fail_exc("CRIU --leave-stopped %s" % pid) + for pid in pstree_each_pid(root_pid): + if not is_proc_stopped(pid): + raise test_fail_exc("CRIU --leave-stopped %s" % pid) def pstree_signal(root_pid, signal): - for pid in pstree_each_pid(root_pid): - try: - os.kill(int(pid), signal) - except Exception as e: - print("Unable to kill %d: %s" % (pid, e)) - pass # process is dead + for pid in pstree_each_pid(root_pid): + try: + os.kill(int(pid), signal) + except Exception as e: + print("Unable to kill %d: %s" % (pid, e)) + pass # process is dead def do_run_test(tname, tdesc, flavs, opts): - tcname = tname.split('/')[0] - tclass = test_classes.get(tcname, None) - if not tclass: - print("Unknown test class %s" % tcname) - return + tcname = tname.split('/')[0] + tclass = test_classes.get(tcname, None) + if not tclass: + print("Unknown test class %s" % tcname) + return - if opts['report']: - init_report(opts['report']) - if opts['sbs']: - init_sbs() + if opts['report']: + init_report(opts['report']) + if opts['sbs']: + init_sbs() - fcg = get_freezer(opts['freezecg']) + fcg = get_freezer(opts['freezecg']) - for f in flavs: - print_sep("Run %s in %s" % (tname, f)) - if opts['dry_run']: - continue - flav = flavors[f](opts) - t = tclass(tname, tdesc, flav, fcg) - cr_api = criu(opts) + for f in flavs: + print_sep("Run %s in %s" % (tname, f)) + if opts['dry_run']: + continue + flav = flavors[f](opts) + t = tclass(tname, tdesc, flav, fcg) + cr_api = criu(opts) - try: - t.start() - s = get_visible_state(t) - try: - cr(cr_api, t, opts) - except test_fail_expected_exc as e: - if e.cr_action == "dump": - t.stop() - else: - check_visible_state(t, s, opts) - if opts['join_ns']: - check_joinns_state(t) - t.stop() - cr_api.fini() - try_run_hook(t, ["--clean"]) - except test_fail_exc as e: - print_sep("Test %s FAIL at %s" % (tname, e.step), '#') - t.print_output() - t.kill() - cr_api.kill() - try_run_hook(t, ["--clean"]) - if cr_api.logs(): - add_to_report(cr_api.logs(), tname.replace('/', '_') + "_" + f + "/images") - if opts['keep_img'] == 'never': - cr_api.cleanup() - # When option --keep-going not specified this exit - # does two things: exits from subprocess and aborts the - # main script execution on the 1st error met - sys.exit(encode_flav(f)) - else: - if opts['keep_img'] != 'always': - cr_api.cleanup() - print_sep("Test %s PASS" % tname) + try: + t.start() + s = get_visible_state(t) + try: + cr(cr_api, t, opts) + except test_fail_expected_exc as e: + if e.cr_action == "dump": + t.stop() + else: + check_visible_state(t, s, opts) + if opts['join_ns']: + check_joinns_state(t) + t.stop() + cr_api.fini() + try_run_hook(t, ["--clean"]) + except test_fail_exc as e: + print_sep("Test %s FAIL at %s" % (tname, e.step), '#') + t.print_output() + t.kill() + cr_api.kill() + try_run_hook(t, ["--clean"]) + if cr_api.logs(): + add_to_report(cr_api.logs(), + tname.replace('/', '_') + "_" + f + "/images") + if opts['keep_img'] == 'never': + cr_api.cleanup() + # When option --keep-going not specified this exit + # does two things: exits from subprocess and aborts the + # main script execution on the 1st error met + 
sys.exit(encode_flav(f)) + else: + if opts['keep_img'] != 'always': + cr_api.cleanup() + print_sep("Test %s PASS" % tname) class Launcher: - def __init__(self, opts, nr_tests): - self.__opts = opts - self.__total = nr_tests - self.__runtest = 0 - self.__nr = 0 - self.__max = int(opts['parallel'] or 1) - self.__subs = {} - self.__fail = False - self.__file_report = None - self.__junit_file = None - self.__junit_test_cases = None - self.__failed = [] - self.__nr_skip = 0 - if self.__max > 1 and self.__total > 1: - self.__use_log = True - elif opts['report']: - self.__use_log = True - else: - self.__use_log = False + def __init__(self, opts, nr_tests): + self.__opts = opts + self.__total = nr_tests + self.__runtest = 0 + self.__nr = 0 + self.__max = int(opts['parallel'] or 1) + self.__subs = {} + self.__fail = False + self.__file_report = None + self.__junit_file = None + self.__junit_test_cases = None + self.__failed = [] + self.__nr_skip = 0 + if self.__max > 1 and self.__total > 1: + self.__use_log = True + elif opts['report']: + self.__use_log = True + else: + self.__use_log = False - if opts['report'] and (opts['keep_going'] or self.__total == 1): - global TestSuite, TestCase - from junit_xml import TestSuite, TestCase - now = datetime.datetime.now() - att = 0 - reportname = os.path.join(report_dir, "criu-testreport.tap") - junitreport = os.path.join(report_dir, "criu-testreport.xml") - while os.access(reportname, os.F_OK) or os.access(junitreport, os.F_OK): - reportname = os.path.join(report_dir, "criu-testreport" + ".%d.tap" % att) - junitreport = os.path.join(report_dir, "criu-testreport" + ".%d.xml" % att) - att += 1 + if opts['report'] and (opts['keep_going'] or self.__total == 1): + global TestSuite, TestCase + from junit_xml import TestSuite, TestCase + now = datetime.datetime.now() + att = 0 + reportname = os.path.join(report_dir, "criu-testreport.tap") + junitreport = os.path.join(report_dir, "criu-testreport.xml") + while os.access(reportname, os.F_OK) or os.access( + junitreport, os.F_OK): + reportname = os.path.join(report_dir, + "criu-testreport" + ".%d.tap" % att) + junitreport = os.path.join(report_dir, + "criu-testreport" + ".%d.xml" % att) + att += 1 - self.__junit_file = open(junitreport, 'a') - self.__junit_test_cases = [] + self.__junit_file = open(junitreport, 'a') + self.__junit_test_cases = [] - self.__file_report = open(reportname, 'a') - print(u"TAP version 13", file=self.__file_report) - print(u"# Hardware architecture: " + arch, file=self.__file_report) - print(u"# Timestamp: " + now.strftime("%Y-%m-%d %H:%M") + " (GMT+1)", file=self.__file_report) - print(u"# ", file=self.__file_report) - print(u"1.." + str(nr_tests), file=self.__file_report) - with open("/proc/sys/kernel/tainted") as taintfd: - self.__taint = taintfd.read() - if int(self.__taint, 0) != 0: - print("The kernel is tainted: %r" % self.__taint) - if not opts["ignore_taint"]: - raise Exception("The kernel is tainted: %r" % self.__taint) + self.__file_report = open(reportname, 'a') + print(u"TAP version 13", file=self.__file_report) + print(u"# Hardware architecture: " + arch, file=self.__file_report) + print(u"# Timestamp: " + now.strftime("%Y-%m-%d %H:%M") + + " (GMT+1)", + file=self.__file_report) + print(u"# ", file=self.__file_report) + print(u"1.." 
+ str(nr_tests), file=self.__file_report) + with open("/proc/sys/kernel/tainted") as taintfd: + self.__taint = taintfd.read() + if int(self.__taint, 0) != 0: + print("The kernel is tainted: %r" % self.__taint) + if not opts["ignore_taint"]: + raise Exception("The kernel is tainted: %r" % self.__taint) - def __show_progress(self, msg): - perc = int(self.__nr * 16 / self.__total) - print("=== Run %d/%d %s %s" % (self.__nr, self.__total, '=' * perc + '-' * (16 - perc), msg)) + def __show_progress(self, msg): + perc = int(self.__nr * 16 / self.__total) + print("=== Run %d/%d %s %s" % + (self.__nr, self.__total, '=' * perc + '-' * (16 - perc), msg)) - def skip(self, name, reason): - print("Skipping %s (%s)" % (name, reason)) - self.__nr += 1 - self.__runtest += 1 - self.__nr_skip += 1 + def skip(self, name, reason): + print("Skipping %s (%s)" % (name, reason)) + self.__nr += 1 + self.__runtest += 1 + self.__nr_skip += 1 - if self.__junit_test_cases is not None: - tc = TestCase(name) - tc.add_skipped_info(reason) - self.__junit_test_cases.append(tc) - if self.__file_report: - testline = u"ok %d - %s # SKIP %s" % (self.__runtest, name, reason) - print(testline, file=self.__file_report) + if self.__junit_test_cases is not None: + tc = TestCase(name) + tc.add_skipped_info(reason) + self.__junit_test_cases.append(tc) + if self.__file_report: + testline = u"ok %d - %s # SKIP %s" % (self.__runtest, name, reason) + print(testline, file=self.__file_report) - def run_test(self, name, desc, flavor): + def run_test(self, name, desc, flavor): - if len(self.__subs) >= self.__max: - self.wait() + if len(self.__subs) >= self.__max: + self.wait() - with open("/proc/sys/kernel/tainted") as taintfd: - taint = taintfd.read() - if self.__taint != taint: - raise Exception("The kernel is tainted: %r (%r)" % (taint, self.__taint)) + with open("/proc/sys/kernel/tainted") as taintfd: + taint = taintfd.read() + if self.__taint != taint: + raise Exception("The kernel is tainted: %r (%r)" % + (taint, self.__taint)) - if test_flag(desc, 'excl'): - self.wait_all() + if test_flag(desc, 'excl'): + self.wait_all() - self.__nr += 1 - self.__show_progress(name) + self.__nr += 1 + self.__show_progress(name) - nd = ('nocr', 'norst', 'pre', 'iters', 'page_server', 'sibling', 'stop', 'empty_ns', - 'fault', 'keep_img', 'report', 'snaps', 'sat', 'script', 'rpc', 'lazy_pages', - 'join_ns', 'dedup', 'sbs', 'freezecg', 'user', 'dry_run', 'noauto_dedup', - 'remote_lazy_pages', 'show_stats', 'lazy_migrate', 'tls', - 'criu_bin', 'crit_bin') - arg = repr((name, desc, flavor, {d: self.__opts[d] for d in nd})) + nd = ('nocr', 'norst', 'pre', 'iters', 'page_server', 'sibling', + 'stop', 'empty_ns', 'fault', 'keep_img', 'report', 'snaps', + 'sat', 'script', 'rpc', 'lazy_pages', 'join_ns', 'dedup', 'sbs', + 'freezecg', 'user', 'dry_run', 'noauto_dedup', + 'remote_lazy_pages', 'show_stats', 'lazy_migrate', + 'tls', 'criu_bin', 'crit_bin') + arg = repr((name, desc, flavor, {d: self.__opts[d] for d in nd})) - if self.__use_log: - logf = name.replace('/', '_') + ".log" - log = open(logf, "w") - else: - logf = None - log = None + if self.__use_log: + logf = name.replace('/', '_') + ".log" + log = open(logf, "w") + else: + logf = None + log = None - sub = subprocess.Popen(["./zdtm_ct", "zdtm.py"], - env = dict(os.environ, CR_CT_TEST_INFO = arg), - stdout = log, stderr = subprocess.STDOUT, close_fds = True) - self.__subs[sub.pid] = {'sub': sub, 'log': logf, 'name': name, "start": time.time()} + sub = subprocess.Popen(["./zdtm_ct", "zdtm.py"], + 
env=dict(os.environ, CR_CT_TEST_INFO=arg), + stdout=log, + stderr=subprocess.STDOUT, + close_fds=True) + self.__subs[sub.pid] = { + 'sub': sub, + 'log': logf, + 'name': name, + "start": time.time() + } - if test_flag(desc, 'excl'): - self.wait() + if test_flag(desc, 'excl'): + self.wait() - def __wait_one(self, flags): - pid = -1 - status = -1 - signal.alarm(10) - while True: - try: - pid, status = os.waitpid(0, flags) - except OSError as e: - if e.errno == errno.EINTR: - subprocess.Popen(["ps", "axf"]).wait() - continue - signal.alarm(0) - raise e - else: - break - signal.alarm(0) + def __wait_one(self, flags): + pid = -1 + status = -1 + signal.alarm(10) + while True: + try: + pid, status = os.waitpid(0, flags) + except OSError as e: + if e.errno == errno.EINTR: + subprocess.Popen(["ps", "axf"]).wait() + continue + signal.alarm(0) + raise e + else: + break + signal.alarm(0) - self.__runtest += 1 - if pid != 0: - sub = self.__subs.pop(pid) - tc = None - if self.__junit_test_cases is not None: - tc = TestCase(sub['name'], elapsed_sec=time.time() - sub['start']) - self.__junit_test_cases.append(tc) - if status != 0: - self.__fail = True - failed_flavor = decode_flav(os.WEXITSTATUS(status)) - self.__failed.append([sub['name'], failed_flavor]) - if self.__file_report: - testline = u"not ok %d - %s # flavor %s" % (self.__runtest, sub['name'], failed_flavor) - with open(sub['log']) as sublog: - output = sublog.read() - details = {'output': output} - tc.add_error_info(output = output) - print(testline, file=self.__file_report) - print("%s" % yaml.safe_dump(details, explicit_start=True, - explicit_end=True, default_style='|'), file=self.__file_report) - if sub['log']: - add_to_output(sub['log']) - else: - if self.__file_report: - testline = u"ok %d - %s" % (self.__runtest, sub['name']) - print(testline, file=self.__file_report) + self.__runtest += 1 + if pid != 0: + sub = self.__subs.pop(pid) + tc = None + if self.__junit_test_cases is not None: + tc = TestCase(sub['name'], + elapsed_sec=time.time() - sub['start']) + self.__junit_test_cases.append(tc) + if status != 0: + self.__fail = True + failed_flavor = decode_flav(os.WEXITSTATUS(status)) + self.__failed.append([sub['name'], failed_flavor]) + if self.__file_report: + testline = u"not ok %d - %s # flavor %s" % ( + self.__runtest, sub['name'], failed_flavor) + with open(sub['log']) as sublog: + output = sublog.read() + details = {'output': output} + tc.add_error_info(output=output) + print(testline, file=self.__file_report) + print("%s" % yaml.safe_dump(details, + explicit_start=True, + explicit_end=True, + default_style='|'), + file=self.__file_report) + if sub['log']: + add_to_output(sub['log']) + else: + if self.__file_report: + testline = u"ok %d - %s" % (self.__runtest, sub['name']) + print(testline, file=self.__file_report) - if sub['log']: - with open(sub['log']) as sublog: - print("%s" % sublog.read().encode('ascii', 'ignore').decode('utf-8')) - os.unlink(sub['log']) + if sub['log']: + with open(sub['log']) as sublog: + print("%s" % sublog.read().encode( + 'ascii', 'ignore').decode('utf-8')) + os.unlink(sub['log']) - return True + return True - return False + return False - def __wait_all(self): - while self.__subs: - self.__wait_one(0) + def __wait_all(self): + while self.__subs: + self.__wait_one(0) - def wait(self): - self.__wait_one(0) - while self.__subs: - if not self.__wait_one(os.WNOHANG): - break - if self.__fail and not opts['keep_going']: - raise test_fail_exc('') + def wait(self): + self.__wait_one(0) + while self.__subs: + 
if not self.__wait_one(os.WNOHANG): + break + if self.__fail and not opts['keep_going']: + raise test_fail_exc('') - def wait_all(self): - self.__wait_all() - if self.__fail and not opts['keep_going']: - raise test_fail_exc('') + def wait_all(self): + self.__wait_all() + if self.__fail and not opts['keep_going']: + raise test_fail_exc('') - def finish(self): - self.__wait_all() - if not opts['fault'] and check_core_files(): - self.__fail = True - if self.__file_report: - ts = TestSuite(opts['title'], self.__junit_test_cases, os.getenv("NODE_NAME")) - self.__junit_file.write(TestSuite.to_xml_string([ts])) - self.__junit_file.close() - self.__file_report.close() + def finish(self): + self.__wait_all() + if not opts['fault'] and check_core_files(): + self.__fail = True + if self.__file_report: + ts = TestSuite(opts['title'], self.__junit_test_cases, + os.getenv("NODE_NAME")) + self.__junit_file.write(TestSuite.to_xml_string([ts])) + self.__junit_file.close() + self.__file_report.close() - if opts['keep_going']: - if self.__fail: - print_sep("%d TEST(S) FAILED (TOTAL %d/SKIPPED %d)" - % (len(self.__failed), self.__total, self.__nr_skip), "#") - for failed in self.__failed: - print(" * %s(%s)" % (failed[0], failed[1])) - else: - print_sep("ALL TEST(S) PASSED (TOTAL %d/SKIPPED %d)" - % (self.__total, self.__nr_skip), "#") + if opts['keep_going']: + if self.__fail: + print_sep( + "%d TEST(S) FAILED (TOTAL %d/SKIPPED %d)" % + (len(self.__failed), self.__total, self.__nr_skip), "#") + for failed in self.__failed: + print(" * %s(%s)" % (failed[0], failed[1])) + else: + print_sep( + "ALL TEST(S) PASSED (TOTAL %d/SKIPPED %d)" % + (self.__total, self.__nr_skip), "#") - if self.__fail: - print_sep("FAIL", "#") - sys.exit(1) + if self.__fail: + print_sep("FAIL", "#") + sys.exit(1) def all_tests(opts): - with open(opts['set'] + '.desc') as fd: - desc = eval(fd.read()) + with open(opts['set'] + '.desc') as fd: + desc = eval(fd.read()) - files = [] - mask = stat.S_IFREG | stat.S_IXUSR - for d in os.walk(desc['dir']): - for f in d[2]: - fp = os.path.join(d[0], f) - st = os.lstat(fp) - if (st.st_mode & mask) != mask: - continue - if stat.S_IFMT(st.st_mode) in [stat.S_IFLNK, stat.S_IFSOCK]: - continue - files.append(fp) - excl = list(map(lambda x: os.path.join(desc['dir'], x), desc['exclude'])) - tlist = filter(lambda x: - not x.endswith('.checkskip') and - not x.endswith('.hook') and - x not in excl, - map(lambda x: x.strip(), files) - ) - return tlist + files = [] + mask = stat.S_IFREG | stat.S_IXUSR + for d in os.walk(desc['dir']): + for f in d[2]: + fp = os.path.join(d[0], f) + st = os.lstat(fp) + if (st.st_mode & mask) != mask: + continue + if stat.S_IFMT(st.st_mode) in [stat.S_IFLNK, stat.S_IFSOCK]: + continue + files.append(fp) + excl = list(map(lambda x: os.path.join(desc['dir'], x), desc['exclude'])) + tlist = filter( + lambda x: not x.endswith('.checkskip') and not x.endswith('.hook') and + x not in excl, map(lambda x: x.strip(), files)) + return tlist # Descriptor for abstract test not in list @@ -1874,355 +1991,363 @@ default_test = {} def get_test_desc(tname): - d_path = tname + '.desc' - if os.access(d_path, os.F_OK) and os.path.getsize(d_path) > 0: - with open(d_path) as fd: - return eval(fd.read()) + d_path = tname + '.desc' + if os.access(d_path, os.F_OK) and os.path.getsize(d_path) > 0: + with open(d_path) as fd: + return eval(fd.read()) - return default_test + return default_test def self_checkskip(tname): - chs = tname + '.checkskip' - if os.access(chs, os.X_OK): - ch = 
subprocess.Popen([chs]) - return not ch.wait() == 0 + chs = tname + '.checkskip' + if os.access(chs, os.X_OK): + ch = subprocess.Popen([chs]) + return not ch.wait() == 0 - return False + return False def print_fname(fname, typ): - print("=[%s]=> %s" % (typ, fname)) + print("=[%s]=> %s" % (typ, fname)) -def print_sep(title, sep = "=", width = 80): - print((" " + title + " ").center(width, sep)) +def print_sep(title, sep="=", width=80): + print((" " + title + " ").center(width, sep)) def print_error(line): - line = line.rstrip() - print(line) - if line.endswith('>'): # combine pie output - return True - return False + line = line.rstrip() + print(line) + if line.endswith('>'): # combine pie output + return True + return False def grep_errors(fname): - first = True - print_next = False - before = [] - with open(fname) as fd: - for l in fd: - before.append(l) - if len(before) > 5: - before.pop(0) - if "Error" in l or "Warn" in l: - if first: - print_fname(fname, 'log') - print_sep("grep Error", "-", 60) - first = False - for i in before: - print_next = print_error(i) - before = [] - else: - if print_next: - print_next = print_error(l) - before = [] - if not first: - print_sep("ERROR OVER", "-", 60) + first = True + print_next = False + before = [] + with open(fname) as fd: + for l in fd: + before.append(l) + if len(before) > 5: + before.pop(0) + if "Error" in l or "Warn" in l: + if first: + print_fname(fname, 'log') + print_sep("grep Error", "-", 60) + first = False + for i in before: + print_next = print_error(i) + before = [] + else: + if print_next: + print_next = print_error(l) + before = [] + if not first: + print_sep("ERROR OVER", "-", 60) def run_tests(opts): - excl = None - features = {} + excl = None + features = {} - if opts['pre'] or opts['snaps']: - if not criu.check("mem_dirty_track"): - print("Tracking memory is not available") - return + if opts['pre'] or opts['snaps']: + if not criu.check("mem_dirty_track"): + print("Tracking memory is not available") + return - if opts['all']: - torun = all_tests(opts) - run_all = True - elif opts['tests']: - r = re.compile(opts['tests']) - torun = filter(lambda x: r.match(x), all_tests(opts)) - run_all = True - elif opts['test']: - torun = opts['test'] - run_all = False - elif opts['from']: - if not os.access(opts['from'], os.R_OK): - print("No such file") - return + if opts['all']: + torun = all_tests(opts) + run_all = True + elif opts['tests']: + r = re.compile(opts['tests']) + torun = filter(lambda x: r.match(x), all_tests(opts)) + run_all = True + elif opts['test']: + torun = opts['test'] + run_all = False + elif opts['from']: + if not os.access(opts['from'], os.R_OK): + print("No such file") + return - with open(opts['from']) as fd: - torun = map(lambda x: x.strip(), fd) - opts['keep_going'] = False - run_all = True - else: - print("Specify test with -t or -a") - return + with open(opts['from']) as fd: + torun = map(lambda x: x.strip(), fd) + opts['keep_going'] = False + run_all = True + else: + print("Specify test with -t or -a") + return - torun = list(torun) - if opts['keep_going'] and len(torun) < 2: - print("[WARNING] Option --keep-going is more useful when running multiple tests") - opts['keep_going'] = False + torun = list(torun) + if opts['keep_going'] and len(torun) < 2: + print( + "[WARNING] Option --keep-going is more useful when running multiple tests" + ) + opts['keep_going'] = False - if opts['exclude']: - excl = re.compile(".*(" + "|".join(opts['exclude']) + ")") - print("Compiled exclusion list") + if opts['exclude']: + 
excl = re.compile(".*(" + "|".join(opts['exclude']) + ")") + print("Compiled exclusion list") - if opts['report']: - init_report(opts['report']) + if opts['report']: + init_report(opts['report']) - if opts['parallel'] and opts['freezecg']: - print("Parallel launch with freezer not supported") - opts['parallel'] = None + if opts['parallel'] and opts['freezecg']: + print("Parallel launch with freezer not supported") + opts['parallel'] = None - if opts['join_ns']: - if subprocess.Popen(["ip", "netns", "add", "zdtm_netns"]).wait(): - raise Exception("Unable to create a network namespace") - if subprocess.Popen(["ip", "netns", "exec", "zdtm_netns", "ip", "link", "set", "up", "dev", "lo"]).wait(): - raise Exception("ip link set up dev lo") + if opts['join_ns']: + if subprocess.Popen(["ip", "netns", "add", "zdtm_netns"]).wait(): + raise Exception("Unable to create a network namespace") + if subprocess.Popen([ + "ip", "netns", "exec", "zdtm_netns", "ip", "link", "set", "up", + "dev", "lo" + ]).wait(): + raise Exception("ip link set up dev lo") - if opts['lazy_pages'] or opts['remote_lazy_pages'] or opts['lazy_migrate']: - uffd = criu.check("uffd") - uffd_noncoop = criu.check("uffd-noncoop") - if not uffd: - raise Exception("UFFD is not supported, cannot run with --lazy-pages") - if not uffd_noncoop: - # Most tests will work with 4.3 - 4.11 - print("[WARNING] Non-cooperative UFFD is missing, some tests might spuriously fail") + if opts['lazy_pages'] or opts['remote_lazy_pages'] or opts['lazy_migrate']: + uffd = criu.check("uffd") + uffd_noncoop = criu.check("uffd-noncoop") + if not uffd: + raise Exception( + "UFFD is not supported, cannot run with --lazy-pages") + if not uffd_noncoop: + # Most tests will work with 4.3 - 4.11 + print( + "[WARNING] Non-cooperative UFFD is missing, some tests might spuriously fail" + ) - launcher = Launcher(opts, len(torun)) - try: - for t in torun: - global arch + launcher = Launcher(opts, len(torun)) + try: + for t in torun: + global arch - if excl and excl.match(t): - launcher.skip(t, "exclude") - continue + if excl and excl.match(t): + launcher.skip(t, "exclude") + continue - tdesc = get_test_desc(t) - if tdesc.get('arch', arch) != arch: - launcher.skip(t, "arch %s" % tdesc['arch']) - continue + tdesc = get_test_desc(t) + if tdesc.get('arch', arch) != arch: + launcher.skip(t, "arch %s" % tdesc['arch']) + continue - if test_flag(tdesc, 'reqrst') and opts['norst']: - launcher.skip(t, "restore stage is required") - continue + if test_flag(tdesc, 'reqrst') and opts['norst']: + launcher.skip(t, "restore stage is required") + continue - if run_all and test_flag(tdesc, 'noauto'): - launcher.skip(t, "manual run only") - continue + if run_all and test_flag(tdesc, 'noauto'): + launcher.skip(t, "manual run only") + continue - feat_list = tdesc.get('feature', "") - for feat in feat_list.split(): - if feat not in features: - print("Checking feature %s" % feat) - features[feat] = criu.check(feat) + feat_list = tdesc.get('feature', "") + for feat in feat_list.split(): + if feat not in features: + print("Checking feature %s" % feat) + features[feat] = criu.check(feat) - if not features[feat]: - launcher.skip(t, "no %s feature" % feat) - feat_list = None - break - if feat_list is None: - continue + if not features[feat]: + launcher.skip(t, "no %s feature" % feat) + feat_list = None + break + if feat_list is None: + continue - if self_checkskip(t): - launcher.skip(t, "checkskip failed") - continue + if self_checkskip(t): + launcher.skip(t, "checkskip failed") + continue - if 
opts['user']:
-			if test_flag(tdesc, 'suid'):
-				launcher.skip(t, "suid test in user mode")
-				continue
-			if test_flag(tdesc, 'nouser'):
-				launcher.skip(t, "criu root prio needed")
-				continue
+            if opts['user']:
+                if test_flag(tdesc, 'suid'):
+                    launcher.skip(t, "suid test in user mode")
+                    continue
+                if test_flag(tdesc, 'nouser'):
+                    launcher.skip(t, "criu root prio needed")
+                    continue
 
-			if opts['join_ns']:
-				if test_flag(tdesc, 'samens'):
-					launcher.skip(t, "samens test in the same namespace")
-					continue
+            if opts['join_ns']:
+                if test_flag(tdesc, 'samens'):
+                    launcher.skip(t, "samens test in the same namespace")
+                    continue
 
-			if opts['lazy_pages'] or opts['remote_lazy_pages'] or opts['lazy_migrate']:
-				if test_flag(tdesc, 'nolazy'):
-					launcher.skip(t, "lazy pages are not supported")
-					continue
+            if opts['lazy_pages'] or opts['remote_lazy_pages'] or opts[
+                    'lazy_migrate']:
+                if test_flag(tdesc, 'nolazy'):
+                    launcher.skip(t, "lazy pages are not supported")
+                    continue
 
-			if opts['remote_lazy_pages']:
-				if test_flag(tdesc, 'noremotelazy'):
-					launcher.skip(t, "remote lazy pages are not supported")
-					continue
+            if opts['remote_lazy_pages']:
+                if test_flag(tdesc, 'noremotelazy'):
+                    launcher.skip(t, "remote lazy pages are not supported")
+                    continue
 
-			test_flavs = tdesc.get('flavor', 'h ns uns').split()
-			opts_flavs = (opts['flavor'] or 'h,ns,uns').split(',')
-			if opts_flavs != ['best']:
-				run_flavs = set(test_flavs) & set(opts_flavs)
-			else:
-				run_flavs = set([test_flavs.pop()])
-			if not criu.check("userns"):
-				run_flavs -= set(['uns'])
-			if opts['user']:
-				# FIXME -- probably uns will make sense
-				run_flavs -= set(['ns', 'uns'])
+            test_flavs = tdesc.get('flavor', 'h ns uns').split()
+            opts_flavs = (opts['flavor'] or 'h,ns,uns').split(',')
+            if opts_flavs != ['best']:
+                run_flavs = set(test_flavs) & set(opts_flavs)
+            else:
+                run_flavs = set([test_flavs.pop()])
+            if not criu.check("userns"):
+                run_flavs -= set(['uns'])
+            if opts['user']:
+                # FIXME -- probably uns will make sense
+                run_flavs -= set(['ns', 'uns'])
 
-			# remove ns and uns flavor in join_ns
-			if opts['join_ns']:
-				run_flavs -= set(['ns', 'uns'])
-			if opts['empty_ns']:
-				run_flavs -= set(['h'])
+            # remove ns and uns flavor in join_ns
+            if opts['join_ns']:
+                run_flavs -= set(['ns', 'uns'])
+            if opts['empty_ns']:
+                run_flavs -= set(['h'])
 
-			if run_flavs:
-				launcher.run_test(t, tdesc, run_flavs)
-			else:
-				launcher.skip(t, "no flavors")
-	finally:
-		launcher.finish()
-		if opts['join_ns']:
-			subprocess.Popen(["ip", "netns", "delete", "zdtm_netns"]).wait()
+            if run_flavs:
+                launcher.run_test(t, tdesc, run_flavs)
+            else:
+                launcher.skip(t, "no flavors")
+    finally:
+        launcher.finish()
+        if opts['join_ns']:
+            subprocess.Popen(["ip", "netns", "delete", "zdtm_netns"]).wait()
 
 
 sti_fmt = "%-40s%-10s%s"
 
 
 def show_test_info(t):
-	tdesc = get_test_desc(t)
-	flavs = tdesc.get('flavor', '')
-	return sti_fmt % (t, flavs, tdesc.get('flags', ''))
+    tdesc = get_test_desc(t)
+    flavs = tdesc.get('flavor', '')
+    return sti_fmt % (t, flavs, tdesc.get('flags', ''))
 
 
 def list_tests(opts):
-	tlist = all_tests(opts)
-	if opts['info']:
-		print(sti_fmt % ('Name', 'Flavors', 'Flags'))
-		tlist = map(lambda x: show_test_info(x), tlist)
-	print('\n'.join(tlist))
+    tlist = all_tests(opts)
+    if opts['info']:
+        print(sti_fmt % ('Name', 'Flavors', 'Flags'))
+        tlist = map(lambda x: show_test_info(x), tlist)
+    print('\n'.join(tlist))
 
 
 class group:
-	def __init__(self, tname, tdesc):
-		self.__tests = [tname]
-		self.__desc = tdesc
-		self.__deps = set()
+    def __init__(self, tname, tdesc):
+        self.__tests = [tname]
+        self.__desc = tdesc
+        self.__deps = set()
 
-	def __is_mergeable_desc(self, desc):
-		# For now make it full match
-		if self.__desc.get('flags') != desc.get('flags'):
-			return False
-		if self.__desc.get('flavor') != desc.get('flavor'):
-			return False
-		if self.__desc.get('arch') != desc.get('arch'):
-			return False
-		if self.__desc.get('opts') != desc.get('opts'):
-			return False
-		if self.__desc.get('feature') != desc.get('feature'):
-			return False
-		return True
+    def __is_mergeable_desc(self, desc):
+        # For now make it full match
+        if self.__desc.get('flags') != desc.get('flags'):
+            return False
+        if self.__desc.get('flavor') != desc.get('flavor'):
+            return False
+        if self.__desc.get('arch') != desc.get('arch'):
+            return False
+        if self.__desc.get('opts') != desc.get('opts'):
+            return False
+        if self.__desc.get('feature') != desc.get('feature'):
+            return False
+        return True
 
-	def merge(self, tname, tdesc):
-		if not self.__is_mergeable_desc(tdesc):
-			return False
+    def merge(self, tname, tdesc):
+        if not self.__is_mergeable_desc(tdesc):
+            return False
 
-		self.__deps |= set(tdesc.get('deps', []))
-		self.__tests.append(tname)
-		return True
+        self.__deps |= set(tdesc.get('deps', []))
+        self.__tests.append(tname)
+        return True
 
-	def size(self):
-		return len(self.__tests)
+    def size(self):
+        return len(self.__tests)
 
-	# common method to write a "meta" auxiliary script (hook/checkskip)
-	# which will call all tests' scripts in turn
-	def __dump_meta(self, fname, ext):
-		scripts = filter(lambda names: os.access(names[1], os.X_OK),
-				map(lambda test: (test, test + ext),
-					self.__tests))
-		if scripts:
-			f = open(fname + ext, "w")
-			f.write("#!/bin/sh -e\n")
+    # common method to write a "meta" auxiliary script (hook/checkskip)
+    # which will call all tests' scripts in turn
+    def __dump_meta(self, fname, ext):
+        scripts = filter(lambda names: os.access(names[1], os.X_OK),
+                         map(lambda test: (test, test + ext), self.__tests))
+        if scripts:
+            f = open(fname + ext, "w")
+            f.write("#!/bin/sh -e\n")
 
-			for test, script in scripts:
-				f.write("echo 'Running %s for %s'\n" % (ext, test))
-				f.write('%s "$@"\n' % script)
+            for test, script in scripts:
+                f.write("echo 'Running %s for %s'\n" % (ext, test))
+                f.write('%s "$@"\n' % script)
 
-			f.write("echo 'All %s scripts OK'\n" % ext)
-			f.close()
-			os.chmod(fname + ext, 0o700)
+            f.write("echo 'All %s scripts OK'\n" % ext)
+            f.close()
+            os.chmod(fname + ext, 0o700)
 
-	def dump(self, fname):
-		f = open(fname, "w")
-		for t in self.__tests:
-			f.write(t + '\n')
-		f.close()
-		os.chmod(fname, 0o700)
+    def dump(self, fname):
+        f = open(fname, "w")
+        for t in self.__tests:
+            f.write(t + '\n')
+        f.close()
+        os.chmod(fname, 0o700)
 
-		if len(self.__desc) or len(self.__deps):
-			f = open(fname + '.desc', "w")
-			if len(self.__deps):
-				self.__desc['deps'] = list(self.__deps)
-			f.write(repr(self.__desc))
-			f.close()
+        if len(self.__desc) or len(self.__deps):
+            f = open(fname + '.desc', "w")
+            if len(self.__deps):
+                self.__desc['deps'] = list(self.__deps)
+            f.write(repr(self.__desc))
+            f.close()
 
-		# write "meta" .checkskip and .hook scripts
-		self.__dump_meta(fname, '.checkskip')
-		self.__dump_meta(fname, '.hook')
+        # write "meta" .checkskip and .hook scripts
+        self.__dump_meta(fname, '.checkskip')
+        self.__dump_meta(fname, '.hook')
 
 
 def group_tests(opts):
-	excl = None
-	groups = []
-	pend_groups = []
-	maxs = int(opts['max_size'])
+    excl = None
+    groups = []
+    pend_groups = []
+    maxs = int(opts['max_size'])
 
-	if not os.access("groups", os.F_OK):
-		os.mkdir("groups")
+    if not os.access("groups", os.F_OK):
+        os.mkdir("groups")
 
-	tlist = all_tests(opts)
-	random.shuffle(tlist)
-	if opts['exclude']:
-		excl = re.compile(".*(" + "|".join(opts['exclude']) + ")")
-		print("Compiled exclusion list")
+    tlist = all_tests(opts)
+    random.shuffle(tlist)
+    if opts['exclude']:
+        excl = re.compile(".*(" + "|".join(opts['exclude']) + ")")
+        print("Compiled exclusion list")
 
-	for t in tlist:
-		if excl and excl.match(t):
-			continue
+    for t in tlist:
+        if excl and excl.match(t):
+            continue
 
-		td = get_test_desc(t)
+        td = get_test_desc(t)
 
-		for g in pend_groups:
-			if g.merge(t, td):
-				if g.size() == maxs:
-					pend_groups.remove(g)
-					groups.append(g)
-				break
-		else:
-			g = group(t, td)
-			pend_groups.append(g)
+        for g in pend_groups:
+            if g.merge(t, td):
+                if g.size() == maxs:
+                    pend_groups.remove(g)
+                    groups.append(g)
+                break
+        else:
+            g = group(t, td)
+            pend_groups.append(g)
 
-	groups += pend_groups
+    groups += pend_groups
 
-	nr = 0
-	suf = opts['name'] or 'group'
+    nr = 0
+    suf = opts['name'] or 'group'
 
-	for g in groups:
-		if maxs > 1 and g.size() == 1: # Not much point in group test for this
-			continue
+    for g in groups:
+        if maxs > 1 and g.size() == 1:  # Not much point in group test for this
+            continue
 
-		fn = os.path.join("groups", "%s.%d" % (suf, nr))
-		g.dump(fn)
-		nr += 1
+        fn = os.path.join("groups", "%s.%d" % (suf, nr))
+        g.dump(fn)
+        nr += 1
 
-	print("Generated %d group(s)" % nr)
+    print("Generated %d group(s)" % nr)
 
 
 def clean_stuff(opts):
-	print("Cleaning %s" % opts['what'])
-	if opts['what'] == 'nsroot':
-		for f in flavors:
-			f = flavors[f]
-			f.clean()
+    print("Cleaning %s" % opts['what'])
+    if opts['what'] == 'nsroot':
+        for f in flavors:
+            f = flavors[f]
+            f.clean()
 
 
 #
@@ -2230,103 +2355,167 @@ def clean_stuff(opts):
 #
 if 'CR_CT_TEST_INFO' in os.environ:
-	# Fork here, since we're new pidns init and are supposed to
-	# collect this namespace's zombies
-	status = 0
-	pid = os.fork()
-	if pid == 0:
-		tinfo = eval(os.environ['CR_CT_TEST_INFO'])
-		do_run_test(tinfo[0], tinfo[1], tinfo[2], tinfo[3])
-	else:
-		while True:
-			wpid, status = os.wait()
-			if wpid == pid:
-				if os.WIFEXITED(status):
-					status = os.WEXITSTATUS(status)
-				else:
-					status = 1
-				break
+    # Fork here, since we're new pidns init and are supposed to
+    # collect this namespace's zombies
+    status = 0
+    pid = os.fork()
+    if pid == 0:
+        tinfo = eval(os.environ['CR_CT_TEST_INFO'])
+        do_run_test(tinfo[0], tinfo[1], tinfo[2], tinfo[3])
+    else:
+        while True:
+            wpid, status = os.wait()
+            if wpid == pid:
+                if os.WIFEXITED(status):
+                    status = os.WEXITSTATUS(status)
+                else:
+                    status = 1
+                break
 
-	sys.exit(status)
+    sys.exit(status)
 
 p = argparse.ArgumentParser("CRIU test suite")
-p.add_argument("--debug", help = "Print what's being executed", action = 'store_true')
-p.add_argument("--set", help = "Which set of tests to use", default = 'zdtm')
+p.add_argument("--debug",
+               help="Print what's being executed",
+               action='store_true')
+p.add_argument("--set", help="Which set of tests to use", default='zdtm')
 
-sp = p.add_subparsers(help = "Use --help for list of actions")
+sp = p.add_subparsers(help="Use --help for list of actions")
 
-rp = sp.add_parser("run", help = "Run test(s)")
-rp.set_defaults(action = run_tests)
-rp.add_argument("-a", "--all", action = 'store_true')
-rp.add_argument("-t", "--test", help = "Test name", action = 'append')
-rp.add_argument("-T", "--tests", help = "Regexp")
-rp.add_argument("-F", "--from", help = "From file")
-rp.add_argument("-f", "--flavor", help = "Flavor to run")
-rp.add_argument("-x", "--exclude", help = "Exclude tests from --all run", action = 'append') +rp = sp.add_parser("run", help="Run test(s)") +rp.set_defaults(action=run_tests) +rp.add_argument("-a", "--all", action='store_true') +rp.add_argument("-t", "--test", help="Test name", action='append') +rp.add_argument("-T", "--tests", help="Regexp") +rp.add_argument("-F", "--from", help="From file") +rp.add_argument("-f", "--flavor", help="Flavor to run") +rp.add_argument("-x", + "--exclude", + help="Exclude tests from --all run", + action='append') -rp.add_argument("--sibling", help = "Restore tests as siblings", action = 'store_true') -rp.add_argument("--join-ns", help = "Restore tests and join existing namespace", action = 'store_true') -rp.add_argument("--empty-ns", help = "Restore tests in empty net namespace", action = 'store_true') -rp.add_argument("--pre", help = "Do some pre-dumps before dump (n[:pause])") -rp.add_argument("--snaps", help = "Instead of pre-dumps do full dumps", action = 'store_true') -rp.add_argument("--dedup", help = "Auto-deduplicate images on iterations", action = 'store_true') -rp.add_argument("--noauto-dedup", help = "Manual deduplicate images on iterations", action = 'store_true') -rp.add_argument("--nocr", help = "Do not CR anything, just check test works", action = 'store_true') -rp.add_argument("--norst", help = "Don't restore tasks, leave them running after dump", action = 'store_true') -rp.add_argument("--stop", help = "Check that --leave-stopped option stops ps tree.", action = 'store_true') -rp.add_argument("--iters", help = "Do CR cycle several times before check (n[:pause])") -rp.add_argument("--fault", help = "Test fault injection") -rp.add_argument("--sat", help = "Generate criu strace-s for sat tool (restore is fake, images are kept)", action = 'store_true') -rp.add_argument("--sbs", help = "Do step-by-step execution, asking user for keypress to continue", action = 'store_true') -rp.add_argument("--freezecg", help = "Use freeze cgroup (path:state)") -rp.add_argument("--user", help = "Run CRIU as regular user", action = 'store_true') -rp.add_argument("--rpc", help = "Run CRIU via RPC rather than CLI", action = 'store_true') +rp.add_argument("--sibling", + help="Restore tests as siblings", + action='store_true') +rp.add_argument("--join-ns", + help="Restore tests and join existing namespace", + action='store_true') +rp.add_argument("--empty-ns", + help="Restore tests in empty net namespace", + action='store_true') +rp.add_argument("--pre", help="Do some pre-dumps before dump (n[:pause])") +rp.add_argument("--snaps", + help="Instead of pre-dumps do full dumps", + action='store_true') +rp.add_argument("--dedup", + help="Auto-deduplicate images on iterations", + action='store_true') +rp.add_argument("--noauto-dedup", + help="Manual deduplicate images on iterations", + action='store_true') +rp.add_argument("--nocr", + help="Do not CR anything, just check test works", + action='store_true') +rp.add_argument("--norst", + help="Don't restore tasks, leave them running after dump", + action='store_true') +rp.add_argument("--stop", + help="Check that --leave-stopped option stops ps tree.", + action='store_true') +rp.add_argument("--iters", + help="Do CR cycle several times before check (n[:pause])") +rp.add_argument("--fault", help="Test fault injection") +rp.add_argument( + "--sat", + help="Generate criu strace-s for sat tool (restore is fake, images are kept)", + action='store_true') +rp.add_argument( + "--sbs", + help="Do step-by-step execution, asking user for 
keypress to continue", + action='store_true') +rp.add_argument("--freezecg", help="Use freeze cgroup (path:state)") +rp.add_argument("--user", help="Run CRIU as regular user", action='store_true') +rp.add_argument("--rpc", + help="Run CRIU via RPC rather than CLI", + action='store_true') -rp.add_argument("--page-server", help = "Use page server dump", action = 'store_true') -rp.add_argument("-p", "--parallel", help = "Run test in parallel") -rp.add_argument("--dry-run", help="Don't run tests, just pretend to", action='store_true') +rp.add_argument("--page-server", + help="Use page server dump", + action='store_true') +rp.add_argument("--remote", + help="Use remote option for diskless C/R", + action='store_true') +rp.add_argument("-p", "--parallel", help="Run test in parallel") +rp.add_argument("--dry-run", + help="Don't run tests, just pretend to", + action='store_true') rp.add_argument("--script", help="Add script to get notified by criu") -rp.add_argument("-k", "--keep-img", help = "Whether or not to keep images after test", - choices = ['always', 'never', 'failed'], default = 'failed') -rp.add_argument("--report", help = "Generate summary report in directory") -rp.add_argument("--keep-going", help = "Keep running tests in spite of failures", action = 'store_true') -rp.add_argument("--ignore-taint", help = "Don't care about a non-zero kernel taint flag", action = 'store_true') -rp.add_argument("--lazy-pages", help = "restore pages on demand", action = 'store_true') -rp.add_argument("--lazy-migrate", help = "restore pages on demand", action = 'store_true') -rp.add_argument("--remote-lazy-pages", help = "simulate lazy migration", action = 'store_true') -rp.add_argument("--tls", help = "use TLS for migration", action = 'store_true') -rp.add_argument("--title", help = "A test suite title", default = "criu") -rp.add_argument("--show-stats", help = "Show criu statistics", action = 'store_true') -rp.add_argument("--criu-bin", help = "Path to criu binary", default = '../criu/criu') -rp.add_argument("--crit-bin", help = "Path to crit binary", default = '../crit/crit') +rp.add_argument("-k", + "--keep-img", + help="Whether or not to keep images after test", + choices=['always', 'never', 'failed'], + default='failed') +rp.add_argument("--report", help="Generate summary report in directory") +rp.add_argument("--keep-going", + help="Keep running tests in spite of failures", + action='store_true') +rp.add_argument("--ignore-taint", + help="Don't care about a non-zero kernel taint flag", + action='store_true') +rp.add_argument("--lazy-pages", + help="restore pages on demand", + action='store_true') +rp.add_argument("--lazy-migrate", + help="restore pages on demand", + action='store_true') +rp.add_argument("--remote-lazy-pages", + help="simulate lazy migration", + action='store_true') +rp.add_argument("--tls", help="use TLS for migration", action='store_true') +rp.add_argument("--title", help="A test suite title", default="criu") +rp.add_argument("--show-stats", + help="Show criu statistics", + action='store_true') +rp.add_argument("--criu-bin", + help="Path to criu binary", + default='../criu/criu') +rp.add_argument("--crit-bin", + help="Path to crit binary", + default='../crit/crit') -lp = sp.add_parser("list", help = "List tests") -lp.set_defaults(action = list_tests) -lp.add_argument('-i', '--info', help = "Show more info about tests", action = 'store_true') +lp = sp.add_parser("list", help="List tests") +lp.set_defaults(action=list_tests) +lp.add_argument('-i', + '--info', + help="Show more info 
about tests", + action='store_true') -gp = sp.add_parser("group", help = "Generate groups") -gp.set_defaults(action = group_tests) -gp.add_argument("-m", "--max-size", help = "Maximum number of tests in group") -gp.add_argument("-n", "--name", help = "Common name for group tests") -gp.add_argument("-x", "--exclude", help = "Exclude tests from --all run", action = 'append') +gp = sp.add_parser("group", help="Generate groups") +gp.set_defaults(action=group_tests) +gp.add_argument("-m", "--max-size", help="Maximum number of tests in group") +gp.add_argument("-n", "--name", help="Common name for group tests") +gp.add_argument("-x", + "--exclude", + help="Exclude tests from --all run", + action='append') -cp = sp.add_parser("clean", help = "Clean something") -cp.set_defaults(action = clean_stuff) -cp.add_argument("what", choices = ['nsroot']) +cp = sp.add_parser("clean", help="Clean something") +cp.set_defaults(action=clean_stuff) +cp.add_argument("what", choices=['nsroot']) opts = vars(p.parse_args()) if opts.get('sat', False): - opts['keep_img'] = 'always' + opts['keep_img'] = 'always' if opts['debug']: - sys.settrace(traceit) + sys.settrace(traceit) if opts['action'] == 'run': - criu.available() + criu.available() for tst in test_classes.values(): - tst.available() + tst.available() opts['action'](opts) for tst in test_classes.values(): - tst.cleanup() + tst.cleanup()