mirror of https://github.com/checkpoint-restore/criu synced 2025-08-22 09:58:09 +00:00

py: Reformat everything into pep8 style

As discussed on the mailing list, the current formatting of our .py files does
not conform to the widely accepted standard, so we had better reformat them.
The yapf tool is used for this. The command I used was

  yapf -i $(find -name *.py)

Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
Andrei Vagin 2019-09-07 15:46:22 +03:00
parent 5ff4fcb753
commit 5aa72e7237
28 changed files with 5738 additions and 5167 deletions
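
For reference, the same sweep can be scripted instead of relying on shell globbing. The sketch below is not part of the commit; it assumes yapf is installed and simply walks the tree and hands every .py file to the same "yapf -i" invocation used above.

    import os
    import subprocess

    # Collect every .py file under the current directory explicitly,
    # instead of depending on how the shell expands "find -name *.py".
    py_files = []
    for root, _dirs, names in os.walk('.'):
        py_files += [os.path.join(root, n) for n in names if n.endswith('.py')]

    # yapf is assumed to be installed; -i rewrites files in place and
    # --style=pep8 matches the target style of this commit.
    subprocess.check_call(['yapf', '-i', '--style=pep8'] + py_files)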

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -6,337 +6,409 @@ import os
import pycriu


def inf(opts):
    if opts['in']:
        return open(opts['in'], 'rb')
    else:
        return sys.stdin


def outf(opts):
    if opts['out']:
        return open(opts['out'], 'w+')
    else:
        return sys.stdout


def dinf(opts, name):
    return open(os.path.join(opts['dir'], name))


def decode(opts):
    indent = None

    try:
        img = pycriu.images.load(inf(opts), opts['pretty'], opts['nopl'])
    except pycriu.images.MagicException as exc:
        print("Unknown magic %#x.\n"\
              "Maybe you are feeding me an image with "\
              "raw data(i.e. pages.img)?" % exc.magic, file=sys.stderr)
        sys.exit(1)
    if opts['pretty']:
        indent = 4

    f = outf(opts)
    json.dump(img, f, indent=indent)
    if f == sys.stdout:
        f.write("\n")


def encode(opts):
    img = json.load(inf(opts))
    pycriu.images.dump(img, outf(opts))


def info(opts):
    infs = pycriu.images.info(inf(opts))
    json.dump(infs, sys.stdout, indent=4)
    print()


def get_task_id(p, val):
    return p[val] if val in p else p['ns_' + val][0]


#
# Explorers
#


class ps_item:
    def __init__(self, p, core):
        self.pid = get_task_id(p, 'pid')
        self.ppid = p['ppid']
        self.p = p
        self.core = core
        self.kids = []


def show_ps(p, opts, depth=0):
    print("%7d%7d%7d %s%s" %
          (p.pid, get_task_id(p.p, 'pgid'), get_task_id(p.p, 'sid'), ' ' *
           (4 * depth), p.core['tc']['comm']))
    for kid in p.kids:
        show_ps(kid, opts, depth + 1)


def explore_ps(opts):
    pss = {}
    ps_img = pycriu.images.load(dinf(opts, 'pstree.img'))
    for p in ps_img['entries']:
        core = pycriu.images.load(
            dinf(opts, 'core-%d.img' % get_task_id(p, 'pid')))
        ps = ps_item(p, core['entries'][0])
        pss[ps.pid] = ps

    # Build tree
    psr = None
    for pid in pss:
        p = pss[pid]
        if p.ppid == 0:
            psr = p
            continue

        pp = pss[p.ppid]
        pp.kids.append(p)

    print("%7s%7s%7s %s" % ('PID', 'PGID', 'SID', 'COMM'))
    show_ps(psr, opts)


files_img = None


def ftype_find_in_files(opts, ft, fid):
    global files_img

    if files_img is None:
        try:
            files_img = pycriu.images.load(dinf(opts, "files.img"))['entries']
        except:
            files_img = []

    if len(files_img) == 0:
        return None

    for f in files_img:
        if f['id'] == fid:
            return f

    return None


def ftype_find_in_image(opts, ft, fid, img):
    f = ftype_find_in_files(opts, ft, fid)
    if f:
        return f[ft['field']]

    if ft['img'] == None:
        ft['img'] = pycriu.images.load(dinf(opts, img))['entries']
    for f in ft['img']:
        if f['id'] == fid:
            return f
    return None


def ftype_reg(opts, ft, fid):
    rf = ftype_find_in_image(opts, ft, fid, 'reg-files.img')
    return rf and rf['name'] or 'unknown path'


def ftype_pipe(opts, ft, fid):
    p = ftype_find_in_image(opts, ft, fid, 'pipes.img')
    return p and 'pipe[%d]' % p['pipe_id'] or 'pipe[?]'


def ftype_unix(opts, ft, fid):
    ux = ftype_find_in_image(opts, ft, fid, 'unixsk.img')
    if not ux:
        return 'unix[?]'

    n = ux['name'] and ' %s' % ux['name'] or ''
    return 'unix[%d (%d)%s]' % (ux['ino'], ux['peer'], n)


file_types = {
    'REG': {
        'get': ftype_reg,
        'img': None,
        'field': 'reg'
    },
    'PIPE': {
        'get': ftype_pipe,
        'img': None,
        'field': 'pipe'
    },
    'UNIXSK': {
        'get': ftype_unix,
        'img': None,
        'field': 'usk'
    },
}


def ftype_gen(opts, ft, fid):
    return '%s.%d' % (ft['typ'], fid)


files_cache = {}


def get_file_str(opts, fd):
    key = (fd['type'], fd['id'])
    f = files_cache.get(key, None)
    if not f:
        ft = file_types.get(fd['type'], {'get': ftype_gen, 'typ': fd['type']})
        f = ft['get'](opts, ft, fd['id'])
        files_cache[key] = f

    return f


def explore_fds(opts):
    ps_img = pycriu.images.load(dinf(opts, 'pstree.img'))
    for p in ps_img['entries']:
        pid = get_task_id(p, 'pid')
        idi = pycriu.images.load(dinf(opts, 'ids-%s.img' % pid))
        fdt = idi['entries'][0]['files_id']
        fdi = pycriu.images.load(dinf(opts, 'fdinfo-%d.img' % fdt))

        print("%d" % pid)
        for fd in fdi['entries']:
            print("\t%7d: %s" % (fd['fd'], get_file_str(opts, fd)))

        fdi = pycriu.images.load(dinf(opts, 'fs-%d.img' % pid))['entries'][0]
        print("\t%7s: %s" %
              ('cwd', get_file_str(opts, {
                  'type': 'REG',
                  'id': fdi['cwd_id']
              })))
        print("\t%7s: %s" %
              ('root', get_file_str(opts, {
                  'type': 'REG',
                  'id': fdi['root_id']
              })))


class vma_id:
    def __init__(self):
        self.__ids = {}
        self.__last = 1

    def get(self, iid):
        ret = self.__ids.get(iid, None)
        if not ret:
            ret = self.__last
            self.__last += 1
            self.__ids[iid] = ret

        return ret


def explore_mems(opts):
    ps_img = pycriu.images.load(dinf(opts, 'pstree.img'))
    vids = vma_id()
    for p in ps_img['entries']:
        pid = get_task_id(p, 'pid')
        mmi = pycriu.images.load(dinf(opts, 'mm-%d.img' % pid))['entries'][0]

        print("%d" % pid)
        print("\t%-36s %s" % ('exe',
                              get_file_str(opts, {
                                  'type': 'REG',
                                  'id': mmi['exe_file_id']
                              })))

        for vma in mmi['vmas']:
            st = vma['status']
            if st & (1 << 10):
                fn = ' ' + 'ips[%lx]' % vids.get(vma['shmid'])
            elif st & (1 << 8):
                fn = ' ' + 'shmem[%lx]' % vids.get(vma['shmid'])
            elif st & (1 << 11):
                fn = ' ' + 'packet[%lx]' % vids.get(vma['shmid'])
            elif st & ((1 << 6) | (1 << 7)):
                fn = ' ' + get_file_str(opts, {
                    'type': 'REG',
                    'id': vma['shmid']
                })
                if vma['pgoff']:
                    fn += ' + %#lx' % vma['pgoff']
                if st & (1 << 7):
                    fn += ' (s)'
            elif st & (1 << 1):
                fn = ' [stack]'
            elif st & (1 << 2):
                fn = ' [vsyscall]'
            elif st & (1 << 3):
                fn = ' [vdso]'
            elif vma['flags'] & 0x0100:  # growsdown
                fn = ' [stack?]'
            else:
                fn = ''

            if not st & (1 << 0):
                fn += ' *'

            prot = vma['prot'] & 0x1 and 'r' or '-'
            prot += vma['prot'] & 0x2 and 'w' or '-'
            prot += vma['prot'] & 0x4 and 'x' or '-'

            astr = '%08lx-%08lx' % (vma['start'], vma['end'])
            print("\t%-36s%s%s" % (astr, prot, fn))


def explore_rss(opts):
    ps_img = pycriu.images.load(dinf(opts, 'pstree.img'))
    for p in ps_img['entries']:
        pid = get_task_id(p, 'pid')
        vmas = pycriu.images.load(dinf(opts, 'mm-%d.img' %
                                       pid))['entries'][0]['vmas']
        pms = pycriu.images.load(dinf(opts, 'pagemap-%d.img' % pid))['entries']

        print("%d" % pid)

        vmi = 0
        pvmi = -1
        for pm in pms[1:]:
            pstr = '\t%lx / %-8d' % (pm['vaddr'], pm['nr_pages'])
            while vmas[vmi]['end'] <= pm['vaddr']:
                vmi += 1

            pme = pm['vaddr'] + (pm['nr_pages'] << 12)
            vstr = ''
            while vmas[vmi]['start'] < pme:
                vma = vmas[vmi]
                if vmi == pvmi:
                    vstr += ' ~'
                else:
                    vstr += ' %08lx / %-8d' % (
                        vma['start'], (vma['end'] - vma['start']) >> 12)
                    if vma['status'] & ((1 << 6) | (1 << 7)):
                        vstr += ' ' + get_file_str(opts, {
                            'type': 'REG',
                            'id': vma['shmid']
                        })
                    pvmi = vmi
                vstr += '\n\t%23s' % ''
                vmi += 1

            vmi -= 1
            print('%-24s%s' % (pstr, vstr))


explorers = {
    'ps': explore_ps,
    'fds': explore_fds,
    'mems': explore_mems,
    'rss': explore_rss
}


def explore(opts):
    explorers[opts['what']](opts)


def main():
    desc = 'CRiu Image Tool'
    parser = argparse.ArgumentParser(
        description=desc, formatter_class=argparse.RawTextHelpFormatter)

    subparsers = parser.add_subparsers(
        help='Use crit CMD --help for command-specific help')

    # Decode
    decode_parser = subparsers.add_parser(
        'decode', help='convert criu image from binary type to json')
    decode_parser.add_argument(
        '--pretty',
        help=
        'Multiline with indents and some numerical fields in field-specific format',
        action='store_true')
    decode_parser.add_argument(
        '-i',
        '--in',
        help='criu image in binary format to be decoded (stdin by default)')
    decode_parser.add_argument(
        '-o',
        '--out',
        help='where to put criu image in json format (stdout by default)')
    decode_parser.set_defaults(func=decode, nopl=False)

    # Encode
    encode_parser = subparsers.add_parser(
        'encode', help='convert criu image from json type to binary')
    encode_parser.add_argument(
        '-i',
        '--in',
        help='criu image in json format to be encoded (stdin by default)')
    encode_parser.add_argument(
        '-o',
        '--out',
        help='where to put criu image in binary format (stdout by default)')
    encode_parser.set_defaults(func=encode)

    # Info
    info_parser = subparsers.add_parser('info', help='show info about image')
    info_parser.add_argument("in")
    info_parser.set_defaults(func=info)

    # Explore
    x_parser = subparsers.add_parser('x', help='explore image dir')
    x_parser.add_argument('dir')
    x_parser.add_argument('what', choices=['ps', 'fds', 'mems', 'rss'])
    x_parser.set_defaults(func=explore)

    # Show
    show_parser = subparsers.add_parser(
        'show', help="convert criu image from binary to human-readable json")
    show_parser.add_argument("in")
    show_parser.add_argument('--nopl',
                             help='do not show entry payload (if exists)',
                             action='store_true')
    show_parser.set_defaults(func=decode, pretty=True, out=None)

    opts = vars(parser.parse_args())

    if not opts:
        sys.stderr.write(parser.format_usage())
        sys.stderr.write("crit: error: too few arguments\n")
        sys.exit(1)

    opts["func"](opts)


if __name__ == '__main__':
    main()


@@ -8,325 +8,336 @@ import struct
import pycriu.rpc_pb2 as rpc


class _criu_comm:
    """
    Base class for communication classes.
    """
    COMM_SK = 0
    COMM_FD = 1
    COMM_BIN = 2
    comm_type = None
    comm = None
    sk = None

    def connect(self, daemon):
        """
        Connect to criu and return socket object.
        daemon -- is for whether or not criu should daemonize if executing criu from binary(comm_bin).
        """
        pass

    def disconnect(self):
        """
        Disconnect from criu.
        """
        pass


class _criu_comm_sk(_criu_comm):
    """
    Communication class for unix socket.
    """

    def __init__(self, sk_path):
        self.comm_type = self.COMM_SK
        self.comm = sk_path

    def connect(self, daemon):
        self.sk = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
        self.sk.connect(self.comm)

        return self.sk

    def disconnect(self):
        self.sk.close()


class _criu_comm_fd(_criu_comm):
    """
    Communication class for file descriptor.
    """

    def __init__(self, fd):
        self.comm_type = self.COMM_FD
        self.comm = fd

    def connect(self, daemon):
        self.sk = socket.fromfd(self.comm, socket.AF_UNIX,
                                socket.SOCK_SEQPACKET)

        return self.sk

    def disconnect(self):
        self.sk.close()


class _criu_comm_bin(_criu_comm):
    """
    Communication class for binary.
    """

    def __init__(self, bin_path):
        self.comm_type = self.COMM_BIN
        self.comm = bin_path
        self.swrk = None
        self.daemon = None

    def connect(self, daemon):
        # Kind of the same thing we do in libcriu
        css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
        flags = fcntl.fcntl(css[1], fcntl.F_GETFD)
        fcntl.fcntl(css[1], fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
        flags = fcntl.fcntl(css[0], fcntl.F_GETFD)
        fcntl.fcntl(css[0], fcntl.F_SETFD, flags & ~fcntl.FD_CLOEXEC)

        self.daemon = daemon

        p = os.fork()

        if p == 0:

            def exec_criu():
                os.close(0)
                os.close(1)
                os.close(2)

                css[0].send(struct.pack('i', os.getpid()))
                os.execv(self.comm,
                         [self.comm, 'swrk',
                          "%d" % css[0].fileno()])
                os._exit(1)

            if daemon:
                # Python has no daemon(3) alternative,
                # so we need to mimic it ourself.
                p = os.fork()

                if p == 0:
                    os.setsid()

                    exec_criu()
                else:
                    os._exit(0)
            else:
                exec_criu()
        else:
            if daemon:
                os.waitpid(p, 0)

        css[0].close()
        self.swrk = struct.unpack('i', css[1].recv(4))[0]
        self.sk = css[1]

        return self.sk

    def disconnect(self):
        self.sk.close()
        if not self.daemon:
            os.waitpid(self.swrk, 0)


class CRIUException(Exception):
    """
    Exception class for handling and storing criu errors.
    """
    typ = None
    _str = None

    def __str__(self):
        return self._str


class CRIUExceptionInternal(CRIUException):
    """
    Exception class for handling and storing internal errors.
    """

    def __init__(self, typ, s):
        self.typ = typ
        self._str = "%s failed with internal error: %s" % (
            rpc.criu_req_type.Name(self.typ), s)


class CRIUExceptionExternal(CRIUException):
    """
    Exception class for handling and storing criu RPC errors.
    """

    def __init__(self, req_typ, resp_typ, errno):
        self.typ = req_typ
        self.resp_typ = resp_typ
        self.errno = errno
        self._str = self._gen_error_str()

    def _gen_error_str(self):
        s = "%s failed: " % (rpc.criu_req_type.Name(self.typ), )

        if self.typ != self.resp_typ:
            s += "Unexpected response type %d: " % (self.resp_typ, )

        s += "Error(%d): " % (self.errno, )

        if self.errno == errno.EBADRQC:
            s += "Bad options"

        if self.typ == rpc.DUMP:
            if self.errno == errno.ESRCH:
                s += "No process with such pid"

        if self.typ == rpc.RESTORE:
            if self.errno == errno.EEXIST:
                s += "Process with requested pid already exists"

        s += "Unknown"

        return s


class criu:
    """
    Call criu through RPC.
    """
    opts = None  #CRIU options in pb format
    _comm = None  #Communication method

    def __init__(self):
        self.use_binary('criu')
        self.opts = rpc.criu_opts()
        self.sk = None

    def use_sk(self, sk_name):
        """
        Access criu using unix socket which that belongs to criu service daemon.
        """
        self._comm = _criu_comm_sk(sk_name)

    def use_fd(self, fd):
        """
        Access criu using provided fd.
        """
        self._comm = _criu_comm_fd(fd)

    def use_binary(self, bin_name):
        """
        Access criu by execing it using provided path to criu binary.
        """
        self._comm = _criu_comm_bin(bin_name)

    def _send_req_and_recv_resp(self, req):
        """
        As simple as send request and receive response.
        """
        # In case of self-dump we need to spawn criu swrk detached
        # from our current process, as criu has a hard time separating
        # process resources from its own if criu is located in a same
        # process tree it is trying to dump.
        daemon = False
        if req.type == rpc.DUMP and not req.opts.HasField('pid'):
            daemon = True

        try:
            if not self.sk:
                s = self._comm.connect(daemon)
            else:
                s = self.sk

            if req.keep_open:
                self.sk = s

            s.send(req.SerializeToString())

            buf = s.recv(len(s.recv(1, socket.MSG_TRUNC | socket.MSG_PEEK)))

            if not req.keep_open:
                self._comm.disconnect()

            resp = rpc.criu_resp()
            resp.ParseFromString(buf)
        except Exception as e:
            raise CRIUExceptionInternal(req.type, str(e))

        return resp

    def check(self):
        """
        Checks whether the kernel support is up-to-date.
        """
        req = rpc.criu_req()
        req.type = rpc.CHECK

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

    def dump(self):
        """
        Checkpoint a process/tree identified by opts.pid.
        """
        req = rpc.criu_req()
        req.type = rpc.DUMP
        req.opts.MergeFrom(self.opts)

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

        return resp.dump

    def pre_dump(self):
        """
        Checkpoint a process/tree identified by opts.pid.
        """
        req = rpc.criu_req()
        req.type = rpc.PRE_DUMP
        req.opts.MergeFrom(self.opts)

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

        return resp.dump

    def restore(self):
        """
        Restore a process/tree.
        """
        req = rpc.criu_req()
        req.type = rpc.RESTORE
        req.opts.MergeFrom(self.opts)

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

        return resp.restore

    def page_server_chld(self):
        req = rpc.criu_req()
        req.type = rpc.PAGE_SERVER_CHLD
        req.opts.MergeFrom(self.opts)
        req.keep_open = True

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

        return resp.ps

    def wait_pid(self, pid):
        req = rpc.criu_req()
        req.type = rpc.WAIT_PID
        req.pid = pid

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

        return resp.status
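
A minimal usage sketch of the RPC wrapper above (not part of this commit): it assumes the criu class is exposed as pycriu.criu, that a 'criu' binary is on PATH, and that a './dump-dir' directory already exists. The option names (pid, images_dir_fd, shell_job, log_level) come from CRIU's rpc.proto; the pid is a hypothetical example.

    import os

    import pycriu

    c = pycriu.criu()
    c.use_binary('criu')  # exec criu in 'swrk' mode, as _criu_comm_bin does

    c.opts.pid = 12345    # hypothetical pid of the process tree to dump
    c.opts.images_dir_fd = os.open('dump-dir', os.O_DIRECTORY)
    c.opts.shell_job = True
    c.opts.log_level = 4

    try:
        c.check()  # verify kernel support first
        c.dump()   # raises one of the CRIUException subclasses on failure
    except Exception as e:
        print("criu failed: %s" % e)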


@@ -48,8 +48,8 @@ from . import pb
from . import pb2dict

if "encodebytes" not in dir(base64):
    base64.encodebytes = base64.encodestring
    base64.decodebytes = base64.decodestring

#
# Predefined hardcoded constants

@@ -57,233 +57,241 @@ sizeof_u16 = 2
sizeof_u32 = 4
sizeof_u64 = 8


# A helper for rounding
def round_up(x, y):
    return (((x - 1) | (y - 1)) + 1)


class MagicException(Exception):
    def __init__(self, magic):
        self.magic = magic


# Generic class to handle loading/dumping criu images entries from/to bin
# format to/from dict(json).
class entry_handler:
    """
    Generic class to handle loading/dumping criu images
    entries from/to bin format to/from dict(json).
    """

    def __init__(self, payload, extra_handler=None):
        """
        Sets payload class and extra handler class.
        """
        self.payload = payload
        self.extra_handler = extra_handler

    def load(self, f, pretty=False, no_payload=False):
        """
        Convert criu image entries from binary format to dict(json).
        Takes a file-like object and returnes a list with entries in
        dict(json) format.
        """
        entries = []

        while True:
            entry = {}

            # Read payload
            pbuff = self.payload()
            buf = f.read(4)
            if buf == b'':
                break
            size, = struct.unpack('i', buf)
            pbuff.ParseFromString(f.read(size))
            entry = pb2dict.pb2dict(pbuff, pretty)

            # Read extra
            if self.extra_handler:
                if no_payload:

                    def human_readable(num):
                        for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
                            if num < 1024.0:
                                if int(num) == num:
                                    return "%d%sB" % (num, unit)
                                else:
                                    return "%.1f%sB" % (num, unit)
                            num /= 1024.0
                        return "%.1fYB" % num

                    pl_size = self.extra_handler.skip(f, pbuff)
                    entry['extra'] = '... <%s>' % human_readable(pl_size)
                else:
                    entry['extra'] = self.extra_handler.load(f, pbuff)

            entries.append(entry)

        return entries

    def loads(self, s, pretty=False):
        """
        Same as load(), but takes a string as an argument.
        """
        f = io.BytesIO(s)
        return self.load(f, pretty)

    def dump(self, entries, f):
        """
        Convert criu image entries from dict(json) format to binary.
        Takes a list of entries and a file-like object to write entries
        in binary format to.
        """
        for entry in entries:
            extra = entry.pop('extra', None)

            # Write payload
            pbuff = self.payload()
            pb2dict.dict2pb(entry, pbuff)
            pb_str = pbuff.SerializeToString()
            size = len(pb_str)
            f.write(struct.pack('i', size))
            f.write(pb_str)

            # Write extra
            if self.extra_handler and extra:
                self.extra_handler.dump(extra, f, pbuff)

    def dumps(self, entries):
        """
        Same as dump(), but doesn't take file-like object and just
        returns a string.
        """
        f = io.BytesIO('')
        self.dump(entries, f)
        return f.read()

    def count(self, f):
        """
        Counts the number of top-level object in the image file
        """
        entries = 0

        while True:
            buf = f.read(4)
            if buf == '':
                break
            size, = struct.unpack('i', buf)
            f.seek(size, 1)
            entries += 1

        return entries


# Special handler for pagemap.img
class pagemap_handler:
    """
    Special entry handler for pagemap.img, which is unique in a way
    that it has a header of pagemap_head type followed by entries
    of pagemap_entry type.
    """

    def load(self, f, pretty=False, no_payload=False):
        entries = []

        pbuff = pb.pagemap_head()
        while True:
            buf = f.read(4)
            if buf == b'':
                break
            size, = struct.unpack('i', buf)
            pbuff.ParseFromString(f.read(size))
            entries.append(pb2dict.pb2dict(pbuff, pretty))

            pbuff = pb.pagemap_entry()

        return entries

    def loads(self, s, pretty=False):
        f = io.BytesIO(s)
        return self.load(f, pretty)

    def dump(self, entries, f):
        pbuff = pb.pagemap_head()
        for item in entries:
            pb2dict.dict2pb(item, pbuff)
            pb_str = pbuff.SerializeToString()
            size = len(pb_str)
            f.write(struct.pack('i', size))
            f.write(pb_str)

            pbuff = pb.pagemap_entry()

    def dumps(self, entries):
        f = io.BytesIO('')
        self.dump(entries, f)
        return f.read()

    def count(self, f):
        return entry_handler(None).count(f) - 1


# Special handler for ghost-file.img
class ghost_file_handler:
    def load(self, f, pretty=False, no_payload=False):
        entries = []

        gf = pb.ghost_file_entry()
        buf = f.read(4)
        size, = struct.unpack('i', buf)
        gf.ParseFromString(f.read(size))
        g_entry = pb2dict.pb2dict(gf, pretty)

        if gf.chunks:
            entries.append(g_entry)
            while True:
                gc = pb.ghost_chunk_entry()
                buf = f.read(4)
                if buf == '':
                    break
                size, = struct.unpack('i', buf)
                gc.ParseFromString(f.read(size))
                entry = pb2dict.pb2dict(gc, pretty)
                if no_payload:
                    f.seek(gc.len, os.SEEK_CUR)
                else:
                    entry['extra'] = base64.encodebytes(f.read(gc.len))
                entries.append(entry)
        else:
            if no_payload:
                f.seek(0, os.SEEK_END)
            else:
                g_entry['extra'] = base64.encodebytes(f.read())
            entries.append(g_entry)

        return entries

    def loads(self, s, pretty=False):
        f = io.BytesIO(s)
        return self.load(f, pretty)

    def dump(self, entries, f):
        pbuff = pb.ghost_file_entry()
        item = entries.pop(0)
        pb2dict.dict2pb(item, pbuff)
        pb_str = pbuff.SerializeToString()
        size = len(pb_str)
        f.write(struct.pack('i', size))
        f.write(pb_str)

        if pbuff.chunks:
            for item in entries:
                pbuff = pb.ghost_chunk_entry()
                pb2dict.dict2pb(item, pbuff)
                pb_str = pbuff.SerializeToString()
                size = len(pb_str)
                f.write(struct.pack('i', size))
                f.write(pb_str)
                f.write(base64.decodebytes(item['extra']))
        else:
            f.write(base64.decodebytes(item['extra']))

    def dumps(self, entries):
        f = io.BytesIO('')
        self.dump(entries, f)
        return f.read()


# In following extra handlers we use base64 encoding

@@ -293,304 +301,317 @@ class ghost_file_handler:
# do not store big amounts of binary data. They
# are negligible comparing to pages size.
class pipes_data_extra_handler:
    def load(self, f, pload):
        size = pload.bytes
        data = f.read(size)
        return base64.encodebytes(data)

    def dump(self, extra, f, pload):
        data = base64.decodebytes(extra)
        f.write(data)

    def skip(self, f, pload):
        f.seek(pload.bytes, os.SEEK_CUR)
        return pload.bytes


class sk_queues_extra_handler:
    def load(self, f, pload):
        size = pload.length
        data = f.read(size)
        return base64.encodebytes(data)

    def dump(self, extra, f, _unused):
        data = base64.decodebytes(extra)
        f.write(data)

    def skip(self, f, pload):
        f.seek(pload.length, os.SEEK_CUR)
        return pload.length


class tcp_stream_extra_handler:
    def load(self, f, pbuff):
        d = {}

        inq = f.read(pbuff.inq_len)
        outq = f.read(pbuff.outq_len)

        d['inq'] = base64.encodebytes(inq)
        d['outq'] = base64.encodebytes(outq)

        return d

    def dump(self, extra, f, _unused):
        inq = base64.decodebytes(extra['inq'])
        outq = base64.decodebytes(extra['outq'])

        f.write(inq)
        f.write(outq)

    def skip(self, f, pbuff):
        f.seek(0, os.SEEK_END)
        return pbuff.inq_len + pbuff.outq_len


class ipc_sem_set_handler:
    def load(self, f, pbuff):
        entry = pb2dict.pb2dict(pbuff)
        size = sizeof_u16 * entry['nsems']
        rounded = round_up(size, sizeof_u64)
        s = array.array('H')
        if s.itemsize != sizeof_u16:
            raise Exception("Array size mismatch")
        s.fromstring(f.read(size))
        f.seek(rounded - size, 1)
        return s.tolist()

    def dump(self, extra, f, pbuff):
        entry = pb2dict.pb2dict(pbuff)
        size = sizeof_u16 * entry['nsems']
        rounded = round_up(size, sizeof_u64)
        s = array.array('H')
        if s.itemsize != sizeof_u16:
            raise Exception("Array size mismatch")
        s.fromlist(extra)
        if len(s) != entry['nsems']:
            raise Exception("Number of semaphores mismatch")
        f.write(s.tostring())
        f.write('\0' * (rounded - size))

    def skip(self, f, pbuff):
        entry = pb2dict.pb2dict(pbuff)
        size = sizeof_u16 * entry['nsems']
        f.seek(round_up(size, sizeof_u64), os.SEEK_CUR)
        return size


class ipc_msg_queue_handler:
    def load(self, f, pbuff):
        entry = pb2dict.pb2dict(pbuff)
        messages = []
        for x in range(0, entry['qnum']):
            buf = f.read(4)
            if buf == '':
                break
            size, = struct.unpack('i', buf)
            msg = pb.ipc_msg()
            msg.ParseFromString(f.read(size))
            rounded = round_up(msg.msize, sizeof_u64)
            data = f.read(msg.msize)
            f.seek(rounded - msg.msize, 1)
            messages.append(pb2dict.pb2dict(msg))
            messages.append(base64.encodebytes(data))
        return messages

    def dump(self, extra, f, pbuff):
        entry = pb2dict.pb2dict(pbuff)
        for i in range(0, len(extra), 2):
            msg = pb.ipc_msg()
            pb2dict.dict2pb(extra[i], msg)
            msg_str = msg.SerializeToString()
            size = len(msg_str)
            f.write(struct.pack('i', size))
            f.write(msg_str)
            rounded = round_up(msg.msize, sizeof_u64)
            data = base64.decodebytes(extra[i + 1])
            f.write(data[:msg.msize])
            f.write('\0' * (rounded - msg.msize))

    def skip(self, f, pbuff):
        entry = pb2dict.pb2dict(pbuff)
        pl_len = 0
        for x in range(0, entry['qnum']):
            buf = f.read(4)
            if buf == '':
                break
            size, = struct.unpack('i', buf)
            msg = pb.ipc_msg()
            msg.ParseFromString(f.read(size))
            rounded = round_up(msg.msize, sizeof_u64)
            f.seek(rounded, os.SEEK_CUR)
            pl_len += size + msg.msize

        return pl_len


class ipc_shm_handler:
    def load(self, f, pbuff):
        entry = pb2dict.pb2dict(pbuff)
        size = entry['size']
        data = f.read(size)
        rounded = round_up(size, sizeof_u32)
        f.seek(rounded - size, 1)
        return base64.encodebytes(data)

    def dump(self, extra, f, pbuff):
        entry = pb2dict.pb2dict(pbuff)
        size = entry['size']
        data = base64.decodebytes(extra)
        rounded = round_up(size, sizeof_u32)
        f.write(data[:size])
        f.write('\0' * (rounded - size))

    def skip(self, f, pbuff):
        entry = pb2dict.pb2dict(pbuff)
        size = entry['size']
        rounded = round_up(size, sizeof_u32)
        f.seek(rounded, os.SEEK_CUR)
        return size


handlers = {
    'INVENTORY': entry_handler(pb.inventory_entry),
    'CORE': entry_handler(pb.core_entry),
    'IDS': entry_handler(pb.task_kobj_ids_entry),
    'CREDS': entry_handler(pb.creds_entry),
    'UTSNS': entry_handler(pb.utsns_entry),
    'IPC_VAR': entry_handler(pb.ipc_var_entry),
    'FS': entry_handler(pb.fs_entry),
    'GHOST_FILE': ghost_file_handler(),
    'MM': entry_handler(pb.mm_entry),
    'CGROUP': entry_handler(pb.cgroup_entry),
    'TCP_STREAM': entry_handler(pb.tcp_stream_entry,
                                tcp_stream_extra_handler()),
    'STATS': entry_handler(pb.stats_entry),
    'PAGEMAP': pagemap_handler(),  # Special one
    'PSTREE': entry_handler(pb.pstree_entry),
    'REG_FILES': entry_handler(pb.reg_file_entry),
    'NS_FILES': entry_handler(pb.ns_file_entry),
    'EVENTFD_FILE': entry_handler(pb.eventfd_file_entry),
    'EVENTPOLL_FILE': entry_handler(pb.eventpoll_file_entry),
    'EVENTPOLL_TFD': entry_handler(pb.eventpoll_tfd_entry),
    'SIGNALFD': entry_handler(pb.signalfd_entry),
    'TIMERFD': entry_handler(pb.timerfd_entry),
    'INOTIFY_FILE': entry_handler(pb.inotify_file_entry),
    'INOTIFY_WD': entry_handler(pb.inotify_wd_entry),
    'FANOTIFY_FILE': entry_handler(pb.fanotify_file_entry),
    'FANOTIFY_MARK': entry_handler(pb.fanotify_mark_entry),
    'VMAS': entry_handler(pb.vma_entry),
    'PIPES': entry_handler(pb.pipe_entry),
    'FIFO': entry_handler(pb.fifo_entry),
    'SIGACT': entry_handler(pb.sa_entry),
    'NETLINK_SK': entry_handler(pb.netlink_sk_entry),
    'REMAP_FPATH': entry_handler(pb.remap_file_path_entry),
    'MNTS': entry_handler(pb.mnt_entry),
    'TTY_FILES': entry_handler(pb.tty_file_entry),
    'TTY_INFO': entry_handler(pb.tty_info_entry),
    'TTY_DATA': entry_handler(pb.tty_data_entry),
    'RLIMIT': entry_handler(pb.rlimit_entry),
    'TUNFILE': entry_handler(pb.tunfile_entry),
    'EXT_FILES': entry_handler(pb.ext_file_entry),
    'IRMAP_CACHE': entry_handler(pb.irmap_cache_entry),
    'FILE_LOCKS': entry_handler(pb.file_lock_entry),
    'FDINFO': entry_handler(pb.fdinfo_entry),
    'UNIXSK': entry_handler(pb.unix_sk_entry),
    'INETSK': entry_handler(pb.inet_sk_entry),
    'PACKETSK': entry_handler(pb.packet_sock_entry),
    'ITIMERS': entry_handler(pb.itimer_entry),
    'POSIX_TIMERS': entry_handler(pb.posix_timer_entry),
    'NETDEV': entry_handler(pb.net_device_entry),
    'PIPES_DATA': entry_handler(pb.pipe_data_entry,
                                pipes_data_extra_handler()),
    'FIFO_DATA': entry_handler(pb.pipe_data_entry, pipes_data_extra_handler()),
    'SK_QUEUES': entry_handler(pb.sk_packet_entry, sk_queues_extra_handler()),
    'IPCNS_SHM': entry_handler(pb.ipc_shm_entry, ipc_shm_handler()),
    'IPCNS_SEM': entry_handler(pb.ipc_sem_entry, ipc_sem_set_handler()),
    'IPCNS_MSG': entry_handler(pb.ipc_msg_entry, ipc_msg_queue_handler()),
    'NETNS': entry_handler(pb.netns_entry),
    'USERNS': entry_handler(pb.userns_entry),
    'SECCOMP': entry_handler(pb.seccomp_entry),
    'AUTOFS': entry_handler(pb.autofs_entry),
    'FILES': entry_handler(pb.file_entry),
    'CPUINFO': entry_handler(pb.cpuinfo_entry),
}


def __rhandler(f):
    # Images v1.1 NOTE: First read "first" magic.
    img_magic, = struct.unpack('i', f.read(4))
    if img_magic in (magic.by_name['IMG_COMMON'],
                     magic.by_name['IMG_SERVICE']):
        img_magic, = struct.unpack('i', f.read(4))

    try:
        m = magic.by_val[img_magic]
    except:
        raise MagicException(img_magic)

    try:
        handler = handlers[m]
    except:
        raise Exception("No handler found for image with magic " + m)

    return m, handler


def load(f, pretty=False, no_payload=False):
    """
    Convert criu image from binary format to dict(json).
    Takes a file-like object to read criu image from.
    Returns criu image in dict(json) format.
    """
    image = {}

    m, handler = __rhandler(f)

    image['magic'] = m
    image['entries'] = handler.load(f, pretty, no_payload)

    return image


def info(f):
    res = {}

    m, handler = __rhandler(f)

    res['magic'] = m
    res['count'] = handler.count(f)

    return res


def loads(s, pretty=False):
    """
    Same as load(), but takes a string.
    """
    f = io.BytesIO(s)
    return load(f, pretty)


def dump(img, f):
    """
    Convert criu image from dict(json) format to binary.
    Takes an image in dict(json) format and file-like
    object to write to.
    """
    m = img['magic']
    magic_val = magic.by_name[img['magic']]

    # Images v1.1 NOTE: use "second" magic to identify what "first"
    # should be written.
    if m != 'INVENTORY':
        if m in ('STATS', 'IRMAP_CACHE'):
            f.write(struct.pack('i', magic.by_name['IMG_SERVICE']))
        else:
            f.write(struct.pack('i', magic.by_name['IMG_COMMON']))

    f.write(struct.pack('i', magic_val))

    try:
        handler = handlers[m]
    except:
        raise Exception("No handler found for image with such magic")

    handler.dump(img['entries'], f)


def dumps(img):
    """
    Same as dump(), but takes only an image and returns
    a string.
    """
    f = io.BytesIO(b'')
    dump(img, f)
    return f.getvalue()
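
A short round-trip sketch using the load()/dump() helpers above (not from the commit). The image path is hypothetical; any binary image produced by criu dump would do, and the pycriu package is assumed to be importable.

    import json

    import pycriu

    # Decode a binary image into its dict(json) representation.
    with open('pstree.img', 'rb') as f:
        img = pycriu.images.load(f)  # pass pretty=True for crit-style output

    print(json.dumps(img, indent=4))

    # Encode the very same dict back into the binary image format.
    with open('pstree.copy.img', 'wb') as f:
        pycriu.images.dump(img, f)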


@@ -9,8 +9,8 @@ import base64
import quopri

if "encodebytes" not in dir(base64):
    base64.encodebytes = base64.encodestring
    base64.decodebytes = base64.decodestring

# pb2dict and dict2pb are methods to convert pb to/from dict.
# Inspired by:

@@ -29,350 +29,396 @@ if "encodebytes" not in dir(base64):
# enums to string value too. (i.e. "march : x86_64" is better then
# "march : 1").

_basic_cast = {
    FD.TYPE_FIXED64: int,
    FD.TYPE_FIXED32: int,
    FD.TYPE_SFIXED64: int,
    FD.TYPE_SFIXED32: int,
    FD.TYPE_INT64: int,
    FD.TYPE_UINT64: int,
    FD.TYPE_SINT64: int,
    FD.TYPE_INT32: int,
    FD.TYPE_UINT32: int,
    FD.TYPE_SINT32: int,
    FD.TYPE_BOOL: bool,
    FD.TYPE_STRING: str
}


def _marked_as_hex(field):
    return field.GetOptions().Extensions[opts_pb2.criu].hex


def _marked_as_ip(field):
    return field.GetOptions().Extensions[opts_pb2.criu].ipadd


def _marked_as_flags(field):
    return field.GetOptions().Extensions[opts_pb2.criu].flags


def _marked_as_dev(field):
    return field.GetOptions().Extensions[opts_pb2.criu].dev


def _marked_as_odev(field):
    return field.GetOptions().Extensions[opts_pb2.criu].odev


def _marked_as_dict(field):
    return field.GetOptions().Extensions[opts_pb2.criu].dict


def _custom_conv(field):
    return field.GetOptions().Extensions[opts_pb2.criu].conv


mmap_prot_map = [
    ('PROT_READ', 0x1),
    ('PROT_WRITE', 0x2),
    ('PROT_EXEC', 0x4),
]

mmap_flags_map = [
    ('MAP_SHARED', 0x1),
    ('MAP_PRIVATE', 0x2),
    ('MAP_ANON', 0x20),
    ('MAP_GROWSDOWN', 0x0100),
]

mmap_status_map = [
    ('VMA_AREA_NONE', 0 << 0),
    ('VMA_AREA_REGULAR', 1 << 0),
    ('VMA_AREA_STACK', 1 << 1),
    ('VMA_AREA_VSYSCALL', 1 << 2),
    ('VMA_AREA_VDSO', 1 << 3),
    ('VMA_AREA_HEAP', 1 << 5),
    ('VMA_FILE_PRIVATE', 1 << 6),
    ('VMA_FILE_SHARED', 1 << 7),
    ('VMA_ANON_SHARED', 1 << 8),
    ('VMA_ANON_PRIVATE', 1 << 9),
    ('VMA_AREA_SYSVIPC', 1 << 10),
    ('VMA_AREA_SOCKET', 1 << 11),
    ('VMA_AREA_VVAR', 1 << 12),
    ('VMA_AREA_AIORING', 1 << 13),
    ('VMA_UNSUPP', 1 << 31),
]

rfile_flags_map = [
    ('O_WRONLY', 0o1),
('O_RDWR', 0o2), ('O_RDWR', 0o2),
('O_APPEND', 0o2000), ('O_APPEND', 0o2000),
('O_DIRECT', 0o40000), ('O_DIRECT', 0o40000),
('O_LARGEFILE', 0o100000), ('O_LARGEFILE', 0o100000),
] ]
pmap_flags_map = [ pmap_flags_map = [
('PE_PARENT', 1 << 0), ('PE_PARENT', 1 << 0),
('PE_LAZY', 1 << 1), ('PE_LAZY', 1 << 1),
('PE_PRESENT', 1 << 2), ('PE_PRESENT', 1 << 2),
] ]
flags_maps = { flags_maps = {
'mmap.prot' : mmap_prot_map, 'mmap.prot': mmap_prot_map,
'mmap.flags' : mmap_flags_map, 'mmap.flags': mmap_flags_map,
'mmap.status' : mmap_status_map, 'mmap.status': mmap_status_map,
'rfile.flags' : rfile_flags_map, 'rfile.flags': rfile_flags_map,
'pmap.flags' : pmap_flags_map, 'pmap.flags': pmap_flags_map,
} }
gen_maps = { gen_maps = {
'task_state' : { 1: 'Alive', 3: 'Zombie', 6: 'Stopped' }, 'task_state': {
1: 'Alive',
3: 'Zombie',
6: 'Stopped'
},
} }
sk_maps = { sk_maps = {
'family' : { 1: 'UNIX', 'family': {
2: 'INET', 1: 'UNIX',
10: 'INET6', 2: 'INET',
16: 'NETLINK', 10: 'INET6',
17: 'PACKET' }, 16: 'NETLINK',
'type' : { 1: 'STREAM', 17: 'PACKET'
2: 'DGRAM', },
3: 'RAW', 'type': {
5: 'SEQPACKET', 1: 'STREAM',
10: 'PACKET' }, 2: 'DGRAM',
'state' : { 1: 'ESTABLISHED', 3: 'RAW',
2: 'SYN_SENT', 5: 'SEQPACKET',
3: 'SYN_RECV', 10: 'PACKET'
4: 'FIN_WAIT1', },
5: 'FIN_WAIT2', 'state': {
6: 'TIME_WAIT', 1: 'ESTABLISHED',
7: 'CLOSE', 2: 'SYN_SENT',
8: 'CLOSE_WAIT', 3: 'SYN_RECV',
9: 'LAST_ACK', 4: 'FIN_WAIT1',
10: 'LISTEN' }, 5: 'FIN_WAIT2',
'proto' : { 0: 'IP', 6: 'TIME_WAIT',
6: 'TCP', 7: 'CLOSE',
17: 'UDP', 8: 'CLOSE_WAIT',
136: 'UDPLITE' }, 9: 'LAST_ACK',
10: 'LISTEN'
},
'proto': {
0: 'IP',
6: 'TCP',
17: 'UDP',
136: 'UDPLITE'
},
} }
gen_rmaps = { k: {v2:k2 for k2,v2 in list(v.items())} for k,v in list(gen_maps.items()) } gen_rmaps = {
sk_rmaps = { k: {v2:k2 for k2,v2 in list(v.items())} for k,v in list(sk_maps.items()) } k: {v2: k2
for k2, v2 in list(v.items())}
for k, v in list(gen_maps.items())
}
sk_rmaps = {
k: {v2: k2
for k2, v2 in list(v.items())}
for k, v in list(sk_maps.items())
}
dict_maps = { dict_maps = {
'gen' : ( gen_maps, gen_rmaps ), 'gen': (gen_maps, gen_rmaps),
'sk' : ( sk_maps, sk_rmaps ), 'sk': (sk_maps, sk_rmaps),
} }
def map_flags(value, flags_map): def map_flags(value, flags_map):
bs = [x[0] for x in [x for x in flags_map if value & x[1]]] bs = [x[0] for x in [x for x in flags_map if value & x[1]]]
value &= ~sum([x[1] for x in flags_map]) value &= ~sum([x[1] for x in flags_map])
if value: if value:
bs.append("0x%x" % value) bs.append("0x%x" % value)
return " | ".join(bs) return " | ".join(bs)
def unmap_flags(value, flags_map): def unmap_flags(value, flags_map):
if value == '': if value == '':
return 0 return 0
bd = dict(flags_map) bd = dict(flags_map)
return sum([int(str(bd.get(x, x)), 0) for x in [x.strip() for x in value.split('|')]]) return sum([
int(str(bd.get(x, x)), 0)
for x in [x.strip() for x in value.split('|')]
])
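As a quick illustration of the two helpers above (plain Python, using the mmap_prot_map table defined earlier; the extra 0x1000 bit is an arbitrary example of an unknown flag):

val = 0x1 | 0x4 | 0x1000                  # PROT_READ | PROT_EXEC | unknown bit
s = map_flags(val, mmap_prot_map)         # -> "PROT_READ | PROT_EXEC | 0x1000"
assert unmap_flags(s, mmap_prot_map) == val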
kern_minorbits = 20 # This is how kernel encodes dev_t in new format
kern_minorbits = 20 # This is how kernel encodes dev_t in new format
def decode_dev(field, value): def decode_dev(field, value):
if _marked_as_odev(field): if _marked_as_odev(field):
return "%d:%d" % (os.major(value), os.minor(value)) return "%d:%d" % (os.major(value), os.minor(value))
else: else:
return "%d:%d" % (value >> kern_minorbits, value & ((1 << kern_minorbits) - 1)) return "%d:%d" % (value >> kern_minorbits,
value & ((1 << kern_minorbits) - 1))
def encode_dev(field, value): def encode_dev(field, value):
dev = [int(x) for x in value.split(':')] dev = [int(x) for x in value.split(':')]
if _marked_as_odev(field): if _marked_as_odev(field):
return os.makedev(dev[0], dev[1]) return os.makedev(dev[0], dev[1])
else: else:
return dev[0] << kern_minorbits | dev[1] return dev[0] << kern_minorbits | dev[1]
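A tiny worked example of the arithmetic above for a field that is not marked as odev (major 8, minor 1 are arbitrary sample numbers):

dev = 8 << kern_minorbits | 1                     # what encode_dev() produces
assert dev >> kern_minorbits == 8                 # major, as decode_dev() sees it
assert dev & ((1 << kern_minorbits) - 1) == 1     # minor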
def encode_base64(value): def encode_base64(value):
return base64.encodebytes(value) return base64.encodebytes(value)
def decode_base64(value): def decode_base64(value):
return base64.decodebytes(value) return base64.decodebytes(value)
def encode_unix(value): def encode_unix(value):
return quopri.encodestring(value) return quopri.encodestring(value)
def decode_unix(value):
return quopri.decodestring(value)
def decode_unix(value):
return quopri.decodestring(value)
encode = {'unix_name': encode_unix}
decode = {'unix_name': decode_unix}
encode = { 'unix_name': encode_unix }
decode = { 'unix_name': decode_unix }
def get_bytes_enc(field): def get_bytes_enc(field):
c = _custom_conv(field) c = _custom_conv(field)
if c: if c:
return encode[c] return encode[c]
else: else:
return encode_base64 return encode_base64
def get_bytes_dec(field): def get_bytes_dec(field):
c = _custom_conv(field) c = _custom_conv(field)
if c: if c:
return decode[c] return decode[c]
else: else:
return decode_base64 return decode_base64
def is_string(value): def is_string(value):
# Python 3 compatibility # Python 3 compatibility
if "basestring" in __builtins__: if "basestring" in __builtins__:
string_types = basestring # noqa: F821 string_types = basestring # noqa: F821
else: else:
string_types = (str, bytes) string_types = (str, bytes)
return isinstance(value, string_types) return isinstance(value, string_types)
def _pb2dict_cast(field, value, pretty = False, is_hex = False):
if not is_hex:
is_hex = _marked_as_hex(field)
if field.type == FD.TYPE_MESSAGE: def _pb2dict_cast(field, value, pretty=False, is_hex=False):
return pb2dict(value, pretty, is_hex) if not is_hex:
elif field.type == FD.TYPE_BYTES: is_hex = _marked_as_hex(field)
return get_bytes_enc(field)(value)
elif field.type == FD.TYPE_ENUM:
return field.enum_type.values_by_number.get(value, None).name
elif field.type in _basic_cast:
cast = _basic_cast[field.type]
if pretty and (cast == int):
if is_hex:
# Fields that have (criu).hex = true option set
# should be stored in hex string format.
return "0x%x" % value
if _marked_as_dev(field): if field.type == FD.TYPE_MESSAGE:
return decode_dev(field, value) return pb2dict(value, pretty, is_hex)
elif field.type == FD.TYPE_BYTES:
return get_bytes_enc(field)(value)
elif field.type == FD.TYPE_ENUM:
return field.enum_type.values_by_number.get(value, None).name
elif field.type in _basic_cast:
cast = _basic_cast[field.type]
if pretty and (cast == int):
if is_hex:
# Fields that have (criu).hex = true option set
# should be stored in hex string format.
return "0x%x" % value
flags = _marked_as_flags(field) if _marked_as_dev(field):
if flags: return decode_dev(field, value)
try:
flags_map = flags_maps[flags]
except:
return "0x%x" % value # flags are better seen as hex anyway
else:
return map_flags(value, flags_map)
dct = _marked_as_dict(field) flags = _marked_as_flags(field)
if dct: if flags:
return dict_maps[dct][0][field.name].get(value, cast(value)) try:
flags_map = flags_maps[flags]
except Exception:
return "0x%x" % value # flags are better seen as hex anyway
else:
return map_flags(value, flags_map)
return cast(value) dct = _marked_as_dict(field)
else: if dct:
raise Exception("Field(%s) has unsupported type %d" % (field.name, field.type)) return dict_maps[dct][0][field.name].get(value, cast(value))
def pb2dict(pb, pretty = False, is_hex = False): return cast(value)
""" else:
Convert protobuf msg to dictionary. raise Exception("Field(%s) has unsupported type %d" %
Takes a protobuf message and returns a dict. (field.name, field.type))
"""
d = collections.OrderedDict() if pretty else {}
for field, value in pb.ListFields():
if field.label == FD.LABEL_REPEATED:
d_val = []
if pretty and _marked_as_ip(field):
if len(value) == 1:
v = socket.ntohl(value[0])
addr = IPv4Address(v)
else:
v = 0 + (socket.ntohl(value[0]) << (32 * 3)) + \
(socket.ntohl(value[1]) << (32 * 2)) + \
(socket.ntohl(value[2]) << (32 * 1)) + \
(socket.ntohl(value[3]))
addr = IPv6Address(v)
d_val.append(addr.compressed)
else:
for v in value:
d_val.append(_pb2dict_cast(field, v, pretty, is_hex))
else:
d_val = _pb2dict_cast(field, value, pretty, is_hex)
d[field.name] = d_val def pb2dict(pb, pretty=False, is_hex=False):
return d """
Convert protobuf msg to dictionary.
Takes a protobuf message and returns a dict.
"""
d = collections.OrderedDict() if pretty else {}
for field, value in pb.ListFields():
if field.label == FD.LABEL_REPEATED:
d_val = []
if pretty and _marked_as_ip(field):
if len(value) == 1:
v = socket.ntohl(value[0])
addr = IPv4Address(v)
else:
v = 0 + (socket.ntohl(value[0]) << (32 * 3)) + \
(socket.ntohl(value[1]) << (32 * 2)) + \
(socket.ntohl(value[2]) << (32 * 1)) + \
(socket.ntohl(value[3]))
addr = IPv6Address(v)
d_val.append(addr.compressed)
else:
for v in value:
d_val.append(_pb2dict_cast(field, v, pretty, is_hex))
else:
d_val = _pb2dict_cast(field, value, pretty, is_hex)
d[field.name] = d_val
return d
def _dict2pb_cast(field, value): def _dict2pb_cast(field, value):
# Not considering TYPE_MESSAGE here, as repeated # Not considering TYPE_MESSAGE here, as repeated
# and non-repeated messages need special treatment # and non-repeated messages need special treatment
# in this case, and are hadled separately. # in this case, and are hadled separately.
if field.type == FD.TYPE_BYTES: if field.type == FD.TYPE_BYTES:
return get_bytes_dec(field)(value) return get_bytes_dec(field)(value)
elif field.type == FD.TYPE_ENUM: elif field.type == FD.TYPE_ENUM:
return field.enum_type.values_by_name.get(value, None).number return field.enum_type.values_by_name.get(value, None).number
elif field.type in _basic_cast: elif field.type in _basic_cast:
cast = _basic_cast[field.type] cast = _basic_cast[field.type]
if (cast == int) and is_string(value): if (cast == int) and is_string(value):
if _marked_as_dev(field): if _marked_as_dev(field):
return encode_dev(field, value) return encode_dev(field, value)
flags = _marked_as_flags(field) flags = _marked_as_flags(field)
if flags: if flags:
try: try:
flags_map = flags_maps[flags] flags_map = flags_maps[flags]
except: except Exception:
pass # Try to use plain string cast pass # Try to use plain string cast
else: else:
return unmap_flags(value, flags_map) return unmap_flags(value, flags_map)
dct = _marked_as_dict(field) dct = _marked_as_dict(field)
if dct: if dct:
ret = dict_maps[dct][1][field.name].get(value, None) ret = dict_maps[dct][1][field.name].get(value, None)
if ret == None: if ret is None:
ret = cast(value, 0) ret = cast(value, 0)
return ret return ret
# Some int or long fields might be stored as hex
# strings. See _pb2dict_cast.
return cast(value, 0)
else:
return cast(value)
else:
raise Exception("Field(%s) has unsupported type %d" %
(field.name, field.type))
# Some int or long fields might be stored as hex
# strings. See _pb2dict_cast.
return cast(value, 0)
else:
return cast(value)
else:
raise Exception("Field(%s) has unsupported type %d" % (field.name, field.type))
def dict2pb(d, pb): def dict2pb(d, pb):
""" """
Convert dictionary to protobuf msg. Convert dictionary to protobuf msg.
Takes dict and protobuf message to be merged into. Takes dict and protobuf message to be merged into.
""" """
for field in pb.DESCRIPTOR.fields: for field in pb.DESCRIPTOR.fields:
if field.name not in d: if field.name not in d:
continue continue
value = d[field.name] value = d[field.name]
if field.label == FD.LABEL_REPEATED: if field.label == FD.LABEL_REPEATED:
pb_val = getattr(pb, field.name, None) pb_val = getattr(pb, field.name, None)
if is_string(value[0]) and _marked_as_ip(field): if is_string(value[0]) and _marked_as_ip(field):
val = ip_address(value[0]) val = ip_address(value[0])
if val.version == 4: if val.version == 4:
pb_val.append(socket.htonl(int(val))) pb_val.append(socket.htonl(int(val)))
elif val.version == 6: elif val.version == 6:
ival = int(val) ival = int(val)
pb_val.append(socket.htonl((ival >> (32 * 3)) & 0xFFFFFFFF)) pb_val.append(socket.htonl((ival >> (32 * 3)) & 0xFFFFFFFF))
pb_val.append(socket.htonl((ival >> (32 * 2)) & 0xFFFFFFFF)) pb_val.append(socket.htonl((ival >> (32 * 2)) & 0xFFFFFFFF))
pb_val.append(socket.htonl((ival >> (32 * 1)) & 0xFFFFFFFF)) pb_val.append(socket.htonl((ival >> (32 * 1)) & 0xFFFFFFFF))
pb_val.append(socket.htonl((ival >> (32 * 0)) & 0xFFFFFFFF)) pb_val.append(socket.htonl((ival >> (32 * 0)) & 0xFFFFFFFF))
else: else:
raise Exception("Unknown IP address version %d" % val.version) raise Exception("Unknown IP address version %d" %
continue val.version)
continue
for v in value: for v in value:
if field.type == FD.TYPE_MESSAGE: if field.type == FD.TYPE_MESSAGE:
dict2pb(v, pb_val.add()) dict2pb(v, pb_val.add())
else: else:
pb_val.append(_dict2pb_cast(field, v)) pb_val.append(_dict2pb_cast(field, v))
else: else:
if field.type == FD.TYPE_MESSAGE: if field.type == FD.TYPE_MESSAGE:
# SetInParent method acts just like has_* = true in C, # SetInParent method acts just like has_* = true in C,
# and helps to properly treat cases when we have optional # and helps to properly treat cases when we have optional
# field with empty repeated inside. # field with empty repeated inside.
getattr(pb, field.name).SetInParent() getattr(pb, field.name).SetInParent()
dict2pb(value, getattr(pb, field.name, None)) dict2pb(value, getattr(pb, field.name, None))
else: else:
setattr(pb, field.name, _dict2pb_cast(field, value)) setattr(pb, field.name, _dict2pb_cast(field, value))
return pb return pb

View File

@@ -1,12 +1,11 @@
from distutils.core import setup

setup(name="crit",
      version="0.0.1",
      description="CRiu Image Tool",
      author="CRIU team",
      author_email="criu@openvz.org",
      url="https://github.com/checkpoint-restore/criu",
      package_dir={'pycriu': 'lib/py'},
      packages=["pycriu", "pycriu.images"],
      scripts=["crit/crit"])

View File

@@ -1,61 +1,63 @@
#!/bin/env python2
import sys

# This program parses criu magic.h file and produces
# magic.py with all *_MAGIC constants except RAW and V1.


def main(argv):
    if len(argv) != 3:
        print("Usage: magic-gen.py path/to/image.h path/to/magic.py")
        exit(1)

    magic_c_header = argv[1]
    magic_py = argv[2]

    out = open(magic_py, 'w+')

    # all_magic is used to parse constructions like:
    # #define PAGEMAP_MAGIC 0x56084025
    # #define SHMEM_PAGEMAP_MAGIC PAGEMAP_MAGIC
    all_magic = {}
    # and magic is used to store only unique magic.
    magic = {}

    f = open(magic_c_header, 'r')
    for line in f:
        split = line.split()

        if len(split) < 3:
            continue

        if not '#define' in split[0]:
            continue

        key = split[1]
        value = split[2]

        if value in all_magic:
            value = all_magic[value]
        else:
            magic[key] = value

        all_magic[key] = value

    out.write('#Autogenerated. Do not edit!\n')
    out.write('by_name = {}\n')
    out.write('by_val = {}\n')

    for k, v in list(magic.items()):
        # We don't need RAW or V1 magic, because
        # they can't be used to identify images.
        if v == '0x0' or v == '1' or k == '0x0' or v == '1':
            continue
        if k.endswith("_MAGIC"):
            # Just cutting _MAGIC suffix
            k = k[:-6]
        v = int(v, 16)
        out.write("by_name['" + k + "'] = " + str(v) + "\n")
        out.write("by_val[" + str(v) + "] = '" + k + "'\n")

    f.close()
    out.close()


if __name__ == "__main__":
    main(sys.argv)
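Following the write pattern in the loop above, the generated magic.py is just a pair of plain dicts. For the PAGEMAP_MAGIC value quoted in the comment it would contain lines like the following (decimal values, because the script converts with int(v, 16); shown only as an illustration of the output format):

#Autogenerated. Do not edit!
by_name = {}
by_val = {}
by_name['PAGEMAP'] = 1443381285
by_val[1443381285] = 'PAGEMAP'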

View File

@@ -13,17 +13,17 @@ sport = os.getenv("TCP_SPORT", "12345")
dport = os.getenv("TCP_DPORT", "54321")

print(sys.argv[1])

args = [
    sys.argv[1], "--addr", src, "--port", sport, "--seq", "555", "--next",
    "--addr", dst, "--port", dport, "--seq", "666", "--reverse", "--",
    "./tcp-test.py"
]

p1 = Popen(args + ["dst"], stdout=PIPE, stdin=PIPE)

args.remove("--reverse")
p2 = Popen(args + ["src"], stdout=PIPE, stdin=PIPE)

p1.stdout.read(5)
p2.stdout.read(5)

@@ -42,7 +42,7 @@ str2 = m.hexdigest()

if str2 != eval(s):
    print("FAIL", repr(str2), repr(s))
    sys.exit(5)

s = p1.stdout.read()
m = hashlib.md5()

@@ -52,7 +52,7 @@ str1 = m.hexdigest()
s = p2.stdout.read()
if str1 != eval(s):
    print("FAIL", repr(str1), s)
    sys.exit(5)

if p1.wait():
    sys.exit(1)

View File

@@ -4,37 +4,38 @@ import sys
import os

actions = set(['pre-dump', 'pre-restore', 'post-dump', 'setup-namespaces', \
        'post-setup-namespaces', 'post-restore', 'post-resume', \
        'network-lock', 'network-unlock' ])
errors = []

af = os.path.dirname(os.path.abspath(__file__)) + '/actions_called.txt'
for act in open(af):
    act = act.strip().split()
    act.append('EMPTY')
    act.append('EMPTY')

    if act[0] == 'EMPTY':
        raise Exception("Error in test, bogus actions line")

    if act[1] == 'EMPTY':
        errors.append('Action %s misses CRTOOLS_IMAGE_DIR' % act[0])

    if act[0] in ('post-dump', 'setup-namespaces', 'post-setup-namespaces', \
            'post-restore', 'post-resume', 'network-lock', 'network-unlock'):
        if act[2] == 'EMPTY':
            errors.append('Action %s misses CRTOOLS_INIT_PID' % act[0])
        elif not act[2].isdigit() or int(act[2]) == 0:
            errors.append('Action %s PID is not number (%s)' %
                          (act[0], act[2]))

    actions -= set([act[0]])

if actions:
    errors.append('Not all actions called: %r' % actions)

if errors:
    for x in errors:
        print(x)
    sys.exit(1)

print('PASS')
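A hypothetical companion action script that would produce the records this checker expects (one "action image-dir pid" line per hook invocation). The CRTOOLS_IMAGE_DIR and CRTOOLS_INIT_PID names are the ones referenced above; CRTOOLS_SCRIPT_ACTION is the variable CRIU sets to the action name. The script itself is only a sketch, not part of this change:

import os

fields = [os.getenv('CRTOOLS_SCRIPT_ACTION', ''),
          os.getenv('CRTOOLS_IMAGE_DIR', ''),
          os.getenv('CRTOOLS_INIT_PID', '')]
af = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                  'actions_called.txt')
with open(af, 'a') as out:
    # Missing variables simply drop out; the checker pads with 'EMPTY'.
    out.write(' '.join(v for v in fields if v) + '\n')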

View File

@@ -6,70 +6,72 @@ import sys
import os
import subprocess

find = subprocess.Popen(
    ['find', 'test/dump/', '-size', '+0', '-name', '*.img'],
    stdout=subprocess.PIPE)

test_pass = True


def recode_and_check(imgf, o_img, pretty):
    try:
        pb = pycriu.images.loads(o_img, pretty)
    except pycriu.images.MagicException as me:
        print("%s magic %x error" % (imgf, me.magic))
        return False
    except Exception as e:
        print("%s %sdecode fails: %s" % (imgf, pretty and 'pretty ' or '', e))
        return False

    try:
        r_img = pycriu.images.dumps(pb)
    except Exception as e:
        r_img = pycriu.images.dumps(pb)
        print("%s %s encode fails: %s" % (imgf, pretty and 'pretty ' or '', e))
        return False

    if o_img != r_img:
        print("%s %s recode mismatch" % (imgf, pretty and 'pretty ' or ''))
        return False

    return True


for imgf in find.stdout.readlines():
    imgf = imgf.strip()
    imgf_b = os.path.basename(imgf)

    if imgf_b.startswith(b'pages-'):
        continue
    if imgf_b.startswith(b'iptables-'):
        continue
    if imgf_b.startswith(b'ip6tables-'):
        continue
    if imgf_b.startswith(b'route-'):
        continue
    if imgf_b.startswith(b'route6-'):
        continue
    if imgf_b.startswith(b'ifaddr-'):
        continue
    if imgf_b.startswith(b'tmpfs-'):
        continue
    if imgf_b.startswith(b'netns-ct-'):
        continue
    if imgf_b.startswith(b'netns-exp-'):
        continue
    if imgf_b.startswith(b'rule-'):
        continue

    o_img = open(imgf.decode(), "rb").read()
    if not recode_and_check(imgf, o_img, False):
        test_pass = False
    if not recode_and_check(imgf, o_img, True):
        test_pass = False

find.wait()
if not test_pass:
    print("FAIL")
    sys.exit(1)
print("PASS")

View File

@ -8,125 +8,127 @@ import time
import sys import sys
import subprocess import subprocess
criu_bin='../../criu/criu' criu_bin = '../../criu/criu'
def mix(nr_tasks, nr_pipes): def mix(nr_tasks, nr_pipes):
# Returned is the list of combinations. # Returned is the list of combinations.
# Each combination is the lists of pipe descriptors. # Each combination is the lists of pipe descriptors.
# Each pipe descriptor is a 2-elemtn tuple, that contains values # Each pipe descriptor is a 2-elemtn tuple, that contains values
# for R and W ends of pipes, each being a bit-field denoting in # for R and W ends of pipes, each being a bit-field denoting in
# which tasks the respective end should be opened or not. # which tasks the respective end should be opened or not.
# First -- make a full set of combinations for a single pipe. # First -- make a full set of combinations for a single pipe.
max_idx = 1 << nr_tasks max_idx = 1 << nr_tasks
pipe_mix = [[(r, w)] for r in range(0, max_idx) for w in range(0, max_idx)] pipe_mix = [[(r, w)] for r in range(0, max_idx) for w in range(0, max_idx)]
# Now, for every pipe throw another one into the game making # Now, for every pipe throw another one into the game making
# all possible combinations of what was seen before with the # all possible combinations of what was seen before with the
# newbie. # newbie.
pipes_mix = pipe_mix pipes_mix = pipe_mix
for t in range(1, nr_pipes): for t in range(1, nr_pipes):
pipes_mix = [ o + n for o in pipes_mix for n in pipe_mix ] pipes_mix = [o + n for o in pipes_mix for n in pipe_mix]
return pipes_mix return pipes_mix
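For instance (a worked illustration of the enumeration above, not part of the test): with a single task and a single pipe, each end is either closed (0) or open in task 0 (1), giving four combinations:

assert mix(1, 1) == [[(0, 0)], [(0, 1)], [(1, 0)], [(1, 1)]]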
# Called by a test sub-process. It just closes the not needed ends # Called by a test sub-process. It just closes the not needed ends
# of pipes and sleeps waiting for death. # of pipes and sleeps waiting for death.
def make_pipes(task_nr, nr_pipes, pipes, comb, status_pipe): def make_pipes(task_nr, nr_pipes, pipes, comb, status_pipe):
print('\t\tMake pipes for %d' % task_nr) print('\t\tMake pipes for %d' % task_nr)
# We need to make sure that pipes have their # We need to make sure that pipes have their
# ends according to comb for task_nr # ends according to comb for task_nr
for i in range(0, nr_pipes): for i in range(0, nr_pipes):
# Read end # Read end
if not (comb[i][0] & (1 << task_nr)): if not (comb[i][0] & (1 << task_nr)):
os.close(pipes[i][0]) os.close(pipes[i][0])
# Write end # Write end
if not (comb[i][1] & (1 << task_nr)): if not (comb[i][1] & (1 << task_nr)):
os.close(pipes[i][1]) os.close(pipes[i][1])
os.write(status_pipe, '0') os.write(status_pipe, '0')
os.close(status_pipe) os.close(status_pipe)
while True: while True:
time.sleep(100) time.sleep(100)
def get_pipe_ino(pid, fd): def get_pipe_ino(pid, fd):
try: try:
return os.stat('/proc/%d/fd/%d' % (pid, fd)).st_ino return os.stat('/proc/%d/fd/%d' % (pid, fd)).st_ino
except: except:
return None return None
def get_pipe_rw(pid, fd): def get_pipe_rw(pid, fd):
for l in open('/proc/%d/fdinfo/%d' % (pid, fd)): for l in open('/proc/%d/fdinfo/%d' % (pid, fd)):
if l.startswith('flags:'): if l.startswith('flags:'):
f = l.split(None, 1)[1][-2] f = l.split(None, 1)[1][-2]
if f == '0': if f == '0':
return 0 # Read return 0 # Read
elif f == '1': elif f == '1':
return 1 # Write return 1 # Write
break break
raise Exception('Unexpected fdinfo contents') raise Exception('Unexpected fdinfo contents')
def check_pipe_y(pid, fd, rw, inos): def check_pipe_y(pid, fd, rw, inos):
ino = get_pipe_ino(pid, fd) ino = get_pipe_ino(pid, fd)
if ino == None: if ino == None:
return 'missing ' return 'missing '
if not inos.has_key(fd): if not inos.has_key(fd):
inos[fd] = ino inos[fd] = ino
elif inos[fd] != ino: elif inos[fd] != ino:
return 'wrong ' return 'wrong '
mod = get_pipe_rw(pid, fd) mod = get_pipe_rw(pid, fd)
if mod != rw: if mod != rw:
return 'badmode ' return 'badmode '
return None return None
def check_pipe_n(pid, fd): def check_pipe_n(pid, fd):
ino = get_pipe_ino(pid, fd) ino = get_pipe_ino(pid, fd)
if ino == None: if ino == None:
return None return None
else: else:
return 'present ' return 'present '
def check_pipe_end(kids, fd, comb, rw, inos): def check_pipe_end(kids, fd, comb, rw, inos):
t_nr = 0 t_nr = 0
for t_pid in kids: for t_pid in kids:
if comb & (1 << t_nr): if comb & (1 << t_nr):
res = check_pipe_y(t_pid, fd, rw, inos) res = check_pipe_y(t_pid, fd, rw, inos)
else: else:
res = check_pipe_n(t_pid, fd) res = check_pipe_n(t_pid, fd)
if res != None: if res != None:
return res + 'kid(%d)' % t_nr return res + 'kid(%d)' % t_nr
t_nr += 1 t_nr += 1
return None return None
def check_pipe(kids, fds, comb, inos): def check_pipe(kids, fds, comb, inos):
for e in (0, 1): # 0 == R, 1 == W, see get_pipe_rw() for e in (0, 1): # 0 == R, 1 == W, see get_pipe_rw()
res = check_pipe_end(kids, fds[e], comb[e], e, inos) res = check_pipe_end(kids, fds[e], comb[e], e, inos)
if res != None: if res != None:
return res + 'end(%d)' % e return res + 'end(%d)' % e
return None return None
def check_pipes(kids, pipes, comb): def check_pipes(kids, pipes, comb):
# Kids contain pids # Kids contain pids
# Pipes contain pipe FDs # Pipes contain pipe FDs
# Comb contain list of pairs of bits for RW ends # Comb contain list of pairs of bits for RW ends
p_nr = 0 p_nr = 0
p_inos = {} p_inos = {}
for p_fds in pipes: for p_fds in pipes:
res = check_pipe(kids, p_fds, comb[p_nr], p_inos) res = check_pipe(kids, p_fds, comb[p_nr], p_inos)
if res != None: if res != None:
return res + 'pipe(%d)' % p_nr return res + 'pipe(%d)' % p_nr
p_nr += 1 p_nr += 1
return None return None
# Run by test main process. It opens pipes, then forks kids that # Run by test main process. It opens pipes, then forks kids that
@ -134,128 +136,134 @@ def check_pipes(kids, pipes, comb):
# and waits for a signal (unix socket message) to start checking # and waits for a signal (unix socket message) to start checking
# the kids' FD tables. # the kids' FD tables.
def make_comb(comb, opts, status_pipe): def make_comb(comb, opts, status_pipe):
print('\tMake pipes') print('\tMake pipes')
# 1st -- make needed pipes # 1st -- make needed pipes
pipes = [] pipes = []
for p in range(0, opts.pipes): for p in range(0, opts.pipes):
pipes.append(os.pipe()) pipes.append(os.pipe())
# Fork the kids that'll make pipes # Fork the kids that'll make pipes
kc_pipe = os.pipe() kc_pipe = os.pipe()
kids = [] kids = []
for t in range(0, opts.tasks): for t in range(0, opts.tasks):
pid = os.fork() pid = os.fork()
if pid == 0: if pid == 0:
os.close(status_pipe) os.close(status_pipe)
os.close(kc_pipe[0]) os.close(kc_pipe[0])
make_pipes(t, opts.pipes, pipes, comb, kc_pipe[1]) make_pipes(t, opts.pipes, pipes, comb, kc_pipe[1])
sys.exit(1) sys.exit(1)
kids.append(pid) kids.append(pid)
os.close(kc_pipe[1]) os.close(kc_pipe[1])
for p in pipes: for p in pipes:
os.close(p[0]) os.close(p[0])
os.close(p[1]) os.close(p[1])
# Wait for kids to get ready # Wait for kids to get ready
k_res = '' k_res = ''
while True: while True:
v = os.read(kc_pipe[0], 16) v = os.read(kc_pipe[0], 16)
if v == '': if v == '':
break break
k_res += v k_res += v
os.close(kc_pipe[0]) os.close(kc_pipe[0])
ex_code = 1 ex_code = 1
if k_res == '0' * opts.tasks: if k_res == '0' * opts.tasks:
print('\tWait for C/R') print('\tWait for C/R')
cmd_sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0) cmd_sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0)
cmd_sk.bind('\0CRIUPCSK') cmd_sk.bind('\0CRIUPCSK')
# Kids are ready, so is socket for kicking us. Notify the # Kids are ready, so is socket for kicking us. Notify the
# parent task that we are good to go. # parent task that we are good to go.
os.write(status_pipe, '0') os.write(status_pipe, '0')
os.close(status_pipe) os.close(status_pipe)
v = cmd_sk.recv(16) v = cmd_sk.recv(16)
if v == '0': if v == '0':
print('\tCheck pipes') print('\tCheck pipes')
res = check_pipes(kids, pipes, comb) res = check_pipes(kids, pipes, comb)
if res == None: if res == None:
ex_code = 0 ex_code = 0
else: else:
print('\tFAIL %s' % res) print('\tFAIL %s' % res)
# Just kill kids, all checks are done by us, we don't need'em any more # Just kill kids, all checks are done by us, we don't need'em any more
for t in kids: for t in kids:
os.kill(t, signal.SIGKILL) os.kill(t, signal.SIGKILL)
os.waitpid(t, 0) os.waitpid(t, 0)
return ex_code return ex_code
def cr_test(pid): def cr_test(pid):
print('C/R test') print('C/R test')
img_dir = 'pimg_%d' % pid img_dir = 'pimg_%d' % pid
try: try:
os.mkdir(img_dir) os.mkdir(img_dir)
subprocess.check_call([criu_bin, 'dump', '-t', '%d' % pid, '-D', img_dir, '-o', 'dump.log', '-v4', '-j']) subprocess.check_call([
except: criu_bin, 'dump', '-t',
print('`- dump fail') '%d' % pid, '-D', img_dir, '-o', 'dump.log', '-v4', '-j'
return False ])
except:
print('`- dump fail')
return False
try: try:
os.waitpid(pid, 0) os.waitpid(pid, 0)
subprocess.check_call([criu_bin, 'restore', '-D', img_dir, '-o', 'rst.log', '-v4', '-j', '-d', '-S']) subprocess.check_call([
except: criu_bin, 'restore', '-D', img_dir, '-o', 'rst.log', '-v4', '-j',
print('`- restore fail') '-d', '-S'
return False ])
except:
print('`- restore fail')
return False
return True return True
def run(comb, opts): def run(comb, opts):
print('Checking %r' % comb) print('Checking %r' % comb)
cpipe = os.pipe() cpipe = os.pipe()
pid = os.fork() pid = os.fork()
if pid == 0: if pid == 0:
os.close(cpipe[0]) os.close(cpipe[0])
ret = make_comb(comb, opts, cpipe[1]) ret = make_comb(comb, opts, cpipe[1])
sys.exit(ret) sys.exit(ret)
# Wait for the main process to get ready # Wait for the main process to get ready
os.close(cpipe[1]) os.close(cpipe[1])
res = os.read(cpipe[0], 16) res = os.read(cpipe[0], 16)
os.close(cpipe[0]) os.close(cpipe[0])
if res == '0': if res == '0':
res = cr_test(pid) res = cr_test(pid)
print('Wake up test') print('Wake up test')
s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0) s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0)
if res: if res:
res = '0' res = '0'
else: else:
res = 'X' res = 'X'
try: try:
# Kick the test to check its state # Kick the test to check its state
s.sendto(res, '\0CRIUPCSK') s.sendto(res, '\0CRIUPCSK')
except: except:
# Restore might have failed or smth else happened # Restore might have failed or smth else happened
os.kill(pid, signal.SIGKILL) os.kill(pid, signal.SIGKILL)
s.close() s.close()
# Wait for the guy to exit and get the result (PASS/FAIL) # Wait for the guy to exit and get the result (PASS/FAIL)
p, st = os.waitpid(pid, 0) p, st = os.waitpid(pid, 0)
if os.WIFEXITED(st): if os.WIFEXITED(st):
st = os.WEXITSTATUS(st) st = os.WEXITSTATUS(st)
print('Done (%d, pid == %d)' % (st, pid)) print('Done (%d, pid == %d)' % (st, pid))
return st == 0 return st == 0
p = argparse.ArgumentParser("CRIU test suite") p = argparse.ArgumentParser("CRIU test suite")
p.add_argument("--tasks", help = "Number of tasks", default = '2') p.add_argument("--tasks", help="Number of tasks", default='2')
p.add_argument("--pipes", help = "Number of pipes", default = '2') p.add_argument("--pipes", help="Number of pipes", default='2')
opts = p.parse_args() opts = p.parse_args()
opts.tasks = int(opts.tasks) opts.tasks = int(opts.tasks)
opts.pipes = int(opts.pipes) opts.pipes = int(opts.pipes)
@ -263,8 +271,8 @@ opts.pipes = int(opts.pipes)
pipe_combs = mix(opts.tasks, opts.pipes) pipe_combs = mix(opts.tasks, opts.pipes)
for comb in pipe_combs: for comb in pipe_combs:
if not run(comb, opts): if not run(comb, opts):
print('FAIL') print('FAIL')
break break
else: else:
print('PASS') print('PASS')

File diff suppressed because it is too large Load Diff

View File

@@ -5,35 +5,35 @@ id_str = ""


def create_fds():
    tdir = tempfile.mkdtemp("zdtm.inhfd.XXXXXX")
    if os.system("mount -t tmpfs zdtm.inhfd %s" % tdir) != 0:
        raise Exception("Unable to mount tmpfs")
    tfifo = os.path.join(tdir, "test_fifo")
    os.mkfifo(tfifo)
    fd2 = open(tfifo, "w+b", buffering=0)
    fd1 = open(tfifo, "rb")
    os.system("umount -l %s" % tdir)
    os.rmdir(tdir)

    mnt_id = -1
    with open("/proc/self/fdinfo/%d" % fd1.fileno()) as f:
        for line in f:
            line = line.split()
            if line[0] == "mnt_id:":
                mnt_id = int(line[1])
                break
        else:
            raise Exception("Unable to find mnt_id")

    global id_str
    id_str = "file[%x:%x]" % (mnt_id, os.fstat(fd1.fileno()).st_ino)

    return [(fd2, fd1)]


def filename(pipef):
    return id_str


def dump_opts(sockf):
    return ["--external", id_str]

View File

@@ -2,16 +2,16 @@ import os


def create_fds():
    pipes = []
    for i in range(10):
        (fd1, fd2) = os.pipe()
        pipes.append((os.fdopen(fd2, "wb"), os.fdopen(fd1, "rb")))
    return pipes


def filename(pipef):
    return 'pipe:[%d]' % os.fstat(pipef.fileno()).st_ino


def dump_opts(sockf):
    return []

View File

@@ -3,19 +3,19 @@ import os


def create_fds():
    (sk1, sk2) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    (sk3, sk4) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    return [(sk1.makefile("wb"), sk2.makefile("rb")),
            (sk3.makefile("wb"), sk4.makefile("rb"))]


def __sock_ino(sockf):
    return os.fstat(sockf.fileno()).st_ino


def filename(sockf):
    return 'socket:[%d]' % __sock_ino(sockf)


def dump_opts(sockf):
    return ['--external', 'unix[%d]' % __sock_ino(sockf)]
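These inhfd modules all expose the same small interface — create_fds(), filename() and dump_opts() — so a test harness can stay generic. A rough, hypothetical sketch of such a consumer (the module name and the loop are assumptions; the real zdtm runner does considerably more):

import pipe as inhfd_mod    # hypothetical module name, e.g. the pipe variant

for wfile, rfile in inhfd_mod.create_fds():
    # The "filename" identifies the resource, dump_opts() yields the extra
    # CRIU command-line options (such as --external) needed to dump it.
    print('inheriting', inhfd_mod.filename(rfile))
    print('extra dump options:', inhfd_mod.dump_opts(rfile))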

View File

@@ -4,34 +4,33 @@ import os
import pty
import termios

ctl = False


def child_prep(fd):
    global ctl
    if ctl:
        return
    ctl = True
    fcntl.ioctl(fd.fileno(), termios.TIOCSCTTY, 1)


def create_fds():
    ttys = []
    for i in range(10):
        (fd1, fd2) = pty.openpty()
        newattr = termios.tcgetattr(fd1)
        newattr[3] &= ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd1, termios.TCSADRAIN, newattr)
        ttys.append((os.fdopen(fd1, "wb"), os.fdopen(fd2, "rb")))
    return ttys


def filename(pipef):
    st = os.fstat(pipef.fileno())
    return 'tty[%x:%x]' % (st.st_rdev, st.st_dev)


def dump_opts(sockf):
    st = os.fstat(sockf.fileno())
    return "--external", 'tty[%x:%x]' % (st.st_rdev, st.st_dev)

View File

@@ -5,32 +5,41 @@ import os, sys, time, signal, pty

master, slave = pty.openpty()

p = subprocess.Popen(["setsid", "--ctty", "sleep", "10000"],
                     stdin=slave,
                     stdout=slave,
                     stderr=slave,
                     close_fds=True)
st = os.stat("/proc/self/fd/%d" % slave)
ttyid = "tty[%x:%x]" % (st.st_rdev, st.st_dev)

os.close(slave)
time.sleep(1)

ret = subprocess.Popen([
    "../../../criu/criu", "dump", "-t",
    str(p.pid), "-v4", "--external", ttyid
]).wait()
if ret:
    sys.exit(ret)
p.wait()

new_master, slave = pty.openpty()  # get another pty pair
os.close(master)

ttyid = "fd[%d]:tty[%x:%x]" % (slave, st.st_rdev, st.st_dev)

ret = subprocess.Popen([
    "../../../criu/criu", "restore", "-v4", "--inherit-fd", ttyid,
    "--restore-sibling", "--restore-detach"
]).wait()
if ret:
    sys.exit(ret)

os.close(slave)
os.waitpid(-1, os.WNOHANG)  # is the process alive
os.close(new_master)
_, status = os.wait()
if not os.WIFSIGNALED(status) or os.WTERMSIG(status) != signal.SIGHUP:
    print(status)
    sys.exit(1)
print("PASS")

View File

@@ -1,31 +1,36 @@
import os
import tempfile, random


def mount(src, dst, shared, private, slave):
    cmd = "mount"
    if shared:
        cmd += " --make-shared"
    if private:
        cmd += " --make-private"
    if slave:
        cmd += " --make-slave"
    if src:
        cmd += " --bind '%s' '%s'" % (src, dst)
    else:
        cmd += " -t tmpfs none '%s'" % (dst)
    print(cmd)
    ret = os.system(cmd)
    if ret:
        print("failed")


root = tempfile.mkdtemp(prefix="root.mount", dir="/tmp")
mount(None, root, 1, 0, 0)
mounts = [root]
for i in range(10):
    dstdir = random.choice(mounts)
    dst = tempfile.mkdtemp(prefix="mount", dir=dstdir)
    src = random.choice(mounts + [None])
    mount(src, dst,
          random.randint(0, 100) > 50,
          random.randint(0, 100) > 90,
          random.randint(0, 100) > 50)
    mounts.append(dst)

View File

@ -14,169 +14,174 @@ does_not_exist = 'does-not.exist'
def setup_swrk(): def setup_swrk():
print('Connecting to CRIU in swrk mode.') print('Connecting to CRIU in swrk mode.')
css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET) css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
swrk = subprocess.Popen(['./criu', "swrk", "%d" % css[0].fileno()]) swrk = subprocess.Popen(['./criu', "swrk", "%d" % css[0].fileno()])
css[0].close() css[0].close()
return swrk, css[1] return swrk, css[1]
def setup_config_file(content): def setup_config_file(content):
# Creating a temporary file which will be used as configuration file. # Creating a temporary file which will be used as configuration file.
fd, path = mkstemp() fd, path = mkstemp()
with os.fdopen(fd, 'w') as f: with os.fdopen(fd, 'w') as f:
f.write(content) f.write(content)
os.environ['CRIU_CONFIG_FILE'] = path os.environ['CRIU_CONFIG_FILE'] = path
return path return path
def cleanup_config_file(path): def cleanup_config_file(path):
if os.environ.get('CRIU_CONFIG_FILE', None) is not None: if os.environ.get('CRIU_CONFIG_FILE', None) is not None:
del os.environ['CRIU_CONFIG_FILE'] del os.environ['CRIU_CONFIG_FILE']
os.unlink(path) os.unlink(path)
def cleanup_output(path): def cleanup_output(path):
for f in (does_not_exist, log_file): for f in (does_not_exist, log_file):
f = os.path.join(path, f) f = os.path.join(path, f)
if os.access(f, os.F_OK): if os.access(f, os.F_OK):
os.unlink(f) os.unlink(f)
def setup_criu_dump_request(): def setup_criu_dump_request():
# Create criu msg, set it's type to dump request # Create criu msg, set it's type to dump request
# and set dump options. Checkout more options in protobuf/rpc.proto # and set dump options. Checkout more options in protobuf/rpc.proto
req = rpc.criu_req() req = rpc.criu_req()
req.type = rpc.DUMP req.type = rpc.DUMP
req.opts.leave_running = True req.opts.leave_running = True
req.opts.log_level = 4 req.opts.log_level = 4
req.opts.log_file = log_file req.opts.log_file = log_file
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY) req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)
# Not necessary, just for testing # Not necessary, just for testing
req.opts.tcp_established = True req.opts.tcp_established = True
req.opts.shell_job = True req.opts.shell_job = True
return req return req
def do_rpc(s, req): def do_rpc(s, req):
# Send request # Send request
s.send(req.SerializeToString()) s.send(req.SerializeToString())
# Recv response # Recv response
resp = rpc.criu_resp() resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024 MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE)) resp.ParseFromString(s.recv(MAX_MSG_SIZE))
s.close() s.close()
return resp return resp
def test_broken_configuration_file(): def test_broken_configuration_file():
# Testing RPC configuration file mode with a broken configuration file. # Testing RPC configuration file mode with a broken configuration file.
# This should fail # This should fail
content = 'hopefully-this-option-will-never=exist' content = 'hopefully-this-option-will-never=exist'
path = setup_config_file(content) path = setup_config_file(content)
swrk, s = setup_swrk() swrk, s = setup_swrk()
s.close() s.close()
# This test is only about detecting wrong configuration files. # This test is only about detecting wrong configuration files.
# If we do not sleep it might happen that we kill CRIU before # If we do not sleep it might happen that we kill CRIU before
# it parses the configuration file. A short sleep makes sure # it parses the configuration file. A short sleep makes sure
# that the configuration file has been parsed. Hopefully. # that the configuration file has been parsed. Hopefully.
# (I am sure this will fail horribly at some point) # (I am sure this will fail horribly at some point)
time.sleep(0.3) time.sleep(0.3)
swrk.kill() swrk.kill()
return_code = swrk.wait() return_code = swrk.wait()
# delete temporary file again # delete temporary file again
cleanup_config_file(path) cleanup_config_file(path)
if return_code != 1: if return_code != 1:
print('FAIL: CRIU should have returned 1 instead of %d' % return_code) print('FAIL: CRIU should have returned 1 instead of %d' % return_code)
sys.exit(-1) sys.exit(-1)
def search_in_log_file(log, message): def search_in_log_file(log, message):
with open(os.path.join(args['dir'], log)) as f: with open(os.path.join(args['dir'], log)) as f:
if message not in f.read(): if message not in f.read():
print('FAIL: Missing the expected error message (%s) in the log file' % message) print(
sys.exit(-1) 'FAIL: Missing the expected error message (%s) in the log file'
% message)
sys.exit(-1)
def check_results(resp, log): def check_results(resp, log):
# Check if the specified log file exists # Check if the specified log file exists
if not os.path.isfile(os.path.join(args['dir'], log)): if not os.path.isfile(os.path.join(args['dir'], log)):
print('FAIL: Expected log file %s does not exist' % log) print('FAIL: Expected log file %s does not exist' % log)
sys.exit(-1) sys.exit(-1)
# Dump should have failed with: 'The criu itself is within dumped tree' # Dump should have failed with: 'The criu itself is within dumped tree'
if resp.type != rpc.DUMP: if resp.type != rpc.DUMP:
print('FAIL: Unexpected msg type %r' % resp.type) print('FAIL: Unexpected msg type %r' % resp.type)
sys.exit(-1) sys.exit(-1)
if 'The criu itself is within dumped tree' not in resp.cr_errmsg: if 'The criu itself is within dumped tree' not in resp.cr_errmsg:
print('FAIL: Missing the expected error message in RPC response') print('FAIL: Missing the expected error message in RPC response')
sys.exit(-1) sys.exit(-1)
# Look into the log file for the same message # Look into the log file for the same message
search_in_log_file(log, 'The criu itself is within dumped tree') search_in_log_file(log, 'The criu itself is within dumped tree')
def test_rpc_without_configuration_file(): def test_rpc_without_configuration_file():
# Testing without configuration file # Testing without configuration file
# Just doing a dump and checking for the logfile # Just doing a dump and checking for the logfile
req = setup_criu_dump_request() req = setup_criu_dump_request()
_, s = setup_swrk() _, s = setup_swrk()
resp = do_rpc(s, req) resp = do_rpc(s, req)
s.close() s.close()
check_results(resp, log_file) check_results(resp, log_file)
def test_rpc_with_configuration_file(): def test_rpc_with_configuration_file():
# Testing with configuration file # Testing with configuration file
# Just doing a dump and checking for the logfile # Just doing a dump and checking for the logfile
# Setting a different log file via configuration file # Setting a different log file via configuration file
# This should not work as RPC settings overwrite configuration # This should not work as RPC settings overwrite configuration
# file settings in the default configuration. # file settings in the default configuration.
log = does_not_exist log = does_not_exist
content = 'log-file ' + log + '\n' content = 'log-file ' + log + '\n'
content += 'no-tcp-established\nno-shell-job' content += 'no-tcp-established\nno-shell-job'
path = setup_config_file(content) path = setup_config_file(content)
req = setup_criu_dump_request() req = setup_criu_dump_request()
_, s = setup_swrk() _, s = setup_swrk()
do_rpc(s, req) do_rpc(s, req)
s.close() s.close()
cleanup_config_file(path) cleanup_config_file(path)
# Check if the specified log file exists # Check if the specified log file exists
# It should not as configuration files do not overwrite RPC values. # It should not as configuration files do not overwrite RPC values.
if os.path.isfile(os.path.join(args['dir'], log)): if os.path.isfile(os.path.join(args['dir'], log)):
print('FAIL: log file %s should not exist' % log) print('FAIL: log file %s should not exist' % log)
sys.exit(-1) sys.exit(-1)
def test_rpc_with_configuration_file_overwriting_rpc(): def test_rpc_with_configuration_file_overwriting_rpc():
# Testing with configuration file # Testing with configuration file
# Just doing a dump and checking for the logfile # Just doing a dump and checking for the logfile
# Setting a different log file via configuration file # Setting a different log file via configuration file
# This should not work as RPC settings overwrite configuration # This should not work as RPC settings overwrite configuration
# file settings in the default configuration. # file settings in the default configuration.
log = does_not_exist log = does_not_exist
content = 'log-file ' + log + '\n' content = 'log-file ' + log + '\n'
content += 'no-tcp-established\nno-shell-job' content += 'no-tcp-established\nno-shell-job'
path = setup_config_file(content) path = setup_config_file(content)
# Only set the configuration file via RPC; # Only set the configuration file via RPC;
# not via environment variable # not via environment variable
del os.environ['CRIU_CONFIG_FILE'] del os.environ['CRIU_CONFIG_FILE']
req = setup_criu_dump_request() req = setup_criu_dump_request()
req.opts.config_file = path req.opts.config_file = path
_, s = setup_swrk() _, s = setup_swrk()
resp = do_rpc(s, req) resp = do_rpc(s, req)
s.close() s.close()
cleanup_config_file(path) cleanup_config_file(path)
check_results(resp, log) check_results(resp, log)
parser = argparse.ArgumentParser(description="Test config files using CRIU RPC") parser = argparse.ArgumentParser(
parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed") description="Test config files using CRIU RPC")
parser.add_argument('dir',
type=str,
help="Directory where CRIU images should be placed")
args = vars(parser.parse_args()) args = vars(parser.parse_args())

View File

@ -6,130 +6,136 @@ import rpc_pb2 as rpc
import argparse import argparse
parser = argparse.ArgumentParser(description="Test errno reported by CRIU RPC") parser = argparse.ArgumentParser(description="Test errno reported by CRIU RPC")
parser.add_argument('socket', type = str, help = "CRIU service socket") parser.add_argument('socket', type=str, help="CRIU service socket")
parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed") parser.add_argument('dir',
type=str,
help="Directory where CRIU images should be placed")
args = vars(parser.parse_args()) args = vars(parser.parse_args())


# Prepare dir for images
class test:
    def __init__(self):
        self.imgs_fd = os.open(args['dir'], os.O_DIRECTORY)
        self.s = -1
        self._MAX_MSG_SIZE = 1024

    def connect(self):
        self.s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
        self.s.connect(args['socket'])

    def get_base_req(self):
        req = rpc.criu_req()
        req.opts.log_level = 4
        req.opts.images_dir_fd = self.imgs_fd
        return req

    def send_req(self, req):
        self.connect()
        self.s.send(req.SerializeToString())

    def recv_resp(self):
        resp = rpc.criu_resp()
        resp.ParseFromString(self.s.recv(self._MAX_MSG_SIZE))
        return resp
    def check_resp(self, resp, typ, err):
        if resp.type != typ:
            raise Exception('Unexpected response type ' + str(resp.type))
        if resp.success:
            raise Exception('Unexpected success = True')
        if err and resp.cr_errno != err:
            raise Exception('Unexpected cr_errno ' + str(resp.cr_errno))
    def no_process(self):
        print('Try to dump a non-existent process')
        # Get pid of non-existing process.
        # Suppose max_pid is not taken by any process.
        with open("/proc/sys/kernel/pid_max", "r") as f:
            pid = int(f.readline())
        try:
            os.kill(pid, 0)
        except OSError:
            pass
        else:
            raise Exception('max pid is taken')
        # Ask criu to dump non-existing process.
        req = self.get_base_req()
        req.type = rpc.DUMP
        req.opts.pid = pid
        self.send_req(req)
        resp = self.recv_resp()
        self.check_resp(resp, rpc.DUMP, errno.ESRCH)
        print('Success')
    def process_exists(self):
        print(
            'Try to restore a process whose pid is already taken by another process'
        )
        # Perform self-dump
        req = self.get_base_req()
        req.type = rpc.DUMP
        req.opts.leave_running = True
        self.send_req(req)
        resp = self.recv_resp()
        if resp.success != True:
            raise Exception('Self-dump failed')
        # Ask to restore a process from our own images
        req = self.get_base_req()
        req.type = rpc.RESTORE
        self.send_req(req)
        resp = self.recv_resp()
        self.check_resp(resp, rpc.RESTORE, errno.EEXIST)
        print('Success')
    def bad_options(self):
        print('Try to send criu invalid opts')
        # Subdirs are not allowed in log_file
        req = self.get_base_req()
        req.type = rpc.DUMP
        req.opts.log_file = "../file.log"
        self.send_req(req)
        resp = self.recv_resp()
        self.check_resp(resp, rpc.DUMP, errno.EBADRQC)
        print('Success')

    def bad_request(self):
        print('Try to send criu invalid request type')
        req = self.get_base_req()
        req.type = rpc.NOTIFY
        self.send_req(req)
        resp = self.recv_resp()
        self.check_resp(resp, rpc.EMPTY, None)
        print('Success')
    def run(self):
        self.no_process()
        self.process_exists()
        self.bad_options()
        self.bad_request()


t = test()
t.run()
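
The cr_errno checked by check_resp() above carries a plain errno value (ESRCH, EEXIST, EBADRQC, ...), so a failed response can also be reported symbolically. A small sketch of such a helper, with names of my own choosing rather than from the test:

import errno
import os


def describe_failure(resp):
    # Turn a failed criu_resp into a readable message; cr_errno is an
    # optional field, so check its presence before using it.
    if resp.success:
        return 'request succeeded'
    if not resp.HasField('cr_errno'):
        return 'request failed without an errno'
    code = resp.cr_errno
    name = errno.errorcode.get(code, 'errno %d' % code)
    return 'request failed with %s: %s' % (name, os.strerror(code))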


@ -5,8 +5,10 @@ import rpc_pb2 as rpc
import argparse

parser = argparse.ArgumentParser(description="Test page-server using CRIU RPC")
parser.add_argument('socket', type=str, help="CRIU service socket")
parser.add_argument('dir',
                    type=str,
                    help="Directory where CRIU images should be placed")

args = vars(parser.parse_args())
@ -16,45 +18,45 @@ s.connect(args['socket'])
# Start page-server
print('Starting page-server')

req = rpc.criu_req()
req.type = rpc.PAGE_SERVER
req.opts.log_file = 'page-server.log'
req.opts.log_level = 4
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)

s.send(req.SerializeToString())

resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))

if resp.type != rpc.PAGE_SERVER:
    print('Unexpected msg type')
    sys.exit(1)
else:
    if resp.success:
        # check if pid even exists
        try:
            os.kill(resp.ps.pid, 0)
        except OSError as err:
            if err.errno == errno.ESRCH:
                print('No process with page-server pid %d' % (resp.ps.pid))
            else:
                print('Can\'t check that process %d exists' % (resp.ps.pid))
            sys.exit(1)
        print('Success, page-server pid %d started on port %u' %
              (resp.ps.pid, resp.ps.port))
    else:
        print('Failed to start page-server')
        sys.exit(1)
# Perform self-dump
print('Dumping myself using page-server')

req.type = rpc.DUMP
req.opts.ps.port = resp.ps.port
req.opts.ps.address = "127.0.0.1"
req.opts.log_file = 'dump.log'
req.opts.leave_running = True

s.close()
s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
@ -64,11 +66,11 @@ s.send(req.SerializeToString())
resp.ParseFromString(s.recv(MAX_MSG_SIZE))

if resp.type != rpc.DUMP:
    print('Unexpected msg type')
    sys.exit(1)
else:
    if resp.success:
        print('Success')
    else:
        print('Fail')
        sys.exit(1)
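
Each request in these scripts repeats the same serialize/send/recv/parse steps against a fresh SEQPACKET connection, with the same 1024-byte message cap. A small wrapper along those lines; the function name is mine, not part of the tests:

import socket

import rpc_pb2 as rpc  # generated from images/rpc.proto

MAX_MSG_SIZE = 1024


def criu_rpc(socket_path, req):
    # Open a connection to the CRIU service socket, send one criu_req
    # and return the parsed criu_resp; the scripts above reconnect for
    # every request, and this helper does the same.
    s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
    try:
        s.connect(socket_path)
        s.send(req.SerializeToString())
        resp = rpc.criu_resp()
        resp.ParseFromString(s.recv(MAX_MSG_SIZE))
        return resp
    finally:
        s.close()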


@ -12,6 +12,6 @@ r = f.read(1)
f.close()

if r == '\0':
    sys.exit(0)

sys.exit(-1)


@ -4,9 +4,12 @@ import socket, os, sys
import rpc_pb2 as rpc
import argparse

parser = argparse.ArgumentParser(
    description="Test ability to restore a process from images using CRIU RPC")
parser.add_argument('socket', type=str, help="CRIU service socket")
parser.add_argument('dir',
                    type=str,
                    help="Directory where CRIU images could be found")

args = vars(parser.parse_args())
@ -16,30 +19,30 @@ s.connect(args['socket'])
# Create criu msg, set its type to restore request
# and set restore options. Check out more options in protobuf/rpc.proto
req = rpc.criu_req()
req.type = rpc.RESTORE
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)
# As the dumped process is running with setsid this should not
# be necessary. There seems to be a problem for this testcase
# in combination with alpine's setsid.
# The dump is now done with -j, and so is the restore.
req.opts.shell_job = True

# Send request
s.send(req.SerializeToString())

# Recv response
resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))

if resp.type != rpc.RESTORE:
    print('Unexpected msg type')
    sys.exit(-1)
else:
    if resp.success:
        print('Restore success')
    else:
        print('Restore fail')
        sys.exit(-1)

print("PID of the restored program is %d\n" % (resp.restore.pid))
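
After a successful restore, the reported resp.restore.pid can be sanity-checked the same way the page-server pid is checked earlier, with a zero signal, e.g. pid_is_alive(resp.restore.pid). A short sketch; the helper name is mine:

import errno
import os


def pid_is_alive(pid):
    # Signal 0 delivers nothing; it only performs the existence and
    # permission checks.
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            return False
        if err.errno == errno.EPERM:
            # The process exists but is owned by someone else.
            return True
        raise
    return True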


@ -4,9 +4,12 @@ import socket, os, sys
import rpc_pb2 as rpc
import argparse

parser = argparse.ArgumentParser(
    description="Test dump/restore using CRIU RPC")
parser.add_argument('socket', type=str, help="CRIU service socket")
parser.add_argument('dir',
                    type=str,
                    help="Directory where CRIU images should be placed")

args = vars(parser.parse_args())
@ -16,32 +19,32 @@ s.connect(args['socket'])
# Create criu msg, set its type to dump request
# and set dump options. Check out more options in protobuf/rpc.proto
req = rpc.criu_req()
req.type = rpc.DUMP
req.opts.leave_running = True
req.opts.log_level = 4
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)

# Send request
s.send(req.SerializeToString())

# Recv response
resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))

if resp.type != rpc.DUMP:
    print('Unexpected msg type')
    sys.exit(-1)
else:
    if resp.success:
        print('Success')
    else:
        print('Fail')
        sys.exit(-1)

if resp.dump.restored:
    print('Restored')

# Connect to service socket
s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
@ -61,21 +64,21 @@ MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))

if resp.type != rpc.VERSION:
    print('RPC: Unexpected msg type')
    sys.exit(-1)
else:
    if resp.success:
        print('RPC: Success')
        print('CRIU major %d' % resp.version.major_number)
        print('CRIU minor %d' % resp.version.minor_number)
        if resp.version.HasField('gitid'):
            print('CRIU gitid %s' % resp.version.gitid)
        if resp.version.HasField('sublevel'):
            print('CRIU sublevel %s' % resp.version.sublevel)
        if resp.version.HasField('extra'):
            print('CRIU extra %s' % resp.version.extra)
        if resp.version.HasField('name'):
            print('CRIU name %s' % resp.version.name)
    else:
        print('Fail')
        sys.exit(-1)


@ -27,21 +27,21 @@ MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))

if resp.type != rpc.VERSION:
    print('RPC: Unexpected msg type')
    sys.exit(-1)
else:
    if resp.success:
        print('RPC: Success')
        print('CRIU major %d' % resp.version.major_number)
        print('CRIU minor %d' % resp.version.minor_number)
        if resp.version.HasField('gitid'):
            print('CRIU gitid %s' % resp.version.gitid)
        if resp.version.HasField('sublevel'):
            print('CRIU sublevel %s' % resp.version.sublevel)
        if resp.version.HasField('extra'):
            print('CRIU extra %s' % resp.version.extra)
        if resp.version.HasField('name'):
            print('CRIU name %s' % resp.version.name)
    else:
        print('Fail')
        sys.exit(-1)
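
Besides printing them, the major_number and minor_number fields of the VERSION reply can gate a test run on a minimum CRIU version. A hedged sketch; the required version used here is only an example:

def version_at_least(resp, major, minor):
    # Compare the (major, minor) pair from a VERSION response against a
    # required minimum.
    have = (resp.version.major_number, resp.version.minor_number)
    return have >= (major, minor)


# Example: refuse to continue on anything older than CRIU 3.11.
# if not version_at_least(resp, 3, 11):
#     sys.exit('CRIU is too old for this test')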


@ -6,15 +6,17 @@ cr_bin = "../../../criu/criu"
os.chdir(os.getcwd())


def create_pty():
    (fd1, fd2) = pty.openpty()
    return (os.fdopen(fd1, "w+"), os.fdopen(fd2, "w+"))


if not os.access("work", os.X_OK):
    os.mkdir("work", 0755)

open("running", "w").close()

m, s = create_pty()
p = os.pipe()
pr = os.fdopen(p[0], "r")
pw = os.fdopen(p[1], "w")
@ -46,14 +48,15 @@ if ret != 0:
os.wait()
os.unlink("running")

m, s = create_pty()

cpid = os.fork()
if cpid == 0:
    os.setsid()
    fcntl.ioctl(m.fileno(), termios.TIOCSCTTY, 1)
    cmd = [cr_bin, "restore", "-j", "-D", "work", "-v"]
    print("Run: %s" % " ".join(cmd))
    ret = subprocess.Popen([cr_bin, "restore", "-j", "-D", "work",
                            "-v"]).wait()
    if ret != 0:
        sys.exit(1)
    sys.exit(0)

File diff suppressed because it is too large