mirror of https://github.com/checkpoint-restore/criu synced 2025-08-22 01:51:51 +00:00

py: Reformat everything into pep8 style

As discussed on the mailing list, the current formatting of the .py files does not
conform to the accepted standard (PEP 8), so we had better reformat them. The yapf
tool is used for this. The command I used was

  yapf -i $(find -name *.py)

Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
Andrei Vagin 2019-09-07 15:46:22 +03:00
parent 5ff4fcb753
commit 5aa72e7237
28 changed files with 5738 additions and 5167 deletions
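The reformatting can also be reproduced or checked from Python. A minimal sketch, not part of the commit, assuming yapf is installed; the file path is only an example:

    # Check whether one file already matches the pep8 style yapf enforces.
    # 'lib/py/images.py' is an illustrative path, not taken from the patch.
    from yapf.yapflib.yapf_api import FormatCode

    with open('lib/py/images.py') as f:
        src = f.read()

    formatted, changed = FormatCode(src, style_config='pep8')
    print('needs reformatting' if changed else 'already formatted')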

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -6,337 +6,409 @@ import os
import pycriu


def inf(opts):
    if opts['in']:
        return open(opts['in'], 'rb')
    else:
        return sys.stdin


def outf(opts):
    if opts['out']:
        return open(opts['out'], 'w+')
    else:
        return sys.stdout


def dinf(opts, name):
    return open(os.path.join(opts['dir'], name))


def decode(opts):
    indent = None

    try:
        img = pycriu.images.load(inf(opts), opts['pretty'], opts['nopl'])
    except pycriu.images.MagicException as exc:
        print("Unknown magic %#x.\n"\
              "Maybe you are feeding me an image with "\
              "raw data(i.e. pages.img)?" % exc.magic, file=sys.stderr)
        sys.exit(1)

    if opts['pretty']:
        indent = 4

    f = outf(opts)
    json.dump(img, f, indent=indent)
    if f == sys.stdout:
        f.write("\n")


def encode(opts):
    img = json.load(inf(opts))
    pycriu.images.dump(img, outf(opts))


def info(opts):
    infs = pycriu.images.info(inf(opts))
    json.dump(infs, sys.stdout, indent=4)
    print()


def get_task_id(p, val):
    return p[val] if val in p else p['ns_' + val][0]


#
# Explorers
#


class ps_item:
    def __init__(self, p, core):
        self.pid = get_task_id(p, 'pid')
        self.ppid = p['ppid']
        self.p = p
        self.core = core
        self.kids = []


def show_ps(p, opts, depth=0):
    print("%7d%7d%7d %s%s" %
          (p.pid, get_task_id(p.p, 'pgid'), get_task_id(p.p, 'sid'), ' ' *
           (4 * depth), p.core['tc']['comm']))
    for kid in p.kids:
        show_ps(kid, opts, depth + 1)


def explore_ps(opts):
    pss = {}
    ps_img = pycriu.images.load(dinf(opts, 'pstree.img'))
    for p in ps_img['entries']:
        core = pycriu.images.load(
            dinf(opts, 'core-%d.img' % get_task_id(p, 'pid')))
        ps = ps_item(p, core['entries'][0])
        pss[ps.pid] = ps

    # Build tree
    psr = None
    for pid in pss:
        p = pss[pid]
        if p.ppid == 0:
            psr = p
            continue

        pp = pss[p.ppid]
        pp.kids.append(p)

    print("%7s%7s%7s %s" % ('PID', 'PGID', 'SID', 'COMM'))
    show_ps(psr, opts)


files_img = None


def ftype_find_in_files(opts, ft, fid):
    global files_img

    if files_img is None:
        try:
            files_img = pycriu.images.load(dinf(opts, "files.img"))['entries']
        except:
            files_img = []

    if len(files_img) == 0:
        return None

    for f in files_img:
        if f['id'] == fid:
            return f

    return None


def ftype_find_in_image(opts, ft, fid, img):
    f = ftype_find_in_files(opts, ft, fid)
    if f:
        return f[ft['field']]

    if ft['img'] == None:
        ft['img'] = pycriu.images.load(dinf(opts, img))['entries']
    for f in ft['img']:
        if f['id'] == fid:
            return f
    return None


def ftype_reg(opts, ft, fid):
    rf = ftype_find_in_image(opts, ft, fid, 'reg-files.img')
    return rf and rf['name'] or 'unknown path'


def ftype_pipe(opts, ft, fid):
    p = ftype_find_in_image(opts, ft, fid, 'pipes.img')
    return p and 'pipe[%d]' % p['pipe_id'] or 'pipe[?]'


def ftype_unix(opts, ft, fid):
    ux = ftype_find_in_image(opts, ft, fid, 'unixsk.img')
    if not ux:
        return 'unix[?]'

    n = ux['name'] and ' %s' % ux['name'] or ''
    return 'unix[%d (%d)%s]' % (ux['ino'], ux['peer'], n)


file_types = {
    'REG': {
        'get': ftype_reg,
        'img': None,
        'field': 'reg'
    },
    'PIPE': {
        'get': ftype_pipe,
        'img': None,
        'field': 'pipe'
    },
    'UNIXSK': {
        'get': ftype_unix,
        'img': None,
        'field': 'usk'
    },
}


def ftype_gen(opts, ft, fid):
    return '%s.%d' % (ft['typ'], fid)


files_cache = {}


def get_file_str(opts, fd):
    key = (fd['type'], fd['id'])
    f = files_cache.get(key, None)
    if not f:
        ft = file_types.get(fd['type'], {'get': ftype_gen, 'typ': fd['type']})
        f = ft['get'](opts, ft, fd['id'])
        files_cache[key] = f

    return f


def explore_fds(opts):
    ps_img = pycriu.images.load(dinf(opts, 'pstree.img'))
    for p in ps_img['entries']:
        pid = get_task_id(p, 'pid')
        idi = pycriu.images.load(dinf(opts, 'ids-%s.img' % pid))
        fdt = idi['entries'][0]['files_id']
        fdi = pycriu.images.load(dinf(opts, 'fdinfo-%d.img' % fdt))

        print("%d" % pid)
        for fd in fdi['entries']:
            print("\t%7d: %s" % (fd['fd'], get_file_str(opts, fd)))

        fdi = pycriu.images.load(dinf(opts, 'fs-%d.img' % pid))['entries'][0]
        print("\t%7s: %s" %
              ('cwd', get_file_str(opts, {
                  'type': 'REG',
                  'id': fdi['cwd_id']
              })))
        print("\t%7s: %s" %
              ('root', get_file_str(opts, {
                  'type': 'REG',
                  'id': fdi['root_id']
              })))


class vma_id:
    def __init__(self):
        self.__ids = {}
        self.__last = 1

    def get(self, iid):
        ret = self.__ids.get(iid, None)
        if not ret:
            ret = self.__last
            self.__last += 1
            self.__ids[iid] = ret

        return ret


def explore_mems(opts):
    ps_img = pycriu.images.load(dinf(opts, 'pstree.img'))
    vids = vma_id()
    for p in ps_img['entries']:
        pid = get_task_id(p, 'pid')
        mmi = pycriu.images.load(dinf(opts, 'mm-%d.img' % pid))['entries'][0]

        print("%d" % pid)
        print("\t%-36s %s" % ('exe',
                              get_file_str(opts, {
                                  'type': 'REG',
                                  'id': mmi['exe_file_id']
                              })))

        for vma in mmi['vmas']:
            st = vma['status']
            if st & (1 << 10):
                fn = ' ' + 'ips[%lx]' % vids.get(vma['shmid'])
            elif st & (1 << 8):
                fn = ' ' + 'shmem[%lx]' % vids.get(vma['shmid'])
            elif st & (1 << 11):
                fn = ' ' + 'packet[%lx]' % vids.get(vma['shmid'])
            elif st & ((1 << 6) | (1 << 7)):
                fn = ' ' + get_file_str(opts, {
                    'type': 'REG',
                    'id': vma['shmid']
                })
                if vma['pgoff']:
                    fn += ' + %#lx' % vma['pgoff']
                if st & (1 << 7):
                    fn += ' (s)'
            elif st & (1 << 1):
                fn = ' [stack]'
            elif st & (1 << 2):
                fn = ' [vsyscall]'
            elif st & (1 << 3):
                fn = ' [vdso]'
            elif vma['flags'] & 0x0100:  # growsdown
                fn = ' [stack?]'
            else:
                fn = ''

            if not st & (1 << 0):
                fn += ' *'

            prot = vma['prot'] & 0x1 and 'r' or '-'
            prot += vma['prot'] & 0x2 and 'w' or '-'
            prot += vma['prot'] & 0x4 and 'x' or '-'

            astr = '%08lx-%08lx' % (vma['start'], vma['end'])
            print("\t%-36s%s%s" % (astr, prot, fn))


def explore_rss(opts):
    ps_img = pycriu.images.load(dinf(opts, 'pstree.img'))
    for p in ps_img['entries']:
        pid = get_task_id(p, 'pid')
        vmas = pycriu.images.load(dinf(opts, 'mm-%d.img' %
                                       pid))['entries'][0]['vmas']
        pms = pycriu.images.load(dinf(opts, 'pagemap-%d.img' % pid))['entries']

        print("%d" % pid)
        vmi = 0
        pvmi = -1
        for pm in pms[1:]:
            pstr = '\t%lx / %-8d' % (pm['vaddr'], pm['nr_pages'])
            while vmas[vmi]['end'] <= pm['vaddr']:
                vmi += 1

            pme = pm['vaddr'] + (pm['nr_pages'] << 12)
            vstr = ''
            while vmas[vmi]['start'] < pme:
                vma = vmas[vmi]
                if vmi == pvmi:
                    vstr += ' ~'
                else:
                    vstr += ' %08lx / %-8d' % (
                        vma['start'], (vma['end'] - vma['start']) >> 12)
                    if vma['status'] & ((1 << 6) | (1 << 7)):
                        vstr += ' ' + get_file_str(opts, {
                            'type': 'REG',
                            'id': vma['shmid']
                        })
                    pvmi = vmi
                vstr += '\n\t%23s' % ''
                vmi += 1

            vmi -= 1

            print('%-24s%s' % (pstr, vstr))


explorers = {
    'ps': explore_ps,
    'fds': explore_fds,
    'mems': explore_mems,
    'rss': explore_rss
}


def explore(opts):
    explorers[opts['what']](opts)


def main():
    desc = 'CRiu Image Tool'
    parser = argparse.ArgumentParser(
        description=desc, formatter_class=argparse.RawTextHelpFormatter)

    subparsers = parser.add_subparsers(
        help='Use crit CMD --help for command-specific help')

    # Decode
    decode_parser = subparsers.add_parser(
        'decode', help='convert criu image from binary type to json')
    decode_parser.add_argument(
        '--pretty',
        help=
        'Multiline with indents and some numerical fields in field-specific format',
        action='store_true')
    decode_parser.add_argument(
        '-i',
        '--in',
        help='criu image in binary format to be decoded (stdin by default)')
    decode_parser.add_argument(
        '-o',
        '--out',
        help='where to put criu image in json format (stdout by default)')
    decode_parser.set_defaults(func=decode, nopl=False)

    # Encode
    encode_parser = subparsers.add_parser(
        'encode', help='convert criu image from json type to binary')
    encode_parser.add_argument(
        '-i',
        '--in',
        help='criu image in json format to be encoded (stdin by default)')
    encode_parser.add_argument(
        '-o',
        '--out',
        help='where to put criu image in binary format (stdout by default)')
    encode_parser.set_defaults(func=encode)

    # Info
    info_parser = subparsers.add_parser('info', help='show info about image')
    info_parser.add_argument("in")
    info_parser.set_defaults(func=info)

    # Explore
    x_parser = subparsers.add_parser('x', help='explore image dir')
    x_parser.add_argument('dir')
    x_parser.add_argument('what', choices=['ps', 'fds', 'mems', 'rss'])
    x_parser.set_defaults(func=explore)

    # Show
    show_parser = subparsers.add_parser(
        'show', help="convert criu image from binary to human-readable json")
    show_parser.add_argument("in")
    show_parser.add_argument('--nopl',
                             help='do not show entry payload (if exists)',
                             action='store_true')
    show_parser.set_defaults(func=decode, pretty=True, out=None)

    opts = vars(parser.parse_args())

    if not opts:
        sys.stderr.write(parser.format_usage())
        sys.stderr.write("crit: error: too few arguments\n")
        sys.exit(1)

    opts["func"](opts)


if __name__ == '__main__':
    main()


@@ -8,325 +8,336 @@ import struct
import pycriu.rpc_pb2 as rpc


class _criu_comm:
    """
    Base class for communication classes.
    """
    COMM_SK = 0
    COMM_FD = 1
    COMM_BIN = 2
    comm_type = None
    comm = None
    sk = None

    def connect(self, daemon):
        """
        Connect to criu and return socket object.
        daemon -- is for whether or not criu should daemonize if executing criu from binary(comm_bin).
        """
        pass

    def disconnect(self):
        """
        Disconnect from criu.
        """
        pass


class _criu_comm_sk(_criu_comm):
    """
    Communication class for unix socket.
    """

    def __init__(self, sk_path):
        self.comm_type = self.COMM_SK
        self.comm = sk_path

    def connect(self, daemon):
        self.sk = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
        self.sk.connect(self.comm)

        return self.sk

    def disconnect(self):
        self.sk.close()


class _criu_comm_fd(_criu_comm):
    """
    Communication class for file descriptor.
    """

    def __init__(self, fd):
        self.comm_type = self.COMM_FD
        self.comm = fd

    def connect(self, daemon):
        self.sk = socket.fromfd(self.comm, socket.AF_UNIX,
                                socket.SOCK_SEQPACKET)

        return self.sk

    def disconnect(self):
        self.sk.close()


class _criu_comm_bin(_criu_comm):
    """
    Communication class for binary.
    """

    def __init__(self, bin_path):
        self.comm_type = self.COMM_BIN
        self.comm = bin_path
        self.swrk = None
        self.daemon = None

    def connect(self, daemon):
        # Kind of the same thing we do in libcriu
        css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
        flags = fcntl.fcntl(css[1], fcntl.F_GETFD)
        fcntl.fcntl(css[1], fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
        flags = fcntl.fcntl(css[0], fcntl.F_GETFD)
        fcntl.fcntl(css[0], fcntl.F_SETFD, flags & ~fcntl.FD_CLOEXEC)

        self.daemon = daemon

        p = os.fork()

        if p == 0:

            def exec_criu():
                os.close(0)
                os.close(1)
                os.close(2)

                css[0].send(struct.pack('i', os.getpid()))
                os.execv(self.comm,
                         [self.comm, 'swrk',
                          "%d" % css[0].fileno()])
                os._exit(1)

            if daemon:
                # Python has no daemon(3) alternative,
                # so we need to mimic it ourself.
                p = os.fork()

                if p == 0:
                    os.setsid()

                    exec_criu()
                else:
                    os._exit(0)
            else:
                exec_criu()
        else:
            if daemon:
                os.waitpid(p, 0)

        css[0].close()
        self.swrk = struct.unpack('i', css[1].recv(4))[0]
        self.sk = css[1]

        return self.sk

    def disconnect(self):
        self.sk.close()
        if not self.daemon:
            os.waitpid(self.swrk, 0)


class CRIUException(Exception):
    """
    Exception class for handling and storing criu errors.
    """
    typ = None
    _str = None

    def __str__(self):
        return self._str


class CRIUExceptionInternal(CRIUException):
    """
    Exception class for handling and storing internal errors.
    """

    def __init__(self, typ, s):
        self.typ = typ
        self._str = "%s failed with internal error: %s" % (
            rpc.criu_req_type.Name(self.typ), s)


class CRIUExceptionExternal(CRIUException):
    """
    Exception class for handling and storing criu RPC errors.
    """

    def __init__(self, req_typ, resp_typ, errno):
        self.typ = req_typ
        self.resp_typ = resp_typ
        self.errno = errno
        self._str = self._gen_error_str()

    def _gen_error_str(self):
        s = "%s failed: " % (rpc.criu_req_type.Name(self.typ), )

        if self.typ != self.resp_typ:
            s += "Unexpected response type %d: " % (self.resp_typ, )

        s += "Error(%d): " % (self.errno, )

        if self.errno == errno.EBADRQC:
            s += "Bad options"

        if self.typ == rpc.DUMP:
            if self.errno == errno.ESRCH:
                s += "No process with such pid"

        if self.typ == rpc.RESTORE:
            if self.errno == errno.EEXIST:
                s += "Process with requested pid already exists"

        s += "Unknown"

        return s


class criu:
    """
    Call criu through RPC.
    """
    opts = None  #CRIU options in pb format

    _comm = None  #Communication method

    def __init__(self):
        self.use_binary('criu')
        self.opts = rpc.criu_opts()
        self.sk = None

    def use_sk(self, sk_name):
        """
        Access criu using unix socket which that belongs to criu service daemon.
        """
        self._comm = _criu_comm_sk(sk_name)

    def use_fd(self, fd):
        """
        Access criu using provided fd.
        """
        self._comm = _criu_comm_fd(fd)

    def use_binary(self, bin_name):
        """
        Access criu by execing it using provided path to criu binary.
        """
        self._comm = _criu_comm_bin(bin_name)

    def _send_req_and_recv_resp(self, req):
        """
        As simple as send request and receive response.
        """
        # In case of self-dump we need to spawn criu swrk detached
        # from our current process, as criu has a hard time separating
        # process resources from its own if criu is located in a same
        # process tree it is trying to dump.
        daemon = False
        if req.type == rpc.DUMP and not req.opts.HasField('pid'):
            daemon = True

        try:
            if not self.sk:
                s = self._comm.connect(daemon)
            else:
                s = self.sk

            if req.keep_open:
                self.sk = s

            s.send(req.SerializeToString())

            buf = s.recv(len(s.recv(1, socket.MSG_TRUNC | socket.MSG_PEEK)))

            if not req.keep_open:
                self._comm.disconnect()

            resp = rpc.criu_resp()
            resp.ParseFromString(buf)
        except Exception as e:
            raise CRIUExceptionInternal(req.type, str(e))

        return resp

    def check(self):
        """
        Checks whether the kernel support is up-to-date.
        """
        req = rpc.criu_req()
        req.type = rpc.CHECK

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

    def dump(self):
        """
        Checkpoint a process/tree identified by opts.pid.
        """
        req = rpc.criu_req()
        req.type = rpc.DUMP
        req.opts.MergeFrom(self.opts)

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

        return resp.dump

    def pre_dump(self):
        """
        Checkpoint a process/tree identified by opts.pid.
        """
        req = rpc.criu_req()
        req.type = rpc.PRE_DUMP
        req.opts.MergeFrom(self.opts)

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

        return resp.dump

    def restore(self):
        """
        Restore a process/tree.
        """
        req = rpc.criu_req()
        req.type = rpc.RESTORE
        req.opts.MergeFrom(self.opts)

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

        return resp.restore

    def page_server_chld(self):
        req = rpc.criu_req()
        req.type = rpc.PAGE_SERVER_CHLD
        req.opts.MergeFrom(self.opts)
        req.keep_open = True

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

        return resp.ps

    def wait_pid(self, pid):
        req = rpc.criu_req()
        req.type = rpc.WAIT_PID
        req.pid = pid

        resp = self._send_req_and_recv_resp(req)

        if not resp.success:
            raise CRIUExceptionExternal(req.type, resp.type, resp.cr_errno)

        return resp.status
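A hedged usage sketch of the RPC wrapper above, not part of this patch: it assumes the pycriu package and its generated protobuf modules are importable, that a criu binary is installed, and uses example values for the images directory and pid.

    import os
    from pycriu.criu import criu

    c = criu()
    c.use_binary('criu')    # talk to the criu binary in swrk mode
    c.opts.images_dir_fd = os.open('/tmp/criu-imgs', os.O_DIRECTORY)  # example dir
    c.opts.pid = 1234       # example pid of the task to checkpoint
    c.opts.shell_job = True
    c.check()               # raises a CRIUException subclass on failure
    c.dump()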


@@ -48,8 +48,8 @@ from . import pb
from . import pb2dict
if "encodebytes" not in dir(base64):
base64.encodebytes = base64.encodestring
base64.decodebytes = base64.decodestring
base64.encodebytes = base64.encodestring
base64.decodebytes = base64.decodestring
#
# Predefined hardcoded constants
@@ -57,233 +57,241 @@ sizeof_u16 = 2
sizeof_u32 = 4
sizeof_u64 = 8
# A helper for rounding
def round_up(x,y):
return (((x - 1) | (y - 1)) + 1)
def round_up(x, y):
return (((x - 1) | (y - 1)) + 1)
class MagicException(Exception):
def __init__(self, magic):
self.magic = magic
def __init__(self, magic):
self.magic = magic
# Generic class to handle loading/dumping criu images entries from/to bin
# format to/from dict(json).
class entry_handler:
"""
"""
Generic class to handle loading/dumping criu images
entries from/to bin format to/from dict(json).
"""
def __init__(self, payload, extra_handler=None):
"""
def __init__(self, payload, extra_handler=None):
"""
Sets payload class and extra handler class.
"""
self.payload = payload
self.extra_handler = extra_handler
self.payload = payload
self.extra_handler = extra_handler
def load(self, f, pretty = False, no_payload = False):
"""
def load(self, f, pretty=False, no_payload=False):
"""
Convert criu image entries from binary format to dict(json).
Takes a file-like object and returnes a list with entries in
dict(json) format.
"""
entries = []
entries = []
while True:
entry = {}
while True:
entry = {}
# Read payload
pbuff = self.payload()
buf = f.read(4)
if buf == b'':
break
size, = struct.unpack('i', buf)
pbuff.ParseFromString(f.read(size))
entry = pb2dict.pb2dict(pbuff, pretty)
# Read payload
pbuff = self.payload()
buf = f.read(4)
if buf == b'':
break
size, = struct.unpack('i', buf)
pbuff.ParseFromString(f.read(size))
entry = pb2dict.pb2dict(pbuff, pretty)
# Read extra
if self.extra_handler:
if no_payload:
def human_readable(num):
for unit in ['','K','M','G','T','P','E','Z']:
if num < 1024.0:
if int(num) == num:
return "%d%sB" % (num, unit)
else:
return "%.1f%sB" % (num, unit)
num /= 1024.0
return "%.1fYB" % num
# Read extra
if self.extra_handler:
if no_payload:
pl_size = self.extra_handler.skip(f, pbuff)
entry['extra'] = '... <%s>' % human_readable(pl_size)
else:
entry['extra'] = self.extra_handler.load(f, pbuff)
def human_readable(num):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if num < 1024.0:
if int(num) == num:
return "%d%sB" % (num, unit)
else:
return "%.1f%sB" % (num, unit)
num /= 1024.0
return "%.1fYB" % num
entries.append(entry)
pl_size = self.extra_handler.skip(f, pbuff)
entry['extra'] = '... <%s>' % human_readable(pl_size)
else:
entry['extra'] = self.extra_handler.load(f, pbuff)
return entries
entries.append(entry)
def loads(self, s, pretty = False):
"""
return entries
def loads(self, s, pretty=False):
"""
Same as load(), but takes a string as an argument.
"""
f = io.BytesIO(s)
return self.load(f, pretty)
f = io.BytesIO(s)
return self.load(f, pretty)
def dump(self, entries, f):
"""
def dump(self, entries, f):
"""
Convert criu image entries from dict(json) format to binary.
Takes a list of entries and a file-like object to write entries
in binary format to.
"""
for entry in entries:
extra = entry.pop('extra', None)
for entry in entries:
extra = entry.pop('extra', None)
# Write payload
pbuff = self.payload()
pb2dict.dict2pb(entry, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
# Write payload
pbuff = self.payload()
pb2dict.dict2pb(entry, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
# Write extra
if self.extra_handler and extra:
self.extra_handler.dump(extra, f, pbuff)
# Write extra
if self.extra_handler and extra:
self.extra_handler.dump(extra, f, pbuff)
def dumps(self, entries):
"""
def dumps(self, entries):
"""
Same as dump(), but doesn't take file-like object and just
returns a string.
"""
f = io.BytesIO('')
self.dump(entries, f)
return f.read()
f = io.BytesIO('')
self.dump(entries, f)
return f.read()
def count(self, f):
"""
def count(self, f):
"""
Counts the number of top-level object in the image file
"""
entries = 0
entries = 0
while True:
buf = f.read(4)
if buf == '':
break
size, = struct.unpack('i', buf)
f.seek(size, 1)
entries += 1
while True:
buf = f.read(4)
if buf == '':
break
size, = struct.unpack('i', buf)
f.seek(size, 1)
entries += 1
return entries
return entries
# Special handler for pagemap.img
class pagemap_handler:
"""
"""
Special entry handler for pagemap.img, which is unique in a way
that it has a header of pagemap_head type followed by entries
of pagemap_entry type.
"""
def load(self, f, pretty = False, no_payload = False):
entries = []
pbuff = pb.pagemap_head()
while True:
buf = f.read(4)
if buf == b'':
break
size, = struct.unpack('i', buf)
pbuff.ParseFromString(f.read(size))
entries.append(pb2dict.pb2dict(pbuff, pretty))
def load(self, f, pretty=False, no_payload=False):
entries = []
pbuff = pb.pagemap_entry()
pbuff = pb.pagemap_head()
while True:
buf = f.read(4)
if buf == b'':
break
size, = struct.unpack('i', buf)
pbuff.ParseFromString(f.read(size))
entries.append(pb2dict.pb2dict(pbuff, pretty))
return entries
pbuff = pb.pagemap_entry()
def loads(self, s, pretty = False):
f = io.BytesIO(s)
return self.load(f, pretty)
return entries
def dump(self, entries, f):
pbuff = pb.pagemap_head()
for item in entries:
pb2dict.dict2pb(item, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
def loads(self, s, pretty=False):
f = io.BytesIO(s)
return self.load(f, pretty)
pbuff = pb.pagemap_entry()
def dump(self, entries, f):
pbuff = pb.pagemap_head()
for item in entries:
pb2dict.dict2pb(item, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
def dumps(self, entries):
f = io.BytesIO('')
self.dump(entries, f)
return f.read()
pbuff = pb.pagemap_entry()
def dumps(self, entries):
f = io.BytesIO('')
self.dump(entries, f)
return f.read()
def count(self, f):
return entry_handler(None).count(f) - 1
def count(self, f):
return entry_handler(None).count(f) - 1
# Special handler for ghost-file.img
class ghost_file_handler:
def load(self, f, pretty = False, no_payload = False):
entries = []
def load(self, f, pretty=False, no_payload=False):
entries = []
gf = pb.ghost_file_entry()
buf = f.read(4)
size, = struct.unpack('i', buf)
gf.ParseFromString(f.read(size))
g_entry = pb2dict.pb2dict(gf, pretty)
gf = pb.ghost_file_entry()
buf = f.read(4)
size, = struct.unpack('i', buf)
gf.ParseFromString(f.read(size))
g_entry = pb2dict.pb2dict(gf, pretty)
if gf.chunks:
entries.append(g_entry)
while True:
gc = pb.ghost_chunk_entry()
buf = f.read(4)
if buf == '':
break
size, = struct.unpack('i', buf)
gc.ParseFromString(f.read(size))
entry = pb2dict.pb2dict(gc, pretty)
if no_payload:
f.seek(gc.len, os.SEEK_CUR)
else:
entry['extra'] = base64.encodebytes(f.read(gc.len))
entries.append(entry)
else:
if no_payload:
f.seek(0, os.SEEK_END)
else:
g_entry['extra'] = base64.encodebytes(f.read())
entries.append(g_entry)
if gf.chunks:
entries.append(g_entry)
while True:
gc = pb.ghost_chunk_entry()
buf = f.read(4)
if buf == '':
break
size, = struct.unpack('i', buf)
gc.ParseFromString(f.read(size))
entry = pb2dict.pb2dict(gc, pretty)
if no_payload:
f.seek(gc.len, os.SEEK_CUR)
else:
entry['extra'] = base64.encodebytes(f.read(gc.len))
entries.append(entry)
else:
if no_payload:
f.seek(0, os.SEEK_END)
else:
g_entry['extra'] = base64.encodebytes(f.read())
entries.append(g_entry)
return entries
return entries
def loads(self, s, pretty = False):
f = io.BytesIO(s)
return self.load(f, pretty)
def loads(self, s, pretty=False):
f = io.BytesIO(s)
return self.load(f, pretty)
def dump(self, entries, f):
pbuff = pb.ghost_file_entry()
item = entries.pop(0)
pb2dict.dict2pb(item, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
def dump(self, entries, f):
pbuff = pb.ghost_file_entry()
item = entries.pop(0)
pb2dict.dict2pb(item, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
if pbuff.chunks:
for item in entries:
pbuff = pb.ghost_chunk_entry()
pb2dict.dict2pb(item, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
f.write(base64.decodebytes(item['extra']))
else:
f.write(base64.decodebytes(item['extra']))
if pbuff.chunks:
for item in entries:
pbuff = pb.ghost_chunk_entry()
pb2dict.dict2pb(item, pbuff)
pb_str = pbuff.SerializeToString()
size = len(pb_str)
f.write(struct.pack('i', size))
f.write(pb_str)
f.write(base64.decodebytes(item['extra']))
else:
f.write(base64.decodebytes(item['extra']))
def dumps(self, entries):
f = io.BytesIO('')
self.dump(entries, f)
return f.read()
def dumps(self, entries):
f = io.BytesIO('')
self.dump(entries, f)
return f.read()
# In following extra handlers we use base64 encoding
@@ -293,304 +301,317 @@ class ghost_file_handler:
# do not store big amounts of binary data. They
# are negligible comparing to pages size.
class pipes_data_extra_handler:
def load(self, f, pload):
size = pload.bytes
data = f.read(size)
return base64.encodebytes(data)
def load(self, f, pload):
size = pload.bytes
data = f.read(size)
return base64.encodebytes(data)
def dump(self, extra, f, pload):
data = base64.decodebytes(extra)
f.write(data)
def dump(self, extra, f, pload):
data = base64.decodebytes(extra)
f.write(data)
def skip(self, f, pload):
f.seek(pload.bytes, os.SEEK_CUR)
return pload.bytes
def skip(self, f, pload):
f.seek(pload.bytes, os.SEEK_CUR)
return pload.bytes
class sk_queues_extra_handler:
def load(self, f, pload):
size = pload.length
data = f.read(size)
return base64.encodebytes(data)
def load(self, f, pload):
size = pload.length
data = f.read(size)
return base64.encodebytes(data)
def dump(self, extra, f, _unused):
data = base64.decodebytes(extra)
f.write(data)
def dump(self, extra, f, _unused):
data = base64.decodebytes(extra)
f.write(data)
def skip(self, f, pload):
f.seek(pload.length, os.SEEK_CUR)
return pload.length
def skip(self, f, pload):
f.seek(pload.length, os.SEEK_CUR)
return pload.length
class tcp_stream_extra_handler:
def load(self, f, pbuff):
d = {}
def load(self, f, pbuff):
d = {}
inq = f.read(pbuff.inq_len)
outq = f.read(pbuff.outq_len)
inq = f.read(pbuff.inq_len)
outq = f.read(pbuff.outq_len)
d['inq'] = base64.encodebytes(inq)
d['outq'] = base64.encodebytes(outq)
d['inq'] = base64.encodebytes(inq)
d['outq'] = base64.encodebytes(outq)
return d
return d
def dump(self, extra, f, _unused):
inq = base64.decodebytes(extra['inq'])
outq = base64.decodebytes(extra['outq'])
def dump(self, extra, f, _unused):
inq = base64.decodebytes(extra['inq'])
outq = base64.decodebytes(extra['outq'])
f.write(inq)
f.write(outq)
f.write(inq)
f.write(outq)
def skip(self, f, pbuff):
f.seek(0, os.SEEK_END)
return pbuff.inq_len + pbuff.outq_len
def skip(self, f, pbuff):
f.seek(0, os.SEEK_END)
return pbuff.inq_len + pbuff.outq_len
class ipc_sem_set_handler:
def load(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = sizeof_u16 * entry['nsems']
rounded = round_up(size, sizeof_u64)
s = array.array('H')
if s.itemsize != sizeof_u16:
raise Exception("Array size mismatch")
s.fromstring(f.read(size))
f.seek(rounded - size, 1)
return s.tolist()
def load(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = sizeof_u16 * entry['nsems']
rounded = round_up(size, sizeof_u64)
s = array.array('H')
if s.itemsize != sizeof_u16:
raise Exception("Array size mismatch")
s.fromstring(f.read(size))
f.seek(rounded - size, 1)
return s.tolist()
def dump(self, extra, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = sizeof_u16 * entry['nsems']
rounded = round_up(size, sizeof_u64)
s = array.array('H')
if s.itemsize != sizeof_u16:
raise Exception("Array size mismatch")
s.fromlist(extra)
if len(s) != entry['nsems']:
raise Exception("Number of semaphores mismatch")
f.write(s.tostring())
f.write('\0' * (rounded - size))
def dump(self, extra, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = sizeof_u16 * entry['nsems']
rounded = round_up(size, sizeof_u64)
s = array.array('H')
if s.itemsize != sizeof_u16:
raise Exception("Array size mismatch")
s.fromlist(extra)
if len(s) != entry['nsems']:
raise Exception("Number of semaphores mismatch")
f.write(s.tostring())
f.write('\0' * (rounded - size))
def skip(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = sizeof_u16 * entry['nsems']
f.seek(round_up(size, sizeof_u64), os.SEEK_CUR)
return size
def skip(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = sizeof_u16 * entry['nsems']
f.seek(round_up(size, sizeof_u64), os.SEEK_CUR)
return size
class ipc_msg_queue_handler:
def load(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
messages = []
for x in range (0, entry['qnum']):
buf = f.read(4)
if buf == '':
break
size, = struct.unpack('i', buf)
msg = pb.ipc_msg()
msg.ParseFromString(f.read(size))
rounded = round_up(msg.msize, sizeof_u64)
data = f.read(msg.msize)
f.seek(rounded - msg.msize, 1)
messages.append(pb2dict.pb2dict(msg))
messages.append(base64.encodebytes(data))
return messages
def load(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
messages = []
for x in range(0, entry['qnum']):
buf = f.read(4)
if buf == '':
break
size, = struct.unpack('i', buf)
msg = pb.ipc_msg()
msg.ParseFromString(f.read(size))
rounded = round_up(msg.msize, sizeof_u64)
data = f.read(msg.msize)
f.seek(rounded - msg.msize, 1)
messages.append(pb2dict.pb2dict(msg))
messages.append(base64.encodebytes(data))
return messages
def dump(self, extra, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
for i in range (0, len(extra), 2):
msg = pb.ipc_msg()
pb2dict.dict2pb(extra[i], msg)
msg_str = msg.SerializeToString()
size = len(msg_str)
f.write(struct.pack('i', size))
f.write(msg_str)
rounded = round_up(msg.msize, sizeof_u64)
data = base64.decodebytes(extra[i + 1])
f.write(data[:msg.msize])
f.write('\0' * (rounded - msg.msize))
def dump(self, extra, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
for i in range(0, len(extra), 2):
msg = pb.ipc_msg()
pb2dict.dict2pb(extra[i], msg)
msg_str = msg.SerializeToString()
size = len(msg_str)
f.write(struct.pack('i', size))
f.write(msg_str)
rounded = round_up(msg.msize, sizeof_u64)
data = base64.decodebytes(extra[i + 1])
f.write(data[:msg.msize])
f.write('\0' * (rounded - msg.msize))
def skip(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
pl_len = 0
for x in range (0, entry['qnum']):
buf = f.read(4)
if buf == '':
break
size, = struct.unpack('i', buf)
msg = pb.ipc_msg()
msg.ParseFromString(f.read(size))
rounded = round_up(msg.msize, sizeof_u64)
f.seek(rounded, os.SEEK_CUR)
pl_len += size + msg.msize
def skip(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
pl_len = 0
for x in range(0, entry['qnum']):
buf = f.read(4)
if buf == '':
break
size, = struct.unpack('i', buf)
msg = pb.ipc_msg()
msg.ParseFromString(f.read(size))
rounded = round_up(msg.msize, sizeof_u64)
f.seek(rounded, os.SEEK_CUR)
pl_len += size + msg.msize
return pl_len
return pl_len
class ipc_shm_handler:
def load(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = entry['size']
data = f.read(size)
rounded = round_up(size, sizeof_u32)
f.seek(rounded - size, 1)
return base64.encodebytes(data)
def load(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = entry['size']
data = f.read(size)
rounded = round_up(size, sizeof_u32)
f.seek(rounded - size, 1)
return base64.encodebytes(data)
def dump(self, extra, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = entry['size']
data = base64.decodebytes(extra)
rounded = round_up(size, sizeof_u32)
f.write(data[:size])
f.write('\0' * (rounded - size))
def dump(self, extra, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = entry['size']
data = base64.decodebytes(extra)
rounded = round_up(size, sizeof_u32)
f.write(data[:size])
f.write('\0' * (rounded - size))
def skip(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = entry['size']
rounded = round_up(size, sizeof_u32)
f.seek(rounded, os.SEEK_CUR)
return size
def skip(self, f, pbuff):
entry = pb2dict.pb2dict(pbuff)
size = entry['size']
rounded = round_up(size, sizeof_u32)
f.seek(rounded, os.SEEK_CUR)
return size
handlers = {
'INVENTORY' : entry_handler(pb.inventory_entry),
'CORE' : entry_handler(pb.core_entry),
'IDS' : entry_handler(pb.task_kobj_ids_entry),
'CREDS' : entry_handler(pb.creds_entry),
'UTSNS' : entry_handler(pb.utsns_entry),
'IPC_VAR' : entry_handler(pb.ipc_var_entry),
'FS' : entry_handler(pb.fs_entry),
'GHOST_FILE' : ghost_file_handler(),
'MM' : entry_handler(pb.mm_entry),
'CGROUP' : entry_handler(pb.cgroup_entry),
'TCP_STREAM' : entry_handler(pb.tcp_stream_entry, tcp_stream_extra_handler()),
'STATS' : entry_handler(pb.stats_entry),
'PAGEMAP' : pagemap_handler(), # Special one
'PSTREE' : entry_handler(pb.pstree_entry),
'REG_FILES' : entry_handler(pb.reg_file_entry),
'NS_FILES' : entry_handler(pb.ns_file_entry),
'EVENTFD_FILE' : entry_handler(pb.eventfd_file_entry),
'EVENTPOLL_FILE' : entry_handler(pb.eventpoll_file_entry),
'EVENTPOLL_TFD' : entry_handler(pb.eventpoll_tfd_entry),
'SIGNALFD' : entry_handler(pb.signalfd_entry),
'TIMERFD' : entry_handler(pb.timerfd_entry),
'INOTIFY_FILE' : entry_handler(pb.inotify_file_entry),
'INOTIFY_WD' : entry_handler(pb.inotify_wd_entry),
'FANOTIFY_FILE' : entry_handler(pb.fanotify_file_entry),
'FANOTIFY_MARK' : entry_handler(pb.fanotify_mark_entry),
'VMAS' : entry_handler(pb.vma_entry),
'PIPES' : entry_handler(pb.pipe_entry),
'FIFO' : entry_handler(pb.fifo_entry),
'SIGACT' : entry_handler(pb.sa_entry),
'NETLINK_SK' : entry_handler(pb.netlink_sk_entry),
'REMAP_FPATH' : entry_handler(pb.remap_file_path_entry),
'MNTS' : entry_handler(pb.mnt_entry),
'TTY_FILES' : entry_handler(pb.tty_file_entry),
'TTY_INFO' : entry_handler(pb.tty_info_entry),
'TTY_DATA' : entry_handler(pb.tty_data_entry),
'RLIMIT' : entry_handler(pb.rlimit_entry),
'TUNFILE' : entry_handler(pb.tunfile_entry),
'EXT_FILES' : entry_handler(pb.ext_file_entry),
'IRMAP_CACHE' : entry_handler(pb.irmap_cache_entry),
'FILE_LOCKS' : entry_handler(pb.file_lock_entry),
'FDINFO' : entry_handler(pb.fdinfo_entry),
'UNIXSK' : entry_handler(pb.unix_sk_entry),
'INETSK' : entry_handler(pb.inet_sk_entry),
'PACKETSK' : entry_handler(pb.packet_sock_entry),
'ITIMERS' : entry_handler(pb.itimer_entry),
'POSIX_TIMERS' : entry_handler(pb.posix_timer_entry),
'NETDEV' : entry_handler(pb.net_device_entry),
'PIPES_DATA' : entry_handler(pb.pipe_data_entry, pipes_data_extra_handler()),
'FIFO_DATA' : entry_handler(pb.pipe_data_entry, pipes_data_extra_handler()),
'SK_QUEUES' : entry_handler(pb.sk_packet_entry, sk_queues_extra_handler()),
'IPCNS_SHM' : entry_handler(pb.ipc_shm_entry, ipc_shm_handler()),
'IPCNS_SEM' : entry_handler(pb.ipc_sem_entry, ipc_sem_set_handler()),
'IPCNS_MSG' : entry_handler(pb.ipc_msg_entry, ipc_msg_queue_handler()),
'NETNS' : entry_handler(pb.netns_entry),
'USERNS' : entry_handler(pb.userns_entry),
'SECCOMP' : entry_handler(pb.seccomp_entry),
'AUTOFS' : entry_handler(pb.autofs_entry),
'FILES' : entry_handler(pb.file_entry),
'CPUINFO' : entry_handler(pb.cpuinfo_entry),
}
'INVENTORY': entry_handler(pb.inventory_entry),
'CORE': entry_handler(pb.core_entry),
'IDS': entry_handler(pb.task_kobj_ids_entry),
'CREDS': entry_handler(pb.creds_entry),
'UTSNS': entry_handler(pb.utsns_entry),
'IPC_VAR': entry_handler(pb.ipc_var_entry),
'FS': entry_handler(pb.fs_entry),
'GHOST_FILE': ghost_file_handler(),
'MM': entry_handler(pb.mm_entry),
'CGROUP': entry_handler(pb.cgroup_entry),
'TCP_STREAM': entry_handler(pb.tcp_stream_entry,
tcp_stream_extra_handler()),
'STATS': entry_handler(pb.stats_entry),
'PAGEMAP': pagemap_handler(), # Special one
'PSTREE': entry_handler(pb.pstree_entry),
'REG_FILES': entry_handler(pb.reg_file_entry),
'NS_FILES': entry_handler(pb.ns_file_entry),
'EVENTFD_FILE': entry_handler(pb.eventfd_file_entry),
'EVENTPOLL_FILE': entry_handler(pb.eventpoll_file_entry),
'EVENTPOLL_TFD': entry_handler(pb.eventpoll_tfd_entry),
'SIGNALFD': entry_handler(pb.signalfd_entry),
'TIMERFD': entry_handler(pb.timerfd_entry),
'INOTIFY_FILE': entry_handler(pb.inotify_file_entry),
'INOTIFY_WD': entry_handler(pb.inotify_wd_entry),
'FANOTIFY_FILE': entry_handler(pb.fanotify_file_entry),
'FANOTIFY_MARK': entry_handler(pb.fanotify_mark_entry),
'VMAS': entry_handler(pb.vma_entry),
'PIPES': entry_handler(pb.pipe_entry),
'FIFO': entry_handler(pb.fifo_entry),
'SIGACT': entry_handler(pb.sa_entry),
'NETLINK_SK': entry_handler(pb.netlink_sk_entry),
'REMAP_FPATH': entry_handler(pb.remap_file_path_entry),
'MNTS': entry_handler(pb.mnt_entry),
'TTY_FILES': entry_handler(pb.tty_file_entry),
'TTY_INFO': entry_handler(pb.tty_info_entry),
'TTY_DATA': entry_handler(pb.tty_data_entry),
'RLIMIT': entry_handler(pb.rlimit_entry),
'TUNFILE': entry_handler(pb.tunfile_entry),
'EXT_FILES': entry_handler(pb.ext_file_entry),
'IRMAP_CACHE': entry_handler(pb.irmap_cache_entry),
'FILE_LOCKS': entry_handler(pb.file_lock_entry),
'FDINFO': entry_handler(pb.fdinfo_entry),
'UNIXSK': entry_handler(pb.unix_sk_entry),
'INETSK': entry_handler(pb.inet_sk_entry),
'PACKETSK': entry_handler(pb.packet_sock_entry),
'ITIMERS': entry_handler(pb.itimer_entry),
'POSIX_TIMERS': entry_handler(pb.posix_timer_entry),
'NETDEV': entry_handler(pb.net_device_entry),
'PIPES_DATA': entry_handler(pb.pipe_data_entry,
pipes_data_extra_handler()),
'FIFO_DATA': entry_handler(pb.pipe_data_entry, pipes_data_extra_handler()),
'SK_QUEUES': entry_handler(pb.sk_packet_entry, sk_queues_extra_handler()),
'IPCNS_SHM': entry_handler(pb.ipc_shm_entry, ipc_shm_handler()),
'IPCNS_SEM': entry_handler(pb.ipc_sem_entry, ipc_sem_set_handler()),
'IPCNS_MSG': entry_handler(pb.ipc_msg_entry, ipc_msg_queue_handler()),
'NETNS': entry_handler(pb.netns_entry),
'USERNS': entry_handler(pb.userns_entry),
'SECCOMP': entry_handler(pb.seccomp_entry),
'AUTOFS': entry_handler(pb.autofs_entry),
'FILES': entry_handler(pb.file_entry),
'CPUINFO': entry_handler(pb.cpuinfo_entry),
}
def __rhandler(f):
# Images v1.1 NOTE: First read "first" magic.
img_magic, = struct.unpack('i', f.read(4))
if img_magic in (magic.by_name['IMG_COMMON'], magic.by_name['IMG_SERVICE']):
img_magic, = struct.unpack('i', f.read(4))
# Images v1.1 NOTE: First read "first" magic.
img_magic, = struct.unpack('i', f.read(4))
if img_magic in (magic.by_name['IMG_COMMON'],
magic.by_name['IMG_SERVICE']):
img_magic, = struct.unpack('i', f.read(4))
try:
m = magic.by_val[img_magic]
except:
raise MagicException(img_magic)
try:
m = magic.by_val[img_magic]
except:
raise MagicException(img_magic)
try:
handler = handlers[m]
except:
raise Exception("No handler found for image with magic " + m)
try:
handler = handlers[m]
except:
raise Exception("No handler found for image with magic " + m)
return m, handler
return m, handler
def load(f, pretty = False, no_payload = False):
"""
def load(f, pretty=False, no_payload=False):
"""
Convert criu image from binary format to dict(json).
Takes a file-like object to read criu image from.
Returns criu image in dict(json) format.
"""
image = {}
image = {}
m, handler = __rhandler(f)
m, handler = __rhandler(f)
image['magic'] = m
image['entries'] = handler.load(f, pretty, no_payload)
image['magic'] = m
image['entries'] = handler.load(f, pretty, no_payload)
return image
return image
def info(f):
res = {}
res = {}
m, handler = __rhandler(f)
m, handler = __rhandler(f)
res['magic'] = m
res['count'] = handler.count(f)
res['magic'] = m
res['count'] = handler.count(f)
return res
return res
def loads(s, pretty = False):
"""
def loads(s, pretty=False):
"""
Same as load(), but takes a string.
"""
f = io.BytesIO(s)
return load(f, pretty)
f = io.BytesIO(s)
return load(f, pretty)
def dump(img, f):
"""
"""
Convert criu image from dict(json) format to binary.
Takes an image in dict(json) format and file-like
object to write to.
"""
m = img['magic']
magic_val = magic.by_name[img['magic']]
m = img['magic']
magic_val = magic.by_name[img['magic']]
# Images v1.1 NOTE: use "second" magic to identify what "first"
# should be written.
if m != 'INVENTORY':
if m in ('STATS', 'IRMAP_CACHE'):
f.write(struct.pack('i', magic.by_name['IMG_SERVICE']))
else:
f.write(struct.pack('i', magic.by_name['IMG_COMMON']))
# Images v1.1 NOTE: use "second" magic to identify what "first"
# should be written.
if m != 'INVENTORY':
if m in ('STATS', 'IRMAP_CACHE'):
f.write(struct.pack('i', magic.by_name['IMG_SERVICE']))
else:
f.write(struct.pack('i', magic.by_name['IMG_COMMON']))
f.write(struct.pack('i', magic_val))
f.write(struct.pack('i', magic_val))
try:
handler = handlers[m]
except:
raise Exception("No handler found for image with such magic")
try:
handler = handlers[m]
except:
raise Exception("No handler found for image with such magic")
handler.dump(img['entries'], f)
handler.dump(img['entries'], f)
def dumps(img):
"""
"""
Same as dump(), but takes only an image and returns
a string.
"""
f = io.BytesIO(b'')
dump(img, f)
return f.getvalue()
f = io.BytesIO(b'')
dump(img, f)
return f.getvalue()
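A hedged round-trip sketch for the load()/dump() helpers above, not part of this patch; it assumes the pycriu package is importable and 'pstree.img' is only an example image path.

    import pycriu.images as images

    with open('pstree.img', 'rb') as f:
        img = images.load(f, pretty=True)

    print(img['magic'], len(img['entries']))

    with open('pstree.img.new', 'wb') as f:
        images.dump(img, f)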


@@ -9,8 +9,8 @@ import base64
import quopri
if "encodebytes" not in dir(base64):
base64.encodebytes = base64.encodestring
base64.decodebytes = base64.decodestring
base64.encodebytes = base64.encodestring
base64.decodebytes = base64.decodestring
# pb2dict and dict2pb are methods to convert pb to/from dict.
# Inspired by:
@@ -29,350 +29,396 @@ if "encodebytes" not in dir(base64):
# enums to string value too. (i.e. "march : x86_64" is better then
# "march : 1").
_basic_cast = {
FD.TYPE_FIXED64 : int,
FD.TYPE_FIXED32 : int,
FD.TYPE_SFIXED64 : int,
FD.TYPE_SFIXED32 : int,
FD.TYPE_INT64 : int,
FD.TYPE_UINT64 : int,
FD.TYPE_SINT64 : int,
FD.TYPE_INT32 : int,
FD.TYPE_UINT32 : int,
FD.TYPE_SINT32 : int,
FD.TYPE_BOOL : bool,
FD.TYPE_STRING : str
FD.TYPE_FIXED64: int,
FD.TYPE_FIXED32: int,
FD.TYPE_SFIXED64: int,
FD.TYPE_SFIXED32: int,
FD.TYPE_INT64: int,
FD.TYPE_UINT64: int,
FD.TYPE_SINT64: int,
FD.TYPE_INT32: int,
FD.TYPE_UINT32: int,
FD.TYPE_SINT32: int,
FD.TYPE_BOOL: bool,
FD.TYPE_STRING: str
}
def _marked_as_hex(field):
return field.GetOptions().Extensions[opts_pb2.criu].hex
return field.GetOptions().Extensions[opts_pb2.criu].hex
def _marked_as_ip(field):
return field.GetOptions().Extensions[opts_pb2.criu].ipadd
return field.GetOptions().Extensions[opts_pb2.criu].ipadd
def _marked_as_flags(field):
return field.GetOptions().Extensions[opts_pb2.criu].flags
return field.GetOptions().Extensions[opts_pb2.criu].flags
def _marked_as_dev(field):
return field.GetOptions().Extensions[opts_pb2.criu].dev
return field.GetOptions().Extensions[opts_pb2.criu].dev
def _marked_as_odev(field):
return field.GetOptions().Extensions[opts_pb2.criu].odev
return field.GetOptions().Extensions[opts_pb2.criu].odev
def _marked_as_dict(field):
return field.GetOptions().Extensions[opts_pb2.criu].dict
return field.GetOptions().Extensions[opts_pb2.criu].dict
def _custom_conv(field):
return field.GetOptions().Extensions[opts_pb2.criu].conv
return field.GetOptions().Extensions[opts_pb2.criu].conv
mmap_prot_map = [
('PROT_READ', 0x1),
('PROT_WRITE', 0x2),
('PROT_EXEC', 0x4),
('PROT_READ', 0x1),
('PROT_WRITE', 0x2),
('PROT_EXEC', 0x4),
]
mmap_flags_map = [
('MAP_SHARED', 0x1),
('MAP_PRIVATE', 0x2),
('MAP_ANON', 0x20),
('MAP_GROWSDOWN', 0x0100),
('MAP_SHARED', 0x1),
('MAP_PRIVATE', 0x2),
('MAP_ANON', 0x20),
('MAP_GROWSDOWN', 0x0100),
]
mmap_status_map = [
('VMA_AREA_NONE', 0 << 0),
('VMA_AREA_REGULAR', 1 << 0),
('VMA_AREA_STACK', 1 << 1),
('VMA_AREA_VSYSCALL', 1 << 2),
('VMA_AREA_VDSO', 1 << 3),
('VMA_AREA_HEAP', 1 << 5),
('VMA_FILE_PRIVATE', 1 << 6),
('VMA_FILE_SHARED', 1 << 7),
('VMA_ANON_SHARED', 1 << 8),
('VMA_ANON_PRIVATE', 1 << 9),
('VMA_AREA_SYSVIPC', 1 << 10),
('VMA_AREA_SOCKET', 1 << 11),
('VMA_AREA_VVAR', 1 << 12),
('VMA_AREA_AIORING', 1 << 13),
('VMA_UNSUPP', 1 << 31),
('VMA_AREA_NONE', 0 << 0),
('VMA_AREA_REGULAR', 1 << 0),
('VMA_AREA_STACK', 1 << 1),
('VMA_AREA_VSYSCALL', 1 << 2),
('VMA_AREA_VDSO', 1 << 3),
('VMA_AREA_HEAP', 1 << 5),
('VMA_FILE_PRIVATE', 1 << 6),
('VMA_FILE_SHARED', 1 << 7),
('VMA_ANON_SHARED', 1 << 8),
('VMA_ANON_PRIVATE', 1 << 9),
('VMA_AREA_SYSVIPC', 1 << 10),
('VMA_AREA_SOCKET', 1 << 11),
('VMA_AREA_VVAR', 1 << 12),
('VMA_AREA_AIORING', 1 << 13),
('VMA_UNSUPP', 1 << 31),
]
rfile_flags_map = [
('O_WRONLY', 0o1),
('O_RDWR', 0o2),
('O_APPEND', 0o2000),
('O_DIRECT', 0o40000),
('O_LARGEFILE', 0o100000),
('O_WRONLY', 0o1),
('O_RDWR', 0o2),
('O_APPEND', 0o2000),
('O_DIRECT', 0o40000),
('O_LARGEFILE', 0o100000),
]
pmap_flags_map = [
('PE_PARENT', 1 << 0),
('PE_LAZY', 1 << 1),
('PE_PRESENT', 1 << 2),
('PE_PARENT', 1 << 0),
('PE_LAZY', 1 << 1),
('PE_PRESENT', 1 << 2),
]
flags_maps = {
'mmap.prot' : mmap_prot_map,
'mmap.flags' : mmap_flags_map,
'mmap.status' : mmap_status_map,
'rfile.flags' : rfile_flags_map,
'pmap.flags' : pmap_flags_map,
'mmap.prot': mmap_prot_map,
'mmap.flags': mmap_flags_map,
'mmap.status': mmap_status_map,
'rfile.flags': rfile_flags_map,
'pmap.flags': pmap_flags_map,
}
gen_maps = {
'task_state' : { 1: 'Alive', 3: 'Zombie', 6: 'Stopped' },
'task_state': {
1: 'Alive',
3: 'Zombie',
6: 'Stopped'
},
}
sk_maps = {
'family' : { 1: 'UNIX',
2: 'INET',
10: 'INET6',
16: 'NETLINK',
17: 'PACKET' },
'type' : { 1: 'STREAM',
2: 'DGRAM',
3: 'RAW',
5: 'SEQPACKET',
10: 'PACKET' },
'state' : { 1: 'ESTABLISHED',
2: 'SYN_SENT',
3: 'SYN_RECV',
4: 'FIN_WAIT1',
5: 'FIN_WAIT2',
6: 'TIME_WAIT',
7: 'CLOSE',
8: 'CLOSE_WAIT',
9: 'LAST_ACK',
10: 'LISTEN' },
'proto' : { 0: 'IP',
6: 'TCP',
17: 'UDP',
136: 'UDPLITE' },
'family': {
1: 'UNIX',
2: 'INET',
10: 'INET6',
16: 'NETLINK',
17: 'PACKET'
},
'type': {
1: 'STREAM',
2: 'DGRAM',
3: 'RAW',
5: 'SEQPACKET',
10: 'PACKET'
},
'state': {
1: 'ESTABLISHED',
2: 'SYN_SENT',
3: 'SYN_RECV',
4: 'FIN_WAIT1',
5: 'FIN_WAIT2',
6: 'TIME_WAIT',
7: 'CLOSE',
8: 'CLOSE_WAIT',
9: 'LAST_ACK',
10: 'LISTEN'
},
'proto': {
0: 'IP',
6: 'TCP',
17: 'UDP',
136: 'UDPLITE'
},
}
gen_rmaps = { k: {v2:k2 for k2,v2 in list(v.items())} for k,v in list(gen_maps.items()) }
sk_rmaps = { k: {v2:k2 for k2,v2 in list(v.items())} for k,v in list(sk_maps.items()) }
gen_rmaps = {
k: {v2: k2
for k2, v2 in list(v.items())}
for k, v in list(gen_maps.items())
}
sk_rmaps = {
k: {v2: k2
for k2, v2 in list(v.items())}
for k, v in list(sk_maps.items())
}
dict_maps = {
'gen' : ( gen_maps, gen_rmaps ),
'sk' : ( sk_maps, sk_rmaps ),
'gen': (gen_maps, gen_rmaps),
'sk': (sk_maps, sk_rmaps),
}
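# A quick sanity sketch of the dict maps above: a field marked with the
# (criu).dict option "sk" is printed by name and converted back to its
# numeric value on encode.
assert sk_maps['family'][2] == 'INET'
assert sk_rmaps['family']['INET'] == 2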
def map_flags(value, flags_map):
bs = [x[0] for x in [x for x in flags_map if value & x[1]]]
value &= ~sum([x[1] for x in flags_map])
if value:
bs.append("0x%x" % value)
return " | ".join(bs)
bs = [x[0] for x in [x for x in flags_map if value & x[1]]]
value &= ~sum([x[1] for x in flags_map])
if value:
bs.append("0x%x" % value)
return " | ".join(bs)
def unmap_flags(value, flags_map):
if value == '':
return 0
if value == '':
return 0
bd = dict(flags_map)
return sum([int(str(bd.get(x, x)), 0) for x in [x.strip() for x in value.split('|')]])
bd = dict(flags_map)
return sum([
int(str(bd.get(x, x)), 0)
for x in [x.strip() for x in value.split('|')]
])
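# A minimal illustration of how the two helpers above round-trip flag
# values, using the mmap protection map defined earlier in this file:
assert map_flags(0x5, mmap_prot_map) == 'PROT_READ | PROT_EXEC'
assert unmap_flags('PROT_READ | PROT_EXEC', mmap_prot_map) == 0x5
# Bits without a symbolic name survive as a hex remainder:
assert map_flags(0x15, mmap_prot_map) == 'PROT_READ | PROT_EXEC | 0x10'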
kern_minorbits = 20 # This is how kernel encodes dev_t in new format
kern_minorbits = 20 # This is how kernel encodes dev_t in new format
def decode_dev(field, value):
if _marked_as_odev(field):
return "%d:%d" % (os.major(value), os.minor(value))
else:
return "%d:%d" % (value >> kern_minorbits, value & ((1 << kern_minorbits) - 1))
if _marked_as_odev(field):
return "%d:%d" % (os.major(value), os.minor(value))
else:
return "%d:%d" % (value >> kern_minorbits,
value & ((1 << kern_minorbits) - 1))
def encode_dev(field, value):
dev = [int(x) for x in value.split(':')]
if _marked_as_odev(field):
return os.makedev(dev[0], dev[1])
else:
return dev[0] << kern_minorbits | dev[1]
dev = [int(x) for x in value.split(':')]
if _marked_as_odev(field):
return os.makedev(dev[0], dev[1])
else:
return dev[0] << kern_minorbits | dev[1]
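# A short sketch of the new-style dev_t packing handled above (fields
# without the (criu).odev option): major and minor are packed as
# major << kern_minorbits | minor, so "8:1" corresponds to 0x800001.
packed = 8 << kern_minorbits | 1
assert packed == 0x800001
assert packed >> kern_minorbits == 8
assert packed & ((1 << kern_minorbits) - 1) == 1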
def encode_base64(value):
return base64.encodebytes(value)
return base64.encodebytes(value)
def decode_base64(value):
return base64.decodebytes(value)
return base64.decodebytes(value)
def encode_unix(value):
return quopri.encodestring(value)
def decode_unix(value):
return quopri.decodestring(value)
return quopri.encodestring(value)
def decode_unix(value):
return quopri.decodestring(value)
encode = {'unix_name': encode_unix}
decode = {'unix_name': decode_unix}
encode = { 'unix_name': encode_unix }
decode = { 'unix_name': decode_unix }
def get_bytes_enc(field):
c = _custom_conv(field)
if c:
return encode[c]
else:
return encode_base64
c = _custom_conv(field)
if c:
return encode[c]
else:
return encode_base64
def get_bytes_dec(field):
c = _custom_conv(field)
if c:
return decode[c]
else:
return decode_base64
c = _custom_conv(field)
if c:
return decode[c]
else:
return decode_base64
def is_string(value):
# Python 3 compatibility
if "basestring" in __builtins__:
string_types = basestring # noqa: F821
else:
string_types = (str, bytes)
return isinstance(value, string_types)
# Python 3 compatibility
if "basestring" in __builtins__:
string_types = basestring # noqa: F821
else:
string_types = (str, bytes)
return isinstance(value, string_types)
def _pb2dict_cast(field, value, pretty = False, is_hex = False):
if not is_hex:
is_hex = _marked_as_hex(field)
if field.type == FD.TYPE_MESSAGE:
return pb2dict(value, pretty, is_hex)
elif field.type == FD.TYPE_BYTES:
return get_bytes_enc(field)(value)
elif field.type == FD.TYPE_ENUM:
return field.enum_type.values_by_number.get(value, None).name
elif field.type in _basic_cast:
cast = _basic_cast[field.type]
if pretty and (cast == int):
if is_hex:
# Fields that have (criu).hex = true option set
# should be stored in hex string format.
return "0x%x" % value
def _pb2dict_cast(field, value, pretty=False, is_hex=False):
if not is_hex:
is_hex = _marked_as_hex(field)
if _marked_as_dev(field):
return decode_dev(field, value)
if field.type == FD.TYPE_MESSAGE:
return pb2dict(value, pretty, is_hex)
elif field.type == FD.TYPE_BYTES:
return get_bytes_enc(field)(value)
elif field.type == FD.TYPE_ENUM:
return field.enum_type.values_by_number.get(value, None).name
elif field.type in _basic_cast:
cast = _basic_cast[field.type]
if pretty and (cast == int):
if is_hex:
# Fields that have (criu).hex = true option set
# should be stored in hex string format.
return "0x%x" % value
flags = _marked_as_flags(field)
if flags:
try:
flags_map = flags_maps[flags]
except:
return "0x%x" % value # flags are better seen as hex anyway
else:
return map_flags(value, flags_map)
if _marked_as_dev(field):
return decode_dev(field, value)
dct = _marked_as_dict(field)
if dct:
return dict_maps[dct][0][field.name].get(value, cast(value))
flags = _marked_as_flags(field)
if flags:
try:
flags_map = flags_maps[flags]
except Exception:
return "0x%x" % value # flags are better seen as hex anyway
else:
return map_flags(value, flags_map)
return cast(value)
else:
raise Exception("Field(%s) has unsupported type %d" % (field.name, field.type))
dct = _marked_as_dict(field)
if dct:
return dict_maps[dct][0][field.name].get(value, cast(value))
def pb2dict(pb, pretty = False, is_hex = False):
"""
Convert protobuf msg to dictionary.
Takes a protobuf message and returns a dict.
"""
d = collections.OrderedDict() if pretty else {}
for field, value in pb.ListFields():
if field.label == FD.LABEL_REPEATED:
d_val = []
if pretty and _marked_as_ip(field):
if len(value) == 1:
v = socket.ntohl(value[0])
addr = IPv4Address(v)
else:
v = 0 + (socket.ntohl(value[0]) << (32 * 3)) + \
(socket.ntohl(value[1]) << (32 * 2)) + \
(socket.ntohl(value[2]) << (32 * 1)) + \
(socket.ntohl(value[3]))
addr = IPv6Address(v)
return cast(value)
else:
raise Exception("Field(%s) has unsupported type %d" %
(field.name, field.type))
d_val.append(addr.compressed)
else:
for v in value:
d_val.append(_pb2dict_cast(field, v, pretty, is_hex))
else:
d_val = _pb2dict_cast(field, value, pretty, is_hex)
d[field.name] = d_val
return d
def pb2dict(pb, pretty=False, is_hex=False):
"""
Convert protobuf msg to dictionary.
Takes a protobuf message and returns a dict.
"""
d = collections.OrderedDict() if pretty else {}
for field, value in pb.ListFields():
if field.label == FD.LABEL_REPEATED:
d_val = []
if pretty and _marked_as_ip(field):
if len(value) == 1:
v = socket.ntohl(value[0])
addr = IPv4Address(v)
else:
v = 0 + (socket.ntohl(value[0]) << (32 * 3)) + \
(socket.ntohl(value[1]) << (32 * 2)) + \
(socket.ntohl(value[2]) << (32 * 1)) + \
(socket.ntohl(value[3]))
addr = IPv6Address(v)
d_val.append(addr.compressed)
else:
for v in value:
d_val.append(_pb2dict_cast(field, v, pretty, is_hex))
else:
d_val = _pb2dict_cast(field, value, pretty, is_hex)
d[field.name] = d_val
return d
def _dict2pb_cast(field, value):
# Not considering TYPE_MESSAGE here, as repeated
# and non-repeated messages need special treatment
# in this case, and are handled separately.
if field.type == FD.TYPE_BYTES:
return get_bytes_dec(field)(value)
elif field.type == FD.TYPE_ENUM:
return field.enum_type.values_by_name.get(value, None).number
elif field.type in _basic_cast:
cast = _basic_cast[field.type]
if (cast == int) and is_string(value):
if _marked_as_dev(field):
return encode_dev(field, value)
# Not considering TYPE_MESSAGE here, as repeated
# and non-repeated messages need special treatment
# in this case, and are handled separately.
if field.type == FD.TYPE_BYTES:
return get_bytes_dec(field)(value)
elif field.type == FD.TYPE_ENUM:
return field.enum_type.values_by_name.get(value, None).number
elif field.type in _basic_cast:
cast = _basic_cast[field.type]
if (cast == int) and is_string(value):
if _marked_as_dev(field):
return encode_dev(field, value)
flags = _marked_as_flags(field)
if flags:
try:
flags_map = flags_maps[flags]
except:
pass # Try to use plain string cast
else:
return unmap_flags(value, flags_map)
flags = _marked_as_flags(field)
if flags:
try:
flags_map = flags_maps[flags]
except Exception:
pass # Try to use plain string cast
else:
return unmap_flags(value, flags_map)
dct = _marked_as_dict(field)
if dct:
ret = dict_maps[dct][1][field.name].get(value, None)
if ret == None:
ret = cast(value, 0)
return ret
dct = _marked_as_dict(field)
if dct:
ret = dict_maps[dct][1][field.name].get(value, None)
if ret is None:
ret = cast(value, 0)
return ret
# Some int or long fields might be stored as hex
# strings. See _pb2dict_cast.
return cast(value, 0)
else:
return cast(value)
else:
raise Exception("Field(%s) has unsupported type %d" %
(field.name, field.type))
# Some int or long fields might be stored as hex
# strings. See _pb2dict_cast.
return cast(value, 0)
else:
return cast(value)
else:
raise Exception("Field(%s) has unsupported type %d" % (field.name, field.type))
def dict2pb(d, pb):
"""
Convert dictionary to protobuf msg.
Takes dict and protobuf message to be merged into.
"""
for field in pb.DESCRIPTOR.fields:
if field.name not in d:
continue
value = d[field.name]
if field.label == FD.LABEL_REPEATED:
pb_val = getattr(pb, field.name, None)
if is_string(value[0]) and _marked_as_ip(field):
val = ip_address(value[0])
if val.version == 4:
pb_val.append(socket.htonl(int(val)))
elif val.version == 6:
ival = int(val)
pb_val.append(socket.htonl((ival >> (32 * 3)) & 0xFFFFFFFF))
pb_val.append(socket.htonl((ival >> (32 * 2)) & 0xFFFFFFFF))
pb_val.append(socket.htonl((ival >> (32 * 1)) & 0xFFFFFFFF))
pb_val.append(socket.htonl((ival >> (32 * 0)) & 0xFFFFFFFF))
else:
raise Exception("Unknown IP address version %d" % val.version)
continue
"""
Convert dictionary to protobuf msg.
Takes dict and protobuf message to be merged into.
"""
for field in pb.DESCRIPTOR.fields:
if field.name not in d:
continue
value = d[field.name]
if field.label == FD.LABEL_REPEATED:
pb_val = getattr(pb, field.name, None)
if is_string(value[0]) and _marked_as_ip(field):
val = ip_address(value[0])
if val.version == 4:
pb_val.append(socket.htonl(int(val)))
elif val.version == 6:
ival = int(val)
pb_val.append(socket.htonl((ival >> (32 * 3)) & 0xFFFFFFFF))
pb_val.append(socket.htonl((ival >> (32 * 2)) & 0xFFFFFFFF))
pb_val.append(socket.htonl((ival >> (32 * 1)) & 0xFFFFFFFF))
pb_val.append(socket.htonl((ival >> (32 * 0)) & 0xFFFFFFFF))
else:
raise Exception("Unknown IP address version %d" %
val.version)
continue
for v in value:
if field.type == FD.TYPE_MESSAGE:
dict2pb(v, pb_val.add())
else:
pb_val.append(_dict2pb_cast(field, v))
else:
if field.type == FD.TYPE_MESSAGE:
# SetInParent method acts just like has_* = true in C,
# and helps to properly treat cases when we have optional
# field with empty repeated inside.
getattr(pb, field.name).SetInParent()
for v in value:
if field.type == FD.TYPE_MESSAGE:
dict2pb(v, pb_val.add())
else:
pb_val.append(_dict2pb_cast(field, v))
else:
if field.type == FD.TYPE_MESSAGE:
# SetInParent method acts just like has_* = true in C,
# and helps to properly treat cases when we have optional
# field with empty repeated inside.
getattr(pb, field.name).SetInParent()
dict2pb(value, getattr(pb, field.name, None))
else:
setattr(pb, field.name, _dict2pb_cast(field, value))
return pb
dict2pb(value, getattr(pb, field.name, None))
else:
setattr(pb, field.name, _dict2pb_cast(field, value))
return pb
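# A minimal round-trip sketch for the two converters above. The message
# type here is hypothetical; only the call pattern comes from this module:
#
#     entry = some_pb2.some_entry()           # any CRIU image entry message
#     d = pb2dict(entry, pretty=True)         # protobuf -> (ordered) dict
#     pb = dict2pb(d, some_pb2.some_entry())  # dict -> protobuf (merged into the passed message)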

View File

@ -1,12 +1,11 @@
from distutils.core import setup
setup(name = "crit",
version = "0.0.1",
description = "CRiu Image Tool",
author = "CRIU team",
author_email = "criu@openvz.org",
url = "https://github.com/checkpoint-restore/criu",
package_dir = {'pycriu': 'lib/py'},
packages = ["pycriu", "pycriu.images"],
scripts = ["crit/crit"]
)
setup(name="crit",
version="0.0.1",
description="CRiu Image Tool",
author="CRIU team",
author_email="criu@openvz.org",
url="https://github.com/checkpoint-restore/criu",
package_dir={'pycriu': 'lib/py'},
packages=["pycriu", "pycriu.images"],
scripts=["crit/crit"])

View File

@ -1,61 +1,63 @@
#!/bin/env python2
import sys
# This program parses criu magic.h file and produces
# magic.py with all *_MAGIC constants except RAW and V1.
def main(argv):
if len(argv) != 3:
print("Usage: magic-gen.py path/to/image.h path/to/magic.py")
exit(1)
if len(argv) != 3:
print("Usage: magic-gen.py path/to/image.h path/to/magic.py")
exit(1)
magic_c_header = argv[1]
magic_py = argv[2]
magic_c_header = argv[1]
magic_py = argv[2]
out = open(magic_py, 'w+')
out = open(magic_py, 'w+')
# all_magic is used to parse constructions like:
# #define PAGEMAP_MAGIC 0x56084025
# #define SHMEM_PAGEMAP_MAGIC PAGEMAP_MAGIC
all_magic = {}
# and magic is used to store only unique magic.
magic = {}
# all_magic is used to parse constructions like:
# #define PAGEMAP_MAGIC 0x56084025
# #define SHMEM_PAGEMAP_MAGIC PAGEMAP_MAGIC
all_magic = {}
# and magic is used to store only unique magic.
magic = {}
f = open(magic_c_header, 'r')
for line in f:
split = line.split()
f = open(magic_c_header, 'r')
for line in f:
split = line.split()
if len(split) < 3:
continue
if len(split) < 3:
continue
if not '#define' in split[0]:
continue
if not '#define' in split[0]:
continue
key = split[1]
value = split[2]
key = split[1]
value = split[2]
if value in all_magic:
value = all_magic[value]
else:
magic[key] = value
if value in all_magic:
value = all_magic[value]
else:
magic[key] = value
all_magic[key] = value
all_magic[key] = value
out.write('#Autogenerated. Do not edit!\n')
out.write('by_name = {}\n')
out.write('by_val = {}\n')
for k, v in list(magic.items()):
# We don't need RAW or V1 magic, because
# they can't be used to identify images.
if v == '0x0' or v == '1' or k == '0x0' or v == '1':
continue
if k.endswith("_MAGIC"):
# Just cutting _MAGIC suffix
k = k[:-6]
v = int(v, 16)
out.write("by_name['" + k + "'] = " + str(v) + "\n")
out.write("by_val[" + str(v) + "] = '" + k + "'\n")
f.close()
out.close()
out.write('#Autogenerated. Do not edit!\n')
out.write('by_name = {}\n')
out.write('by_val = {}\n')
for k,v in list(magic.items()):
# We don't need RAW or V1 magic, because
# they can't be used to identify images.
if v == '0x0' or v == '1' or k == '0x0' or v == '1':
continue
if k.endswith("_MAGIC"):
# Just cutting _MAGIC suffix
k = k[:-6]
v = int(v, 16)
out.write("by_name['"+ k +"'] = "+ str(v) +"\n")
out.write("by_val["+ str(v) +"] = '"+ k +"'\n")
f.close()
out.close()
if __name__ == "__main__":
main(sys.argv)
main(sys.argv)
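# For example, a hypothetical header line
#
#     #define FOO_MAGIC 0x1234
#
# would be written to magic.py as
#
#     by_name['FOO'] = 4660
#     by_val[4660] = 'FOO'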

View File

@ -13,17 +13,17 @@ sport = os.getenv("TCP_SPORT", "12345")
dport = os.getenv("TCP_DPORT", "54321")
print(sys.argv[1])
args = [sys.argv[1],
"--addr", src, "--port", sport, "--seq", "555",
"--next",
"--addr", dst, "--port", dport, "--seq", "666",
"--reverse", "--", "./tcp-test.py"]
args = [
sys.argv[1], "--addr", src, "--port", sport, "--seq", "555", "--next",
"--addr", dst, "--port", dport, "--seq", "666", "--reverse", "--",
"./tcp-test.py"
]
p1 = Popen(args + ["dst"], stdout = PIPE, stdin = PIPE)
p1 = Popen(args + ["dst"], stdout=PIPE, stdin=PIPE)
args.remove("--reverse");
args.remove("--reverse")
p2 = Popen(args + ["src"], stdout = PIPE, stdin = PIPE)
p2 = Popen(args + ["src"], stdout=PIPE, stdin=PIPE)
p1.stdout.read(5)
p2.stdout.read(5)
@ -42,7 +42,7 @@ str2 = m.hexdigest()
if str2 != eval(s):
print("FAIL", repr(str2), repr(s))
sys.exit(5);
sys.exit(5)
s = p1.stdout.read()
m = hashlib.md5()
@ -52,7 +52,7 @@ str1 = m.hexdigest()
s = p2.stdout.read()
if str1 != eval(s):
print("FAIL", repr(str1), s)
sys.exit(5);
sys.exit(5)
if p1.wait():
sys.exit(1)

View File

@ -4,37 +4,38 @@ import sys
import os
actions = set(['pre-dump', 'pre-restore', 'post-dump', 'setup-namespaces', \
'post-setup-namespaces', 'post-restore', 'post-resume', \
'network-lock', 'network-unlock' ])
'post-setup-namespaces', 'post-restore', 'post-resume', \
'network-lock', 'network-unlock' ])
errors = []
af = os.path.dirname(os.path.abspath(__file__)) + '/actions_called.txt'
for act in open(af):
act = act.strip().split()
act.append('EMPTY')
act.append('EMPTY')
act = act.strip().split()
act.append('EMPTY')
act.append('EMPTY')
if act[0] == 'EMPTY':
raise Exception("Error in test, bogus actions line")
if act[0] == 'EMPTY':
raise Exception("Error in test, bogus actions line")
if act[1] == 'EMPTY':
errors.append('Action %s misses CRTOOLS_IMAGE_DIR' % act[0])
if act[1] == 'EMPTY':
errors.append('Action %s misses CRTOOLS_IMAGE_DIR' % act[0])
if act[0] in ('post-dump', 'setup-namespaces', 'post-setup-namespaces', \
'post-restore', 'post-resume', 'network-lock', 'network-unlock'):
if act[2] == 'EMPTY':
errors.append('Action %s misses CRTOOLS_INIT_PID' % act[0])
elif not act[2].isdigit() or int(act[2]) == 0:
errors.append('Action %s PID is not number (%s)' % (act[0], act[2]))
if act[0] in ('post-dump', 'setup-namespaces', 'post-setup-namespaces', \
'post-restore', 'post-resume', 'network-lock', 'network-unlock'):
if act[2] == 'EMPTY':
errors.append('Action %s misses CRTOOLS_INIT_PID' % act[0])
elif not act[2].isdigit() or int(act[2]) == 0:
errors.append('Action %s PID is not number (%s)' %
(act[0], act[2]))
actions -= set([act[0]])
actions -= set([act[0]])
if actions:
errors.append('Not all actions called: %r' % actions)
errors.append('Not all actions called: %r' % actions)
if errors:
for x in errors:
print(x)
sys.exit(1)
for x in errors:
print(x)
sys.exit(1)
print('PASS')

View File

@ -6,70 +6,72 @@ import sys
import os
import subprocess
find = subprocess.Popen(['find', 'test/dump/', '-size', '+0', '-name', '*.img'],
stdout = subprocess.PIPE)
find = subprocess.Popen(
['find', 'test/dump/', '-size', '+0', '-name', '*.img'],
stdout=subprocess.PIPE)
test_pass = True
def recode_and_check(imgf, o_img, pretty):
try:
pb = pycriu.images.loads(o_img, pretty)
except pycriu.images.MagicException as me:
print("%s magic %x error" % (imgf, me.magic))
return False
except Exception as e:
print("%s %sdecode fails: %s" % (imgf, pretty and 'pretty ' or '', e))
return False
try:
pb = pycriu.images.loads(o_img, pretty)
except pycriu.images.MagicException as me:
print("%s magic %x error" % (imgf, me.magic))
return False
except Exception as e:
print("%s %sdecode fails: %s" % (imgf, pretty and 'pretty ' or '', e))
return False
try:
r_img = pycriu.images.dumps(pb)
except Exception as e:
r_img = pycriu.images.dumps(pb)
print("%s %s encode fails: %s" % (imgf, pretty and 'pretty ' or '', e))
return False
try:
r_img = pycriu.images.dumps(pb)
except Exception as e:
r_img = pycriu.images.dumps(pb)
print("%s %s encode fails: %s" % (imgf, pretty and 'pretty ' or '', e))
return False
if o_img != r_img:
print("%s %s recode mismatch" % (imgf, pretty and 'pretty ' or ''))
return False
if o_img != r_img:
print("%s %s recode mismatch" % (imgf, pretty and 'pretty ' or ''))
return False
return True
return True
for imgf in find.stdout.readlines():
imgf = imgf.strip()
imgf_b = os.path.basename(imgf)
imgf = imgf.strip()
imgf_b = os.path.basename(imgf)
if imgf_b.startswith(b'pages-'):
continue
if imgf_b.startswith(b'iptables-'):
continue
if imgf_b.startswith(b'ip6tables-'):
continue
if imgf_b.startswith(b'route-'):
continue
if imgf_b.startswith(b'route6-'):
continue
if imgf_b.startswith(b'ifaddr-'):
continue
if imgf_b.startswith(b'tmpfs-'):
continue
if imgf_b.startswith(b'netns-ct-'):
continue
if imgf_b.startswith(b'netns-exp-'):
continue
if imgf_b.startswith(b'rule-'):
continue
if imgf_b.startswith(b'pages-'):
continue
if imgf_b.startswith(b'iptables-'):
continue
if imgf_b.startswith(b'ip6tables-'):
continue
if imgf_b.startswith(b'route-'):
continue
if imgf_b.startswith(b'route6-'):
continue
if imgf_b.startswith(b'ifaddr-'):
continue
if imgf_b.startswith(b'tmpfs-'):
continue
if imgf_b.startswith(b'netns-ct-'):
continue
if imgf_b.startswith(b'netns-exp-'):
continue
if imgf_b.startswith(b'rule-'):
continue
o_img = open(imgf.decode(), "rb").read()
if not recode_and_check(imgf, o_img, False):
test_pass = False
if not recode_and_check(imgf, o_img, True):
test_pass = False
o_img = open(imgf.decode(), "rb").read()
if not recode_and_check(imgf, o_img, False):
test_pass = False
if not recode_and_check(imgf, o_img, True):
test_pass = False
find.wait()
if not test_pass:
print("FAIL")
sys.exit(1)
print("FAIL")
sys.exit(1)
print("PASS")

View File

@ -8,125 +8,127 @@ import time
import sys
import subprocess
criu_bin='../../criu/criu'
criu_bin = '../../criu/criu'
def mix(nr_tasks, nr_pipes):
# The return value is a list of combinations.
# Each combination is a list of pipe descriptors.
# Each pipe descriptor is a 2-element tuple that contains values
# for R and W ends of pipes, each being a bit-field denoting in
# which tasks the respective end should be opened or not.
# The return value is a list of combinations.
# Each combination is a list of pipe descriptors.
# Each pipe descriptor is a 2-element tuple that contains values
# for R and W ends of pipes, each being a bit-field denoting in
# which tasks the respective end should be opened or not.
# First -- make a full set of combinations for a single pipe.
max_idx = 1 << nr_tasks
pipe_mix = [[(r, w)] for r in range(0, max_idx) for w in range(0, max_idx)]
# First -- make a full set of combinations for a single pipe.
max_idx = 1 << nr_tasks
pipe_mix = [[(r, w)] for r in range(0, max_idx) for w in range(0, max_idx)]
# Now, for every pipe throw another one into the game making
# all possible combinations of what was seen before with the
# newbie.
pipes_mix = pipe_mix
for t in range(1, nr_pipes):
pipes_mix = [ o + n for o in pipes_mix for n in pipe_mix ]
# Now, for every pipe throw another one into the game making
# all possible combinations of what was seen before with the
# newbie.
pipes_mix = pipe_mix
for t in range(1, nr_pipes):
pipes_mix = [o + n for o in pipes_mix for n in pipe_mix]
return pipes_mix
return pipes_mix
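# A tiny worked example: with one task and one pipe, each pipe end is
# either closed (bit clear) or open in task 0 (bit set), giving four
# combinations.
assert mix(1, 1) == [[(0, 0)], [(0, 1)], [(1, 0)], [(1, 1)]]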
# Called by a test sub-process. It just closes the unneeded pipe ends
# and sleeps waiting for death.
def make_pipes(task_nr, nr_pipes, pipes, comb, status_pipe):
print('\t\tMake pipes for %d' % task_nr)
# We need to make sure that pipes have their
# ends according to comb for task_nr
print('\t\tMake pipes for %d' % task_nr)
# We need to make sure that pipes have their
# ends according to comb for task_nr
for i in range(0, nr_pipes):
# Read end
if not (comb[i][0] & (1 << task_nr)):
os.close(pipes[i][0])
# Write end
if not (comb[i][1] & (1 << task_nr)):
os.close(pipes[i][1])
for i in range(0, nr_pipes):
# Read end
if not (comb[i][0] & (1 << task_nr)):
os.close(pipes[i][0])
# Write end
if not (comb[i][1] & (1 << task_nr)):
os.close(pipes[i][1])
os.write(status_pipe, '0')
os.close(status_pipe)
while True:
time.sleep(100)
os.write(status_pipe, '0')
os.close(status_pipe)
while True:
time.sleep(100)
def get_pipe_ino(pid, fd):
try:
return os.stat('/proc/%d/fd/%d' % (pid, fd)).st_ino
except:
return None
try:
return os.stat('/proc/%d/fd/%d' % (pid, fd)).st_ino
except:
return None
def get_pipe_rw(pid, fd):
for l in open('/proc/%d/fdinfo/%d' % (pid, fd)):
if l.startswith('flags:'):
f = l.split(None, 1)[1][-2]
if f == '0':
return 0 # Read
elif f == '1':
return 1 # Write
break
for l in open('/proc/%d/fdinfo/%d' % (pid, fd)):
if l.startswith('flags:'):
f = l.split(None, 1)[1][-2]
if f == '0':
return 0 # Read
elif f == '1':
return 1 # Write
break
raise Exception('Unexpected fdinfo contents')
raise Exception('Unexpected fdinfo contents')
def check_pipe_y(pid, fd, rw, inos):
ino = get_pipe_ino(pid, fd)
if ino == None:
return 'missing '
if not inos.has_key(fd):
inos[fd] = ino
elif inos[fd] != ino:
return 'wrong '
mod = get_pipe_rw(pid, fd)
if mod != rw:
return 'badmode '
return None
ino = get_pipe_ino(pid, fd)
if ino == None:
return 'missing '
if not inos.has_key(fd):
inos[fd] = ino
elif inos[fd] != ino:
return 'wrong '
mod = get_pipe_rw(pid, fd)
if mod != rw:
return 'badmode '
return None
def check_pipe_n(pid, fd):
ino = get_pipe_ino(pid, fd)
if ino == None:
return None
else:
return 'present '
ino = get_pipe_ino(pid, fd)
if ino == None:
return None
else:
return 'present '
def check_pipe_end(kids, fd, comb, rw, inos):
t_nr = 0
for t_pid in kids:
if comb & (1 << t_nr):
res = check_pipe_y(t_pid, fd, rw, inos)
else:
res = check_pipe_n(t_pid, fd)
if res != None:
return res + 'kid(%d)' % t_nr
t_nr += 1
return None
t_nr = 0
for t_pid in kids:
if comb & (1 << t_nr):
res = check_pipe_y(t_pid, fd, rw, inos)
else:
res = check_pipe_n(t_pid, fd)
if res != None:
return res + 'kid(%d)' % t_nr
t_nr += 1
return None
def check_pipe(kids, fds, comb, inos):
for e in (0, 1): # 0 == R, 1 == W, see get_pipe_rw()
res = check_pipe_end(kids, fds[e], comb[e], e, inos)
if res != None:
return res + 'end(%d)' % e
return None
for e in (0, 1): # 0 == R, 1 == W, see get_pipe_rw()
res = check_pipe_end(kids, fds[e], comb[e], e, inos)
if res != None:
return res + 'end(%d)' % e
return None
def check_pipes(kids, pipes, comb):
# Kids contain pids
# Pipes contain pipe FDs
# Comb contain list of pairs of bits for RW ends
p_nr = 0
p_inos = {}
for p_fds in pipes:
res = check_pipe(kids, p_fds, comb[p_nr], p_inos)
if res != None:
return res + 'pipe(%d)' % p_nr
p_nr += 1
# Kids contain pids
# Pipes contain pipe FDs
# Comb contain list of pairs of bits for RW ends
p_nr = 0
p_inos = {}
for p_fds in pipes:
res = check_pipe(kids, p_fds, comb[p_nr], p_inos)
if res != None:
return res + 'pipe(%d)' % p_nr
p_nr += 1
return None
return None
# Run by test main process. It opens pipes, then forks kids that
@ -134,128 +136,134 @@ def check_pipes(kids, pipes, comb):
# and waits for a signal (unix socket message) to start checking
# the kids' FD tables.
def make_comb(comb, opts, status_pipe):
print('\tMake pipes')
# 1st -- make needed pipes
pipes = []
for p in range(0, opts.pipes):
pipes.append(os.pipe())
print('\tMake pipes')
# 1st -- make needed pipes
pipes = []
for p in range(0, opts.pipes):
pipes.append(os.pipe())
# Fork the kids that'll make pipes
kc_pipe = os.pipe()
kids = []
for t in range(0, opts.tasks):
pid = os.fork()
if pid == 0:
os.close(status_pipe)
os.close(kc_pipe[0])
make_pipes(t, opts.pipes, pipes, comb, kc_pipe[1])
sys.exit(1)
kids.append(pid)
# Fork the kids that'll make pipes
kc_pipe = os.pipe()
kids = []
for t in range(0, opts.tasks):
pid = os.fork()
if pid == 0:
os.close(status_pipe)
os.close(kc_pipe[0])
make_pipes(t, opts.pipes, pipes, comb, kc_pipe[1])
sys.exit(1)
kids.append(pid)
os.close(kc_pipe[1])
for p in pipes:
os.close(p[0])
os.close(p[1])
os.close(kc_pipe[1])
for p in pipes:
os.close(p[0])
os.close(p[1])
# Wait for kids to get ready
k_res = ''
while True:
v = os.read(kc_pipe[0], 16)
if v == '':
break
k_res += v
os.close(kc_pipe[0])
# Wait for kids to get ready
k_res = ''
while True:
v = os.read(kc_pipe[0], 16)
if v == '':
break
k_res += v
os.close(kc_pipe[0])
ex_code = 1
if k_res == '0' * opts.tasks:
print('\tWait for C/R')
cmd_sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0)
cmd_sk.bind('\0CRIUPCSK')
ex_code = 1
if k_res == '0' * opts.tasks:
print('\tWait for C/R')
cmd_sk = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0)
cmd_sk.bind('\0CRIUPCSK')
# Kids are ready, so is socket for kicking us. Notify the
# parent task that we are good to go.
os.write(status_pipe, '0')
os.close(status_pipe)
v = cmd_sk.recv(16)
if v == '0':
print('\tCheck pipes')
res = check_pipes(kids, pipes, comb)
if res == None:
ex_code = 0
else:
print('\tFAIL %s' % res)
# Kids are ready, so is socket for kicking us. Notify the
# parent task that we are good to go.
os.write(status_pipe, '0')
os.close(status_pipe)
v = cmd_sk.recv(16)
if v == '0':
print('\tCheck pipes')
res = check_pipes(kids, pipes, comb)
if res == None:
ex_code = 0
else:
print('\tFAIL %s' % res)
# Just kill kids, all checks are done by us, we don't need'em any more
for t in kids:
os.kill(t, signal.SIGKILL)
os.waitpid(t, 0)
# Just kill kids, all checks are done by us, we don't need'em any more
for t in kids:
os.kill(t, signal.SIGKILL)
os.waitpid(t, 0)
return ex_code
return ex_code
def cr_test(pid):
print('C/R test')
img_dir = 'pimg_%d' % pid
try:
os.mkdir(img_dir)
subprocess.check_call([criu_bin, 'dump', '-t', '%d' % pid, '-D', img_dir, '-o', 'dump.log', '-v4', '-j'])
except:
print('`- dump fail')
return False
print('C/R test')
img_dir = 'pimg_%d' % pid
try:
os.mkdir(img_dir)
subprocess.check_call([
criu_bin, 'dump', '-t',
'%d' % pid, '-D', img_dir, '-o', 'dump.log', '-v4', '-j'
])
except:
print('`- dump fail')
return False
try:
os.waitpid(pid, 0)
subprocess.check_call([criu_bin, 'restore', '-D', img_dir, '-o', 'rst.log', '-v4', '-j', '-d', '-S'])
except:
print('`- restore fail')
return False
try:
os.waitpid(pid, 0)
subprocess.check_call([
criu_bin, 'restore', '-D', img_dir, '-o', 'rst.log', '-v4', '-j',
'-d', '-S'
])
except:
print('`- restore fail')
return False
return True
return True
def run(comb, opts):
print('Checking %r' % comb)
cpipe = os.pipe()
pid = os.fork()
if pid == 0:
os.close(cpipe[0])
ret = make_comb(comb, opts, cpipe[1])
sys.exit(ret)
print('Checking %r' % comb)
cpipe = os.pipe()
pid = os.fork()
if pid == 0:
os.close(cpipe[0])
ret = make_comb(comb, opts, cpipe[1])
sys.exit(ret)
# Wait for the main process to get ready
os.close(cpipe[1])
res = os.read(cpipe[0], 16)
os.close(cpipe[0])
# Wait for the main process to get ready
os.close(cpipe[1])
res = os.read(cpipe[0], 16)
os.close(cpipe[0])
if res == '0':
res = cr_test(pid)
if res == '0':
res = cr_test(pid)
print('Wake up test')
s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0)
if res:
res = '0'
else:
res = 'X'
try:
# Kick the test to check its state
s.sendto(res, '\0CRIUPCSK')
except:
# Restore might have failed or something else happened
os.kill(pid, signal.SIGKILL)
s.close()
print('Wake up test')
s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM, 0)
if res:
res = '0'
else:
res = 'X'
try:
# Kick the test to check its state
s.sendto(res, '\0CRIUPCSK')
except:
# Restore might have failed or something else happened
os.kill(pid, signal.SIGKILL)
s.close()
# Wait for the guy to exit and get the result (PASS/FAIL)
p, st = os.waitpid(pid, 0)
if os.WIFEXITED(st):
st = os.WEXITSTATUS(st)
# Wait for the guy to exit and get the result (PASS/FAIL)
p, st = os.waitpid(pid, 0)
if os.WIFEXITED(st):
st = os.WEXITSTATUS(st)
print('Done (%d, pid == %d)' % (st, pid))
return st == 0
print('Done (%d, pid == %d)' % (st, pid))
return st == 0
p = argparse.ArgumentParser("CRIU test suite")
p.add_argument("--tasks", help = "Number of tasks", default = '2')
p.add_argument("--pipes", help = "Number of pipes", default = '2')
p.add_argument("--tasks", help="Number of tasks", default='2')
p.add_argument("--pipes", help="Number of pipes", default='2')
opts = p.parse_args()
opts.tasks = int(opts.tasks)
opts.pipes = int(opts.pipes)
@ -263,8 +271,8 @@ opts.pipes = int(opts.pipes)
pipe_combs = mix(opts.tasks, opts.pipes)
for comb in pipe_combs:
if not run(comb, opts):
print('FAIL')
break
if not run(comb, opts):
print('FAIL')
break
else:
print('PASS')
print('PASS')

File diff suppressed because it is too large Load Diff

View File

@ -5,35 +5,35 @@ id_str = ""
def create_fds():
tdir = tempfile.mkdtemp("zdtm.inhfd.XXXXXX")
if os.system("mount -t tmpfs zdtm.inhfd %s" % tdir) != 0:
raise Exception("Unable to mount tmpfs")
tfifo = os.path.join(tdir, "test_fifo")
os.mkfifo(tfifo)
fd2 = open(tfifo, "w+b", buffering=0)
fd1 = open(tfifo, "rb")
os.system("umount -l %s" % tdir)
os.rmdir(tdir)
tdir = tempfile.mkdtemp("zdtm.inhfd.XXXXXX")
if os.system("mount -t tmpfs zdtm.inhfd %s" % tdir) != 0:
raise Exception("Unable to mount tmpfs")
tfifo = os.path.join(tdir, "test_fifo")
os.mkfifo(tfifo)
fd2 = open(tfifo, "w+b", buffering=0)
fd1 = open(tfifo, "rb")
os.system("umount -l %s" % tdir)
os.rmdir(tdir)
mnt_id = -1
with open("/proc/self/fdinfo/%d" % fd1.fileno()) as f:
for line in f:
line = line.split()
if line[0] == "mnt_id:":
mnt_id = int(line[1])
break
else:
raise Exception("Unable to find mnt_id")
mnt_id = -1
with open("/proc/self/fdinfo/%d" % fd1.fileno()) as f:
for line in f:
line = line.split()
if line[0] == "mnt_id:":
mnt_id = int(line[1])
break
else:
raise Exception("Unable to find mnt_id")
global id_str
id_str = "file[%x:%x]" % (mnt_id, os.fstat(fd1.fileno()).st_ino)
global id_str
id_str = "file[%x:%x]" % (mnt_id, os.fstat(fd1.fileno()).st_ino)
return [(fd2, fd1)]
return [(fd2, fd1)]
def filename(pipef):
return id_str
return id_str
def dump_opts(sockf):
return ["--external", id_str]
return ["--external", id_str]

View File

@ -2,16 +2,16 @@ import os
def create_fds():
pipes = []
for i in range(10):
(fd1, fd2) = os.pipe()
pipes.append((os.fdopen(fd2, "wb"), os.fdopen(fd1, "rb")))
return pipes
pipes = []
for i in range(10):
(fd1, fd2) = os.pipe()
pipes.append((os.fdopen(fd2, "wb"), os.fdopen(fd1, "rb")))
return pipes
def filename(pipef):
return 'pipe:[%d]' % os.fstat(pipef.fileno()).st_ino
return 'pipe:[%d]' % os.fstat(pipef.fileno()).st_ino
def dump_opts(sockf):
return []
return []

View File

@ -3,19 +3,19 @@ import os
def create_fds():
(sk1, sk2) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
(sk3, sk4) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
return [(sk1.makefile("wb"), sk2.makefile("rb")),
(sk3.makefile("wb"), sk4.makefile("rb"))]
(sk1, sk2) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
(sk3, sk4) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
return [(sk1.makefile("wb"), sk2.makefile("rb")),
(sk3.makefile("wb"), sk4.makefile("rb"))]
def __sock_ino(sockf):
return os.fstat(sockf.fileno()).st_ino
return os.fstat(sockf.fileno()).st_ino
def filename(sockf):
return 'socket:[%d]' % __sock_ino(sockf)
return 'socket:[%d]' % __sock_ino(sockf)
def dump_opts(sockf):
return ['--external', 'unix[%d]' % __sock_ino(sockf)]
return ['--external', 'unix[%d]' % __sock_ino(sockf)]

View File

@ -4,34 +4,33 @@ import os
import pty
import termios
ctl = False
def child_prep(fd):
global ctl
if ctl:
return
ctl = True
fcntl.ioctl(fd.fileno(), termios.TIOCSCTTY, 1)
global ctl
if ctl:
return
ctl = True
fcntl.ioctl(fd.fileno(), termios.TIOCSCTTY, 1)
def create_fds():
ttys = []
for i in range(10):
(fd1, fd2) = pty.openpty()
newattr = termios.tcgetattr(fd1)
newattr[3] &= ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd1, termios.TCSADRAIN, newattr)
ttys.append((os.fdopen(fd1, "wb"), os.fdopen(fd2, "rb")))
return ttys
ttys = []
for i in range(10):
(fd1, fd2) = pty.openpty()
newattr = termios.tcgetattr(fd1)
newattr[3] &= ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd1, termios.TCSADRAIN, newattr)
ttys.append((os.fdopen(fd1, "wb"), os.fdopen(fd2, "rb")))
return ttys
def filename(pipef):
st = os.fstat(pipef.fileno())
return 'tty[%x:%x]' % (st.st_rdev, st.st_dev)
st = os.fstat(pipef.fileno())
return 'tty[%x:%x]' % (st.st_rdev, st.st_dev)
def dump_opts(sockf):
st = os.fstat(sockf.fileno())
return "--external", 'tty[%x:%x]' % (st.st_rdev, st.st_dev)
st = os.fstat(sockf.fileno())
return "--external", 'tty[%x:%x]' % (st.st_rdev, st.st_dev)

View File

@ -5,32 +5,41 @@ import os, sys, time, signal, pty
master, slave = pty.openpty()
p = subprocess.Popen(["setsid", "--ctty", "sleep", "10000"],
stdin = slave, stdout = slave, stderr = slave, close_fds = True)
stdin=slave,
stdout=slave,
stderr=slave,
close_fds=True)
st = os.stat("/proc/self/fd/%d" % slave)
ttyid = "tty[%x:%x]" % (st.st_rdev, st.st_dev)
os.close(slave)
time.sleep(1)
ret = subprocess.Popen(["../../../criu/criu", "dump", "-t", str(p.pid), "-v4", "--external", ttyid]).wait()
ret = subprocess.Popen([
"../../../criu/criu", "dump", "-t",
str(p.pid), "-v4", "--external", ttyid
]).wait()
if ret:
sys.exit(ret)
sys.exit(ret)
p.wait()
new_master, slave = pty.openpty() # get another pty pair
new_master, slave = pty.openpty() # get another pty pair
os.close(master)
ttyid = "fd[%d]:tty[%x:%x]" % (slave, st.st_rdev, st.st_dev)
ret = subprocess.Popen(["../../../criu/criu", "restore", "-v4", "--inherit-fd", ttyid, "--restore-sibling", "--restore-detach"]).wait()
ret = subprocess.Popen([
"../../../criu/criu", "restore", "-v4", "--inherit-fd", ttyid,
"--restore-sibling", "--restore-detach"
]).wait()
if ret:
sys.exit(ret)
sys.exit(ret)
os.close(slave)
os.waitpid(-1, os.WNOHANG) # is the process alive
os.waitpid(-1, os.WNOHANG) # is the process alive
os.close(new_master)
_, status = os.wait()
if not os.WIFSIGNALED(status) or os.WTERMSIG(status) != signal.SIGHUP:
print(status)
sys.exit(1)
print(status)
sys.exit(1)
print("PASS")

View File

@ -1,31 +1,36 @@
import os
import tempfile, random
def mount(src, dst, shared, private, slave):
cmd = "mount"
if shared:
cmd += " --make-shared"
if private:
cmd += " --make-private"
if slave:
cmd += " --make-slave"
if src:
cmd += " --bind '%s' '%s'" % (src, dst)
else:
cmd += " -t tmpfs none '%s'" % (dst)
cmd = "mount"
if shared:
cmd += " --make-shared"
if private:
cmd += " --make-private"
if slave:
cmd += " --make-slave"
if src:
cmd += " --bind '%s' '%s'" % (src, dst)
else:
cmd += " -t tmpfs none '%s'" % (dst)
print(cmd)
ret = os.system(cmd)
if ret:
print("failed")
print(cmd)
ret = os.system(cmd)
if ret:
print("failed")
root = tempfile.mkdtemp(prefix = "root.mount", dir = "/tmp")
root = tempfile.mkdtemp(prefix="root.mount", dir="/tmp")
mount(None, root, 1, 0, 0)
mounts = [root]
for i in range(10):
dstdir = random.choice(mounts)
dst = tempfile.mkdtemp(prefix = "mount", dir = dstdir)
src = random.choice(mounts + [None])
mount(src, dst, random.randint(0,100) > 50, random.randint(0,100) > 90, random.randint(0,100) > 50)
mounts.append(dst)
dstdir = random.choice(mounts)
dst = tempfile.mkdtemp(prefix="mount", dir=dstdir)
src = random.choice(mounts + [None])
mount(src, dst,
random.randint(0, 100) > 50,
random.randint(0, 100) > 90,
random.randint(0, 100) > 50)
mounts.append(dst)

View File

@ -14,169 +14,174 @@ does_not_exist = 'does-not.exist'
def setup_swrk():
print('Connecting to CRIU in swrk mode.')
css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
swrk = subprocess.Popen(['./criu', "swrk", "%d" % css[0].fileno()])
css[0].close()
return swrk, css[1]
print('Connecting to CRIU in swrk mode.')
css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
swrk = subprocess.Popen(['./criu', "swrk", "%d" % css[0].fileno()])
css[0].close()
return swrk, css[1]
def setup_config_file(content):
# Creating a temporary file which will be used as configuration file.
fd, path = mkstemp()
# Creating a temporary file which will be used as configuration file.
fd, path = mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(content)
with os.fdopen(fd, 'w') as f:
f.write(content)
os.environ['CRIU_CONFIG_FILE'] = path
os.environ['CRIU_CONFIG_FILE'] = path
return path
return path
def cleanup_config_file(path):
if os.environ.get('CRIU_CONFIG_FILE', None) is not None:
del os.environ['CRIU_CONFIG_FILE']
os.unlink(path)
if os.environ.get('CRIU_CONFIG_FILE', None) is not None:
del os.environ['CRIU_CONFIG_FILE']
os.unlink(path)
def cleanup_output(path):
for f in (does_not_exist, log_file):
f = os.path.join(path, f)
if os.access(f, os.F_OK):
os.unlink(f)
for f in (does_not_exist, log_file):
f = os.path.join(path, f)
if os.access(f, os.F_OK):
os.unlink(f)
def setup_criu_dump_request():
# Create criu msg, set its type to dump request
# and set dump options. Check out more options in protobuf/rpc.proto
req = rpc.criu_req()
req.type = rpc.DUMP
req.opts.leave_running = True
req.opts.log_level = 4
req.opts.log_file = log_file
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)
# Not necessary, just for testing
req.opts.tcp_established = True
req.opts.shell_job = True
return req
# Create criu msg, set its type to dump request
# and set dump options. Check out more options in protobuf/rpc.proto
req = rpc.criu_req()
req.type = rpc.DUMP
req.opts.leave_running = True
req.opts.log_level = 4
req.opts.log_file = log_file
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)
# Not necessary, just for testing
req.opts.tcp_established = True
req.opts.shell_job = True
return req
def do_rpc(s, req):
# Send request
s.send(req.SerializeToString())
# Send request
s.send(req.SerializeToString())
# Recv response
resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))
# Recv response
resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))
s.close()
return resp
s.close()
return resp
def test_broken_configuration_file():
# Testing RPC configuration file mode with a broken configuration file.
# This should fail
content = 'hopefully-this-option-will-never=exist'
path = setup_config_file(content)
swrk, s = setup_swrk()
s.close()
# This test is only about detecting wrong configuration files.
# If we do not sleep it might happen that we kill CRIU before
# it parses the configuration file. A short sleep makes sure
# that the configuration file has been parsed. Hopefully.
# (I am sure this will fail horribly at some point)
time.sleep(0.3)
swrk.kill()
return_code = swrk.wait()
# delete temporary file again
cleanup_config_file(path)
if return_code != 1:
print('FAIL: CRIU should have returned 1 instead of %d' % return_code)
sys.exit(-1)
# Testing RPC configuration file mode with a broken configuration file.
# This should fail
content = 'hopefully-this-option-will-never=exist'
path = setup_config_file(content)
swrk, s = setup_swrk()
s.close()
# This test is only about detecting wrong configuration files.
# If we do not sleep it might happen that we kill CRIU before
# it parses the configuration file. A short sleep makes sure
# that the configuration file has been parsed. Hopefully.
# (I am sure this will fail horribly at some point)
time.sleep(0.3)
swrk.kill()
return_code = swrk.wait()
# delete temporary file again
cleanup_config_file(path)
if return_code != 1:
print('FAIL: CRIU should have returned 1 instead of %d' % return_code)
sys.exit(-1)
def search_in_log_file(log, message):
with open(os.path.join(args['dir'], log)) as f:
if message not in f.read():
print('FAIL: Missing the expected error message (%s) in the log file' % message)
sys.exit(-1)
with open(os.path.join(args['dir'], log)) as f:
if message not in f.read():
print(
'FAIL: Missing the expected error message (%s) in the log file'
% message)
sys.exit(-1)
def check_results(resp, log):
# Check if the specified log file exists
if not os.path.isfile(os.path.join(args['dir'], log)):
print('FAIL: Expected log file %s does not exist' % log)
sys.exit(-1)
# Dump should have failed with: 'The criu itself is within dumped tree'
if resp.type != rpc.DUMP:
print('FAIL: Unexpected msg type %r' % resp.type)
sys.exit(-1)
if 'The criu itself is within dumped tree' not in resp.cr_errmsg:
print('FAIL: Missing the expected error message in RPC response')
sys.exit(-1)
# Look into the log file for the same message
search_in_log_file(log, 'The criu itself is within dumped tree')
# Check if the specified log file exists
if not os.path.isfile(os.path.join(args['dir'], log)):
print('FAIL: Expected log file %s does not exist' % log)
sys.exit(-1)
# Dump should have failed with: 'The criu itself is within dumped tree'
if resp.type != rpc.DUMP:
print('FAIL: Unexpected msg type %r' % resp.type)
sys.exit(-1)
if 'The criu itself is within dumped tree' not in resp.cr_errmsg:
print('FAIL: Missing the expected error message in RPC response')
sys.exit(-1)
# Look into the log file for the same message
search_in_log_file(log, 'The criu itself is within dumped tree')
def test_rpc_without_configuration_file():
# Testing without configuration file
# Just doing a dump and checking for the logfile
req = setup_criu_dump_request()
_, s = setup_swrk()
resp = do_rpc(s, req)
s.close()
check_results(resp, log_file)
# Testing without configuration file
# Just doing a dump and checking for the logfile
req = setup_criu_dump_request()
_, s = setup_swrk()
resp = do_rpc(s, req)
s.close()
check_results(resp, log_file)
def test_rpc_with_configuration_file():
# Testing with configuration file
# Just doing a dump and checking for the logfile
# Testing with configuration file
# Just doing a dump and checking for the logfile
# Setting a different log file via configuration file
# This should not work as RPC settings overwrite configuration
# file settings in the default configuration.
log = does_not_exist
content = 'log-file ' + log + '\n'
content += 'no-tcp-established\nno-shell-job'
path = setup_config_file(content)
req = setup_criu_dump_request()
_, s = setup_swrk()
do_rpc(s, req)
s.close()
cleanup_config_file(path)
# Check if the specified log file exists
# It should not as configuration files do not overwrite RPC values.
if os.path.isfile(os.path.join(args['dir'], log)):
print('FAIL: log file %s should not exist' % log)
sys.exit(-1)
# Setting a different log file via configuration file
# This should not work as RPC settings overwrite configuration
# file settings in the default configuration.
log = does_not_exist
content = 'log-file ' + log + '\n'
content += 'no-tcp-established\nno-shell-job'
path = setup_config_file(content)
req = setup_criu_dump_request()
_, s = setup_swrk()
do_rpc(s, req)
s.close()
cleanup_config_file(path)
# Check if the specified log file exists
# It should not as configuration files do not overwrite RPC values.
if os.path.isfile(os.path.join(args['dir'], log)):
print('FAIL: log file %s should not exist' % log)
sys.exit(-1)
def test_rpc_with_configuration_file_overwriting_rpc():
# Testing with configuration file
# Just doing a dump and checking for the logfile
# Testing with configuration file
# Just doing a dump and checking for the logfile
# Setting a different log file via configuration file
# This should not work as RPC settings overwrite configuration
# file settings in the default configuration.
log = does_not_exist
content = 'log-file ' + log + '\n'
content += 'no-tcp-established\nno-shell-job'
path = setup_config_file(content)
# Only set the configuration file via RPC;
# not via environment variable
del os.environ['CRIU_CONFIG_FILE']
req = setup_criu_dump_request()
req.opts.config_file = path
_, s = setup_swrk()
resp = do_rpc(s, req)
s.close()
cleanup_config_file(path)
check_results(resp, log)
# Setting a different log file via configuration file
# This should not work as RPC settings overwrite configuration
# file settings in the default configuration.
log = does_not_exist
content = 'log-file ' + log + '\n'
content += 'no-tcp-established\nno-shell-job'
path = setup_config_file(content)
# Only set the configuration file via RPC;
# not via environment variable
del os.environ['CRIU_CONFIG_FILE']
req = setup_criu_dump_request()
req.opts.config_file = path
_, s = setup_swrk()
resp = do_rpc(s, req)
s.close()
cleanup_config_file(path)
check_results(resp, log)
parser = argparse.ArgumentParser(description="Test config files using CRIU RPC")
parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed")
parser = argparse.ArgumentParser(
description="Test config files using CRIU RPC")
parser.add_argument('dir',
type=str,
help="Directory where CRIU images should be placed")
args = vars(parser.parse_args())

View File

@ -6,130 +6,136 @@ import rpc_pb2 as rpc
import argparse
parser = argparse.ArgumentParser(description="Test errno reported by CRIU RPC")
parser.add_argument('socket', type = str, help = "CRIU service socket")
parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed")
parser.add_argument('socket', type=str, help="CRIU service socket")
parser.add_argument('dir',
type=str,
help="Directory where CRIU images should be placed")
args = vars(parser.parse_args())
# Prepare dir for images
class test:
def __init__(self):
self.imgs_fd = os.open(args['dir'], os.O_DIRECTORY)
self.s = -1
self._MAX_MSG_SIZE = 1024
def __init__(self):
self.imgs_fd = os.open(args['dir'], os.O_DIRECTORY)
self.s = -1
self._MAX_MSG_SIZE = 1024
def connect(self):
self.s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
self.s.connect(args['socket'])
def connect(self):
self.s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
self.s.connect(args['socket'])
def get_base_req(self):
req = rpc.criu_req()
req.opts.log_level = 4
req.opts.images_dir_fd = self.imgs_fd
return req
def get_base_req(self):
req = rpc.criu_req()
req.opts.log_level = 4
req.opts.images_dir_fd = self.imgs_fd
return req
def send_req(self, req):
self.connect()
self.s.send(req.SerializeToString())
def send_req(self, req):
self.connect()
self.s.send(req.SerializeToString())
def recv_resp(self):
resp = rpc.criu_resp()
resp.ParseFromString(self.s.recv(self._MAX_MSG_SIZE))
return resp
def recv_resp(self):
resp = rpc.criu_resp()
resp.ParseFromString(self.s.recv(self._MAX_MSG_SIZE))
return resp
def check_resp(self, resp, typ, err):
if resp.type != typ:
raise Exception('Unexpected response type ' + str(resp.type))
def check_resp(self, resp, typ, err):
if resp.type != typ:
raise Exception('Unexpected response type ' + str(resp.type))
if resp.success:
raise Exception('Unexpected success = True')
if resp.success:
raise Exception('Unexpected success = True')
if err and resp.cr_errno != err:
raise Exception('Unexpected cr_errno ' + str(resp.cr_errno))
if err and resp.cr_errno != err:
raise Exception('Unexpected cr_errno ' + str(resp.cr_errno))
def no_process(self):
print('Try to dump a non-existent process')
# Get pid of non-existing process.
# Suppose max_pid is not taken by any process.
with open("/proc/sys/kernel/pid_max", "r") as f:
pid = int(f.readline())
try:
os.kill(pid, 0)
except OSError:
pass
else:
raise Exception('max pid is taken')
def no_process(self):
print('Try to dump a non-existent process')
# Get pid of non-existing process.
# Suppose max_pid is not taken by any process.
with open("/proc/sys/kernel/pid_max", "r") as f:
pid = int(f.readline())
try:
os.kill(pid, 0)
except OSError:
pass
else:
raise Exception('max pid is taken')
# Ask criu to dump non-existing process.
req = self.get_base_req()
req.type = rpc.DUMP
req.opts.pid = pid
# Ask criu to dump non-existing process.
req = self.get_base_req()
req.type = rpc.DUMP
req.opts.pid = pid
self.send_req(req)
resp = self.recv_resp()
self.send_req(req)
resp = self.recv_resp()
self.check_resp(resp, rpc.DUMP, errno.ESRCH)
self.check_resp(resp, rpc.DUMP, errno.ESRCH)
print('Success')
print('Success')
def process_exists(self):
print('Try to restore a process whose pid is already taken by another process')
def process_exists(self):
print(
'Try to restore a process whose pid is already taken by another process'
)
# Perform self-dump
req = self.get_base_req()
req.type = rpc.DUMP
req.opts.leave_running = True
# Perform self-dump
req = self.get_base_req()
req.type = rpc.DUMP
req.opts.leave_running = True
self.send_req(req)
resp = self.recv_resp()
self.send_req(req)
resp = self.recv_resp()
if resp.success != True:
raise Exception('Self-dump failed')
if resp.success != True:
raise Exception('Self-dump failed')
# Ask to restore process from images of ourselves
req = self.get_base_req()
req.type = rpc.RESTORE
# Ask to restore process from images of ourselves
req = self.get_base_req()
req.type = rpc.RESTORE
self.send_req(req)
resp = self.recv_resp()
self.send_req(req)
resp = self.recv_resp()
self.check_resp(resp, rpc.RESTORE, errno.EEXIST)
self.check_resp(resp, rpc.RESTORE, errno.EEXIST)
print('Success')
print('Success')
def bad_options(self):
print('Try to send criu invalid opts')
def bad_options(self):
print('Try to send criu invalid opts')
# Subdirs are not allowed in log_file
req = self.get_base_req()
req.type = rpc.DUMP
req.opts.log_file = "../file.log"
# Subdirs are not allowed in log_file
req = self.get_base_req()
req.type = rpc.DUMP
req.opts.log_file = "../file.log"
self.send_req(req)
resp = self.recv_resp()
self.send_req(req)
resp = self.recv_resp()
self.check_resp(resp, rpc.DUMP, errno.EBADRQC)
self.check_resp(resp, rpc.DUMP, errno.EBADRQC)
print('Success')
print('Success')
def bad_request(self):
print('Try to send criu invalid request type')
def bad_request(self):
print('Try to send criu invalid request type')
req = self.get_base_req()
req.type = rpc.NOTIFY
req = self.get_base_req()
req.type = rpc.NOTIFY
self.send_req(req)
resp = self.recv_resp()
self.send_req(req)
resp = self.recv_resp()
self.check_resp(resp, rpc.EMPTY, None)
self.check_resp(resp, rpc.EMPTY, None)
print('Success')
print('Success')
def run(self):
self.no_process()
self.process_exists()
self.bad_options()
self.bad_request()
def run(self):
self.no_process()
self.process_exists()
self.bad_options()
self.bad_request()
t = test()
t.run()

View File

@ -5,8 +5,10 @@ import rpc_pb2 as rpc
import argparse
parser = argparse.ArgumentParser(description="Test page-server using CRIU RPC")
parser.add_argument('socket', type = str, help = "CRIU service socket")
parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed")
parser.add_argument('socket', type=str, help="CRIU service socket")
parser.add_argument('dir',
type=str,
help="Directory where CRIU images should be placed")
args = vars(parser.parse_args())
@ -16,45 +18,45 @@ s.connect(args['socket'])
# Start page-server
print('Starting page-server')
req = rpc.criu_req()
req.type = rpc.PAGE_SERVER
req.opts.log_file = 'page-server.log'
req.opts.log_level = 4
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)
req = rpc.criu_req()
req.type = rpc.PAGE_SERVER
req.opts.log_file = 'page-server.log'
req.opts.log_level = 4
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)
s.send(req.SerializeToString())
resp = rpc.criu_resp()
resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))
if resp.type != rpc.PAGE_SERVER:
print('Unexpected msg type')
sys.exit(1)
print('Unexpected msg type')
sys.exit(1)
else:
if resp.success:
# check if pid even exists
try:
os.kill(resp.ps.pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
print('No process with page-server pid %d' %(resp.ps.pid))
else:
print('Can\'t check that process %d exists' %(resp.ps.pid))
sys.exit(1)
print('Success, page-server pid %d started on port %u' %(resp.ps.pid, resp.ps.port))
else:
print('Failed to start page-server')
sys.exit(1)
if resp.success:
# check if pid even exists
try:
os.kill(resp.ps.pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
print('No process with page-server pid %d' % (resp.ps.pid))
else:
print('Can\'t check that process %d exists' % (resp.ps.pid))
sys.exit(1)
print('Success, page-server pid %d started on port %u' %
(resp.ps.pid, resp.ps.port))
else:
print('Failed to start page-server')
sys.exit(1)
# Perform self-dump
print('Dumping myself using page-server')
req.type = rpc.DUMP
req.opts.ps.port = resp.ps.port
req.opts.ps.address = "127.0.0.1"
req.opts.log_file = 'dump.log'
req.opts.leave_running = True
req.type = rpc.DUMP
req.opts.ps.port = resp.ps.port
req.opts.ps.address = "127.0.0.1"
req.opts.log_file = 'dump.log'
req.opts.leave_running = True
s.close()
s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
@ -64,11 +66,11 @@ s.send(req.SerializeToString())
resp.ParseFromString(s.recv(MAX_MSG_SIZE))
if resp.type != rpc.DUMP:
print('Unexpected msg type')
sys.exit(1)
print('Unexpected msg type')
sys.exit(1)
else:
if resp.success:
print('Success')
else:
print('Fail')
sys.exit(1)
if resp.success:
print('Success')
else:
print('Fail')
sys.exit(1)

View File

@ -12,6 +12,6 @@ r = f.read(1)
f.close()
if r == '\0':
sys.exit(0)
sys.exit(0)
sys.exit(-1)

View File

@ -4,9 +4,12 @@ import socket, os, sys
import rpc_pb2 as rpc
import argparse
parser = argparse.ArgumentParser(description="Test ability to restore a process from images using CRIU RPC")
parser.add_argument('socket', type = str, help = "CRIU service socket")
parser.add_argument('dir', type = str, help = "Directory where CRIU images could be found")
parser = argparse.ArgumentParser(
description="Test ability to restore a process from images using CRIU RPC")
parser.add_argument('socket', type=str, help="CRIU service socket")
parser.add_argument('dir',
type=str,
help="Directory where CRIU images could be found")
args = vars(parser.parse_args())
@ -16,30 +19,30 @@ s.connect(args['socket'])
# Create criu msg, set its type to dump request
# and set dump options. Check out more options in protobuf/rpc.proto
req = rpc.criu_req()
req.type = rpc.RESTORE
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)
req = rpc.criu_req()
req.type = rpc.RESTORE
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)
# As the dumped process is running with setsid this should not
# be necessary. There seems to be a problem for this testcase
# in combination with alpine's setsid.
# The dump is now done with -j and the restore also.
req.opts.shell_job = True
req.opts.shell_job = True
# Send request
s.send(req.SerializeToString())
# Recv response
resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))
if resp.type != rpc.RESTORE:
print('Unexpected msg type')
sys.exit(-1)
print('Unexpected msg type')
sys.exit(-1)
else:
if resp.success:
print('Restore success')
else:
print('Restore fail')
sys.exit(-1)
print("PID of the restored program is %d\n" %(resp.restore.pid))
if resp.success:
print('Restore success')
else:
print('Restore fail')
sys.exit(-1)
print("PID of the restored program is %d\n" % (resp.restore.pid))


@@ -4,9 +4,12 @@ import socket, os, sys
import rpc_pb2 as rpc
import argparse
parser = argparse.ArgumentParser(description="Test dump/restore using CRIU RPC")
parser.add_argument('socket', type = str, help = "CRIU service socket")
parser.add_argument('dir', type = str, help = "Directory where CRIU images should be placed")
parser = argparse.ArgumentParser(
description="Test dump/restore using CRIU RPC")
parser.add_argument('socket', type=str, help="CRIU service socket")
parser.add_argument('dir',
type=str,
help="Directory where CRIU images should be placed")
args = vars(parser.parse_args())
@@ -16,32 +19,32 @@ s.connect(args['socket'])
# Create criu msg, set its type to dump request
# and set dump options. Check out more options in protobuf/rpc.proto
req = rpc.criu_req()
req.type = rpc.DUMP
req.opts.leave_running = True
req.opts.log_level = 4
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)
req = rpc.criu_req()
req.type = rpc.DUMP
req.opts.leave_running = True
req.opts.log_level = 4
req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)
# Send request
s.send(req.SerializeToString())
# Recv response
resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp = rpc.criu_resp()
MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))
if resp.type != rpc.DUMP:
print('Unexpected msg type')
sys.exit(-1)
print('Unexpected msg type')
sys.exit(-1)
else:
if resp.success:
print('Success')
else:
print('Fail')
sys.exit(-1)
if resp.success:
print('Success')
else:
print('Fail')
sys.exit(-1)
if resp.dump.restored:
print('Restored')
if resp.dump.restored:
print('Restored')
# Connect to service socket
s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
@@ -61,21 +64,21 @@ MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))
if resp.type != rpc.VERSION:
print('RPC: Unexpected msg type')
sys.exit(-1)
print('RPC: Unexpected msg type')
sys.exit(-1)
else:
if resp.success:
print('RPC: Success')
print('CRIU major %d' % resp.version.major_number)
print('CRIU minor %d' % resp.version.minor_number)
if resp.version.HasField('gitid'):
print('CRIU gitid %s' % resp.version.gitid)
if resp.version.HasField('sublevel'):
print('CRIU sublevel %s' % resp.version.sublevel)
if resp.version.HasField('extra'):
print('CRIU extra %s' % resp.version.extra)
if resp.version.HasField('name'):
print('CRIU name %s' % resp.version.name)
else:
print('Fail')
sys.exit(-1)
if resp.success:
print('RPC: Success')
print('CRIU major %d' % resp.version.major_number)
print('CRIU minor %d' % resp.version.minor_number)
if resp.version.HasField('gitid'):
print('CRIU gitid %s' % resp.version.gitid)
if resp.version.HasField('sublevel'):
print('CRIU sublevel %s' % resp.version.sublevel)
if resp.version.HasField('extra'):
print('CRIU extra %s' % resp.version.extra)
if resp.version.HasField('name'):
print('CRIU name %s' % resp.version.name)
else:
print('Fail')
sys.exit(-1)
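
The dump half is symmetric: the only options this test sets are leave_running (so the dumped tasks survive and the test can continue), log_level, and the image directory fd. Note that the test reopens the service socket before the VERSION query, so each request here gets its own connection. A condensed sketch, using placeholder paths and the send_req helper from above:

import os

import rpc_pb2 as rpc

req = rpc.criu_req()
req.type = rpc.DUMP
req.opts.leave_running = True  # keep the dumped tasks running afterwards
req.opts.log_level = 4
req.opts.images_dir_fd = os.open('/path/to/images', os.O_DIRECTORY)

resp = send_req('/path/to/criu_service.socket', req)
if resp.success and resp.dump.restored:
    print('Restored')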


@@ -27,21 +27,21 @@ MAX_MSG_SIZE = 1024
resp.ParseFromString(s.recv(MAX_MSG_SIZE))
if resp.type != rpc.VERSION:
print('RPC: Unexpected msg type')
sys.exit(-1)
print('RPC: Unexpected msg type')
sys.exit(-1)
else:
if resp.success:
print('RPC: Success')
print('CRIU major %d' % resp.version.major_number)
print('CRIU minor %d' % resp.version.minor_number)
if resp.version.HasField('gitid'):
print('CRIU gitid %s' % resp.version.gitid)
if resp.version.HasField('sublevel'):
print('CRIU sublevel %s' % resp.version.sublevel)
if resp.version.HasField('extra'):
print('CRIU extra %s' % resp.version.extra)
if resp.version.HasField('name'):
print('CRIU name %s' % resp.version.name)
else:
print('Fail')
sys.exit(-1)
if resp.success:
print('RPC: Success')
print('CRIU major %d' % resp.version.major_number)
print('CRIU minor %d' % resp.version.minor_number)
if resp.version.HasField('gitid'):
print('CRIU gitid %s' % resp.version.gitid)
if resp.version.HasField('sublevel'):
print('CRIU sublevel %s' % resp.version.sublevel)
if resp.version.HasField('extra'):
print('CRIU extra %s' % resp.version.extra)
if resp.version.HasField('name'):
print('CRIU name %s' % resp.version.name)
else:
print('Fail')
sys.exit(-1)
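
The VERSION reply exposes the version as separate fields; a caller that needs to gate behaviour on it can fold them into a comparable tuple. The helper below is illustrative only (not part of the test) and assumes the optional sublevel field is numeric, as it is printed above.

def version_tuple(resp):
    # major/minor are always present; sublevel is optional
    v = resp.version
    sub = v.sublevel if v.HasField('sublevel') else 0
    return (v.major_number, v.minor_number, sub)

# e.g. require a minimum CRIU version before using a feature:
# if version_tuple(resp) < (3, 11, 0):
#     sys.exit('CRIU too old')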


@@ -6,15 +6,17 @@ cr_bin = "../../../criu/criu"
os.chdir(os.getcwd())
def create_pty():
(fd1, fd2) = pty.openpty()
return (os.fdopen(fd1, "w+"), os.fdopen(fd2, "w+"))
(fd1, fd2) = pty.openpty()
return (os.fdopen(fd1, "w+"), os.fdopen(fd2, "w+"))
if not os.access("work", os.X_OK):
os.mkdir("work", 0755)
open("running", "w").close()
m,s = create_pty()
m, s = create_pty()
p = os.pipe()
pr = os.fdopen(p[0], "r")
pw = os.fdopen(p[1], "w")
@@ -46,14 +48,15 @@ if ret != 0:
os.wait()
os.unlink("running")
m,s = create_pty()
m, s = create_pty()
cpid = os.fork()
if cpid == 0:
os.setsid()
fcntl.ioctl(m.fileno(), termios.TIOCSCTTY, 1)
cmd = [cr_bin, "restore", "-j", "-D", "work", "-v"]
print("Run: %s" % " ".join(cmd))
ret = subprocess.Popen([cr_bin, "restore", "-j", "-D", "work", "-v"]).wait()
ret = subprocess.Popen([cr_bin, "restore", "-j", "-D", "work",
"-v"]).wait()
if ret != 0:
sys.exit(1)
sys.exit(0)
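
This last test drives the criu binary directly rather than over RPC, and the interesting part is the terminal handling: both dump and restore run with -j (shell job), so the restorer is first forked into its own session and given a pty as its controlling terminal. A sketch of that sequence, mirroring the fork/setsid/TIOCSCTTY calls above (function name and argument handling are illustrative):

import fcntl
import os
import pty
import subprocess
import termios


def restore_in_new_session(criu_bin, images_dir):
    # New pty pair; the test wraps the fds in file objects, plain fds work too.
    master, slave = pty.openpty()
    pid = os.fork()
    if pid == 0:
        # Child: become a session leader and take the pty as the
        # controlling terminal, as the test does before running restore.
        os.setsid()
        fcntl.ioctl(master, termios.TIOCSCTTY, 1)
        cmd = [criu_bin, "restore", "-j", "-D", images_dir, "-v"]
        ret = subprocess.Popen(cmd).wait()
        os._exit(0 if ret == 0 else 1)
    os.close(slave)
    _, status = os.waitpid(pid, 0)
    return os.WEXITSTATUS(status)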

File diff suppressed because it is too large