#!/usr/bin/env python
# vim: noet
import argparse
import yaml
import os
import subprocess
import time
import tempfile
import shutil
import re
import stat
import signal
import atexit
import sys
import linecache
import random
import string
import imp
import socket
import fcntl

os.chdir(os.path.dirname(os.path.abspath(__file__)))
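
# Debug tracer installed via sys.settrace() when --debug is passed:
# it prints each line of this script as it executes, showing a
# repeated line as " ...".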
prev_line = None

def traceit(f, e, a):
	if e == "line":
		lineno = f.f_lineno
		fil = f.f_globals["__file__"]
		if fil.endswith("zdtm.py"):
			global prev_line
			line = linecache.getline(fil, lineno)
			if line == prev_line:
				print " ..."
			else:
				prev_line = line
				print "+%4d: %s" % (lineno, line.rstrip())

	return traceit

# Root dir for ns and uns flavors. All tests
# sit in the same dir
tests_root = None

def clean_tests_root():
	global tests_root
	subprocess.call(["umount", tests_root])
	if tests_root:
		os.rmdir(tests_root)

def make_tests_root():
	global tests_root
	if not tests_root:
		tests_root = tempfile.mkdtemp("", "criu-root-", "/tmp")
		atexit.register(clean_tests_root)
	return tests_root

# Report generation

report_dir = None

def init_report(path):
	global report_dir
	report_dir = path
	if not os.access(report_dir, os.F_OK):
		os.makedirs(report_dir)
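
# Copy a file or directory into the report dir (used when --report is
# given); an already existing target name gets a numeric ".N" suffix so
# earlier artifacts are not overwritten.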
def add_to_report(path, tgt_name):
	global report_dir
	if report_dir:
		tgt_path = os.path.join(report_dir, tgt_name)
		att = 0
		while os.access(tgt_path, os.F_OK):
			tgt_path = os.path.join(report_dir, tgt_name + ".%d" % att)
			att += 1

		if os.path.isdir(path):
			shutil.copytree(path, tgt_path)
		else:
			shutil.copy2(path, tgt_path)

# Arch we run on
arch = os.uname()[4]

#
# Flavors
#  h   -- host, test is run in the same set of namespaces as criu
#  ns  -- namespaces, test is run in its own set of namespaces
#  uns -- user namespace, the same as above plus user namespace
#

class host_flavor:
	def __init__(self, opts):
		self.name = "host"
		self.ns = False
		self.root = None

	def init(self, test_bin, deps):
		pass

	def fini(self):
		pass
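
# The ns/uns flavors run each test in its own namespaces inside a minimal
# root: the current directory is bind-mounted into a fresh tempdir and the
# test binary, its declared deps and the libraries ldd reports for them
# are copied in.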
class ns_flavor:
	def __init__(self, opts):
		self.name = "ns"
		self.ns = True
		self.uns = False
		self.root = make_tests_root()

	def __copy_one(self, fname):
		if not os.access(fname, os.F_OK):
			raise test_fail_exc("Deps check (%s doesn't exist)" % fname)

		tfname = self.root + fname
		if not os.access(tfname, os.F_OK):
			# Copying should be atomic as tests can be
			# run in parallel
			try:
				os.makedirs(self.root + os.path.dirname(fname))
			except:
				pass
			dst = tempfile.mktemp(".tso", "", self.root + os.path.dirname(fname))
			shutil.copy2(fname, dst)
			os.rename(dst, tfname)

	def __copy_libs(self, binary):
		ldd = subprocess.Popen(["ldd", binary], stdout = subprocess.PIPE)
		xl = re.compile('^(linux-gate.so|linux-vdso(64)?.so|not a dynamic)')

		# This Mayakovsky-style code gets list of libraries a binary
		# needs minus vdso and gate .so-s
		libs = map(lambda x: x[1] == '=>' and x[2] or x[0], \
			map(lambda x: x.split(), \
			filter(lambda x: not xl.match(x), \
			map(lambda x: x.strip(), \
			filter(lambda x: x.startswith('\t'), ldd.stdout.readlines())))))
		ldd.wait()

		for lib in libs:
			self.__copy_one(lib)

	def init(self, test_bin, deps):
		subprocess.check_call(["mount", "--make-private", "--bind", ".", self.root])

		if not os.access(self.root + "/.constructed", os.F_OK):
			with open(os.path.abspath(__file__)) as o:
				fcntl.flock(o, fcntl.LOCK_EX)
				if not os.access(self.root + "/.constructed", os.F_OK):
					print "Construct root for %s" % test_bin
					for dir in ["/bin", "/sbin", "/etc", "/lib", "/lib64", "/dev", "/tmp", "/usr"]:
						os.mkdir(self.root + dir)
						os.chmod(self.root + dir, 0777)

					os.mknod(self.root + "/dev/tty", stat.S_IFCHR, os.makedev(5, 0))
					os.chmod(self.root + "/dev/tty", 0666)
					for ldir in [ "/bin", "/sbin", "/lib", "/lib64" ]:
						os.symlink(".." + ldir, self.root + "/usr" + ldir)
					os.mknod(self.root + "/.constructed", stat.S_IFREG | 0600)

		self.__copy_libs(test_bin)
		for dep in deps:
			self.__copy_one(dep)
			self.__copy_libs(dep)

	def fini(self):
		subprocess.check_call(["mount", "--make-private", self.root])
		subprocess.check_call(["umount", "-l", self.root])

class userns_flavor(ns_flavor):
	def __init__(self, opts):
		ns_flavor.__init__(self, opts)
		self.name = "userns"
		self.uns = True

	def init(self, test_bin, deps):
		# To be able to create roots_yard in CRIU
		os.chmod(".", os.stat(".").st_mode | 0077)
		ns_flavor.init(self, test_bin, deps)

flavors = { 'h': host_flavor, 'ns': ns_flavor, 'uns': userns_flavor }

#
# Helpers
#

def tail(path):
	p = subprocess.Popen(['tail', '-n1', path],
			stdout = subprocess.PIPE)
	return p.stdout.readline()

def rpidfile(path):
	return open(path).readline().strip()
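
# Poll a pid with kill(pid, 0), doubling the delay each round (0.1s,
# 0.2s, ...) until the process disappears or the delay reaches tmo,
# in which case the test is failed.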
def wait_pid_die(pid, who, tmo = 30):
	stime = 0.1
	while stime < tmo:
		try:
			os.kill(int(pid), 0)
		except: # Died
			break

		print "Wait for %s to die for %f" % (who, stime)
		time.sleep(stime)
		stime *= 2
	else:
		raise test_fail_exc("%s die" % who)

def test_flag(tdesc, flag):
	return flag in tdesc.get('flags', '').split()

#
# Exception thrown when something inside the test goes wrong,
# e.g. test doesn't start, criu returns with non-zero code or
# test checks fail
#
class test_fail_exc:
	def __init__(self, step):
		self.step = step

class test_fail_expected_exc:
	def __init__(self, cr_action):
		self.cr_action = cr_action

#
# A test from zdtm/ directory.
#

class zdtm_test:
	def __init__(self, name, desc, flavor):
		self.__name = name
		self.__desc = desc
		self.__make_action('cleanout')
		self.__pid = 0
		self.__flavor = flavor
		self.auto_reap = True
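
	# Tests are driven through their Makefile: "make <name>.<action>",
	# e.g. the <name>.pid target starts the test and writes its pidfile,
	# while <name>.cleanout is used above to clean up a previous run.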
	def __make_action(self, act, env = None, root = None):
		sys.stdout.flush() # Not to let make's messages appear before ours
		tpath = self.__name + '.' + act
		s_args = ['make', '--no-print-directory', \
				'-C', os.path.dirname(tpath), \
				os.path.basename(tpath)]

		if env:
			env = dict(os.environ, **env)

		s = subprocess.Popen(s_args, env = env, cwd = root)
		s.wait()

	def __pidfile(self):
		if self.__flavor.ns:
			return self.__name + '.init.pid'
		else:
			return self.__name + '.pid'

	def __wait_task_die(self):
		wait_pid_die(int(self.__pid), self.__name)

	def start(self):
		env = {}
		self.__flavor.init(self.__name, self.__desc.get('deps', []))

		print "Start test"

		env['ZDTM_THREAD_BOMB'] = "100"
		if not test_flag(self.__desc, 'suid'):
			env['ZDTM_UID'] = "18943"
			env['ZDTM_GID'] = "58467"
			env['ZDTM_GROUPS'] = "27495 48244"

			# Add write perms for .out and .pid files
			p = os.path.dirname(self.__name)
			os.chmod(p, os.stat(p).st_mode | 0222)
		else:
			print "Test is SUID"

		if self.__flavor.ns:
			env['ZDTM_NEWNS'] = "1"
			env['ZDTM_PIDFILE'] = os.path.realpath(self.__name + '.init.pid')
			env['ZDTM_ROOT'] = self.__flavor.root

			if self.__flavor.uns:
				env['ZDTM_USERNS'] = "1"
				p = os.path.dirname(self.__name)
				os.chmod(p, os.stat(p).st_mode | 0222)

		self.__make_action('pid', env, self.__flavor.root)

		try:
			os.kill(int(self.getpid()), 0)
		except:
			raise test_fail_exc("start")

	def kill(self, sig = signal.SIGKILL):
		if self.__pid:
			os.kill(int(self.__pid), sig)
			self.gone(sig == signal.SIGKILL)

		self.__flavor.fini()

	def stop(self):
		self.getpid() # Read the pid from pidfile back
		self.kill(signal.SIGTERM)

		res = tail(self.__name + '.out')
		if not 'PASS' in res.split():
			raise test_fail_exc("result check")

	def getpid(self):
		if self.__pid == 0:
			self.__pid = rpidfile(self.__pidfile())

		return self.__pid

	def getname(self):
		return self.__name

	def __getcropts(self):
		opts = self.__desc.get('opts', '').split() + ["--pidfile", os.path.realpath(self.__pidfile())]
		if self.__flavor.ns:
			opts += ["--root", self.__flavor.root]
		if test_flag(self.__desc, 'crlib'):
			opts += ["-L", os.path.dirname(os.path.realpath(self.__name)) + '/lib']
		return opts

	def getdopts(self):
		return self.__getcropts()

	def getropts(self):
		return self.__getcropts()

	def gone(self, force = True):
		if not self.auto_reap:
			pid, status = os.waitpid(int(self.__pid), 0)
			if pid != int(self.__pid):
				raise test_fail_exc("kill pid mess")

		self.__wait_task_die()
		self.__pid = 0
		if force or self.__flavor.ns:
			os.unlink(self.__pidfile())

	def print_output(self):
		if os.access(self.__name + '.out', os.R_OK):
			print "Test output: " + "=" * 32
			print open(self.__name + '.out').read()
			print " <<< " + "=" * 32

	def static(self):
		return self.__name.split('/')[2] == 'static'

	def blocking(self):
		return test_flag(self.__desc, 'crfail')

	@staticmethod
	def available():
		if not os.access("zdtm_ct", os.X_OK):
			subprocess.check_call(["make", "zdtm_ct"])
		if not os.access("zdtm/lib/libzdtmtst.a", os.F_OK):
			subprocess.check_call(["make", "-C", "zdtm/"])
		subprocess.check_call(["flock", "zdtm_mount_cgroups", "./zdtm_mount_cgroups"])
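
# An "inhfd" test: a small helper module (loaded with imp.load_source)
# supplies create_fds()/filename()/dump_opts(); the peer end is held by a
# forked child, and at restore time a freshly created peer fd is handed
# to criu via --inherit-fd.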
class inhfd_test:
	def __init__(self, name, desc, flavor):
		self.__name = os.path.basename(name)
		print "Load %s" % name
		self.__fdtyp = imp.load_source(self.__name, name)
		self.__my_file = None
		self.__peer_pid = 0
		self.__peer_file = None
		self.__peer_file_name = None
		self.__dump_opts = None

	def start(self):
		self.__message = "".join([random.choice(string.ascii_letters) for _ in range(16)])
		(self.__my_file, peer_file) = self.__fdtyp.create_fds()

		# Check FDs returned for inter-connection
		self.__my_file.write(self.__message)
		self.__my_file.flush()
		if peer_file.read(16) != self.__message:
			raise test_fail_exc("FDs screwup")

		start_pipe = os.pipe()
		self.__peer_pid = os.fork()
		if self.__peer_pid == 0:
			os.setsid()
			os.close(0)
			os.close(1)
			os.close(2)
			self.__my_file.close()
			os.close(start_pipe[0])
			os.close(start_pipe[1])
			try:
				data = peer_file.read(16)
			except:
				sys.exit(1)

			sys.exit(data == self.__message and 42 or 2)

		os.close(start_pipe[1])
		os.read(start_pipe[0], 12)
		os.close(start_pipe[0])

		self.__peer_file_name = self.__fdtyp.filename(peer_file)
		self.__dump_opts = self.__fdtyp.dump_opts(peer_file)

	def stop(self):
		self.__my_file.write(self.__message)
		self.__my_file.flush()
		pid, status = os.waitpid(self.__peer_pid, 0)
		if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 42:
			raise test_fail_exc("test failed with %d" % status)

	def kill(self):
		if self.__peer_pid:
			os.kill(self.__peer_pid, signal.SIGKILL)

	def getname(self):
		return self.__name

	def getpid(self):
		return "%s" % self.__peer_pid

	def gone(self, force = True):
		os.waitpid(self.__peer_pid, 0)
		wait_pid_die(self.__peer_pid, self.__name)
		self.__my_file = None
		self.__peer_file = None

	def getdopts(self):
		return self.__dump_opts

	def getropts(self):
		(self.__my_file, self.__peer_file) = self.__fdtyp.create_fds()
		return ["--restore-sibling", "--inherit-fd", "fd[%d]:%s" % (self.__peer_file.fileno(), self.__peer_file_name)]

	def print_output(self):
		pass

	def static(self):
		return True

	def blocking(self):
		return False

	@staticmethod
	def available():
		pass

test_classes = { 'zdtm': zdtm_test, 'inhfd': inhfd_test }

#
# CRIU when launched using CLI
#

criu_bin = "../criu"
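
# Thin wrapper around the criu binary. Images and logs for a test go to
# dump/<test-name>/<pid>/<iteration>, and every criu call is logged into
# <action>.log plus an <action>.cropt file holding the options used.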
class criu_cli:
	def __init__(self, opts):
		self.__test = None
		self.__dump_path = None
		self.__iter = 0
		self.__prev_dump_iter = None
		self.__page_server = (opts['page_server'] and True or False)
		self.__restore_sibling = (opts['sibling'] and True or False)
		self.__fault = (opts['fault'])

	def logs(self):
		return self.__dump_path

	def set_test(self, test):
		self.__test = test
		self.__dump_path = "dump/" + test.getname() + "/" + test.getpid()
		if os.path.exists(self.__dump_path):
			for i in xrange(100):
				newpath = self.__dump_path + "." + str(i)
				if not os.path.exists(newpath):
					os.rename(self.__dump_path, newpath)
					break
			else:
				raise test_fail_exc("couldn't find dump dir %s" % self.__dump_path)

		os.makedirs(self.__dump_path)

	def cleanup(self):
		if self.__dump_path:
			print "Removing %s" % self.__dump_path
			shutil.rmtree(self.__dump_path)

	def __ddir(self):
		return os.path.join(self.__dump_path, "%d" % self.__iter)

	@staticmethod
	def __criu(action, args, fault = None):
		env = None
		if fault:
			print "Forcing %s fault" % fault
			env = dict(os.environ, CRIU_FAULT = fault)
		cr = subprocess.Popen([criu_bin, action] + args, env = env)
		return cr.wait()

	def __criu_act(self, action, opts, log = None):
		if not log:
			log = action + ".log"

		s_args = ["-o", log, "-D", self.__ddir(), "-v4"] + opts

		with open(os.path.join(self.__ddir(), action + '.cropt'), 'w') as f:
			f.write(' '.join(s_args) + '\n')
		print "Run criu " + action

		ret = self.__criu(action, s_args, self.__fault)
		grep_errors(os.path.join(self.__ddir(), log))
		if ret != 0:
			if self.__fault or self.__test.blocking():
				raise test_fail_expected_exc(action)
			else:
				raise test_fail_exc("CRIU %s" % action)

	def dump(self, action, opts = []):
		self.__iter += 1
		os.mkdir(self.__ddir())

		a_opts = ["-t", self.__test.getpid()]
		if self.__prev_dump_iter:
			a_opts += ["--prev-images-dir", "../%d" % self.__prev_dump_iter, "--track-mem"]
		self.__prev_dump_iter = self.__iter

		if self.__page_server:
			print "Adding page server"
			self.__criu_act("page-server", opts = [ "--port", "12345", \
					"--daemon", "--pidfile", "ps.pid"])
			a_opts += ["--page-server", "--address", "127.0.0.1", "--port", "12345"]

		a_opts += self.__test.getdopts()

		self.__criu_act(action, opts = a_opts + opts)

		if self.__page_server:
			wait_pid_die(int(rpidfile(self.__ddir() + "/ps.pid")), "page server")

	def restore(self):
		r_opts = []
		if self.__restore_sibling:
			r_opts = ["--restore-sibling"]
			self.__test.auto_reap = False
		r_opts += self.__test.getropts()

		self.__prev_dump_iter = None
		self.__criu_act("restore", opts = r_opts + ["--restore-detached"])

	@staticmethod
	def check(feature):
		return criu_cli.__criu("check", ["-v0", "--feature", feature]) == 0

	@staticmethod
	def available():
		if not os.access(criu_bin, os.X_OK):
			print "CRIU binary not built"
			sys.exit(1)
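
# Run an optional per-test hook script, <test-name>.hook, with stage
# arguments such as --pre-restore, --clean or --fault <action>.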
def try_run_hook(test, args):
	hname = test.getname() + '.hook'
	if os.access(hname, os.X_OK):
		print "Running %s(%s)" % (hname, ', '.join(args))
		hook = subprocess.Popen([hname] + args)
		if hook.wait() != 0:
			raise test_fail_exc("hook " + " ".join(args))

#
# Main testing entity -- dump (probably with pre-dumps) and restore
#
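
# Parse an "n[:pause]" option, e.g. iter_parm("3:0.5", 1) gives
# (xrange(0, 3), 0.5) and iter_parm(None, 1) gives (xrange(0, 1), 0.0).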
def iter_parm(opt, dflt):
	x = ((opt or str(dflt)) + ":0").split(':')
	return (xrange(0, int(x[0])), float(x[1]))

def cr(cr_api, test, opts):
	if opts['nocr']:
		return

	cr_api.set_test(test)

	iters = iter_parm(opts['iters'], 1)
	for i in iters[0]:
		pres = iter_parm(opts['pre'], 0)
		for p in pres[0]:
			if opts['snaps']:
				cr_api.dump("dump", opts = ["--leave-running"])
			else:
				cr_api.dump("pre-dump")
			time.sleep(pres[1])

		if opts['norst']:
			cr_api.dump("dump", opts = ["--leave-running"])
		else:
			cr_api.dump("dump")
			test.gone()
			try_run_hook(test, ["--pre-restore"])
			cr_api.restore()

		time.sleep(iters[1])

# Additional checks that can be done outside of test process
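
# For "static" tests the set of open fds and the (coalesced) memory
# mappings are recorded before C/R and compared after restore; any
# difference fails the test.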
def get_maps(test):
	maps = [[0,0]]
	last = 0
	for mp in open("/proc/%s/maps" % test.getpid()).readlines():
		m = map(lambda x: int('0x' + x, 0), mp.split()[0].split('-'))
		if maps[last][1] == m[0]:
			maps[last][1] = m[1]
		else:
			maps.append(m)
			last += 1
	maps.pop(0)
	return maps

def get_fds(test):
	return map(lambda x: int(x), os.listdir("/proc/%s/fdinfo" % test.getpid()))

def cmp_lists(m1, m2):
	return len(m1) != len(m2) or filter(lambda x: x[0] != x[1], zip(m1, m2))

def get_visible_state(test):
	if test.static():
		fds = get_fds(test)
		maps = get_maps(test)
		return (fds, maps)
	else:
		return ([], [])

def check_visible_state(test, state):
	new = get_visible_state(test)
	if cmp_lists(new[0], state[0]):
		raise test_fail_exc("fds compare")
	if cmp_lists(new[1], state[1]):
		s_new = set(map(lambda x: '%x-%x' % (x[0], x[1]), new[1]))
		s_old = set(map(lambda x: '%x-%x' % (x[0], x[1]), state[1]))

		print "Old maps lost:"
		print s_old - s_new
		print "New maps appeared:"
		print s_new - s_old

		raise test_fail_exc("maps compare")
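
# Run one test in every requested flavor: start it, snapshot its visible
# state, do the dump/restore cycle, re-check the state, then stop it and
# clean up (keeping images and logs according to --keep-img/--report).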
def do_run_test(tname, tdesc, flavs, opts):
	tcname = tname.split('/')[0]
	tclass = test_classes.get(tcname, None)
	if not tclass:
		print "Unknown test class %s" % tcname
		return

	if opts['report']:
		init_report(opts['report'])

	for f in flavs:
		print
		print_sep("Run %s in %s" % (tname, f))
		flav = flavors[f](opts)
		t = tclass(tname, tdesc, flav)
		cr_api = criu_cli(opts)

		try:
			t.start()
			s = get_visible_state(t)
			try:
				cr(cr_api, t, opts)
			except test_fail_expected_exc as e:
				if e.cr_action == "dump":
					t.stop()
				try_run_hook(t, ["--fault", e.cr_action])
			else:
				check_visible_state(t, s)
				t.stop()
				try_run_hook(t, ["--clean"])
		except test_fail_exc as e:
			print_sep("Test %s FAIL at %s" % (tname, e.step), '#')
			t.print_output()
			t.kill()
			add_to_report(cr_api.logs(), "cr_logs")
			if opts['keep_img'] == 'never':
				cr_api.cleanup()
			# This exit does two things -- exits from subprocess and
			# aborts the main script execution on the 1st error met
			sys.exit(1)
		else:
			if opts['keep_img'] != 'always':
				cr_api.cleanup()
			print_sep("Test %s PASS" % tname)
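
# The launcher re-executes this very script through ./zdtm_ct for every
# test, passing the test info in the CR_CT_TEST_INFO environment variable,
# and keeps up to --parallel such children running at a time.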
class launcher:
	def __init__(self, opts, nr_tests):
		self.__opts = opts
		self.__total = nr_tests
		self.__nr = 0
		self.__max = int(opts['parallel'] or 1)
		self.__subs = {}
		self.__fail = False

	def __show_progress(self):
		perc = self.__nr * 16 / self.__total
		print "=== Run %d/%d %s" % (self.__nr, self.__total, '=' * perc + '-' * (16 - perc))

	def run_test(self, name, desc, flavor):
		if len(self.__subs) >= self.__max:
			self.wait()

		if test_flag(desc, 'excl'):
			self.wait_all()

		self.__nr += 1
		self.__show_progress()

		nd = ('nocr', 'norst', 'pre', 'iters', 'page_server', 'sibling', 'fault', 'keep_img', 'report', 'snaps')
		arg = repr((name, desc, flavor, { d: self.__opts[d] for d in nd }))
		log = name.replace('/', '_') + ".log"
		sub = subprocess.Popen(["./zdtm_ct", "zdtm.py"], \
				env = dict(os.environ, CR_CT_TEST_INFO = arg ), \
				stdout = open(log, "w"), stderr = subprocess.STDOUT)
		self.__subs[sub.pid] = { 'sub': sub, 'log': log }

		if test_flag(desc, 'excl'):
			self.wait()

	def __wait_one(self, flags):
		pid, status = os.waitpid(0, flags)
		if pid != 0:
			sub = self.__subs.pop(pid)
			if status != 0:
				self.__fail = True
				add_to_report(sub['log'], "output")

			print open(sub['log']).read()
			os.unlink(sub['log'])
			return True

		return False

	def __wait_all(self):
		while self.__subs:
			self.__wait_one(0)

	def wait(self):
		self.__wait_one(0)
		while self.__subs:
			if not self.__wait_one(os.WNOHANG):
				break
		if self.__fail:
			raise test_fail_exc('')

	def wait_all(self):
		self.__wait_all()
		if self.__fail:
			raise test_fail_exc('')

	def finish(self):
		self.__wait_all()
		if self.__fail:
			print_sep("FAIL", "#")
			sys.exit(1)

def all_tests(opts):
	desc = eval(open(opts['set'] + '.desc').read())
	lst = subprocess.Popen(['find', desc['dir'], '-type', 'f', '-executable' ], \
			stdout = subprocess.PIPE)
	excl = map(lambda x: os.path.join(desc['dir'], x), desc['exclude'])
	tlist = filter(lambda x: \
			not x.endswith('.checkskip') and \
			not x.endswith('.hook') and \
			not x in excl, \
			map(lambda x: x.strip(), lst.stdout.readlines()) \
		)
	lst.wait()
	return tlist

# Descriptor for abstract test not in list
default_test={ }
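
# Per-test descriptors are <test>.desc files holding a python dict, e.g.
# {'flavor': 'h ns', 'flags': 'suid excl', 'deps': ['/bin/sleep']};
# 'feature', 'arch' and 'opts' (extra criu options) are also recognized.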
def get_test_desc(tname):
	d_path = tname + '.desc'
	if os.access(d_path, os.F_OK):
		return eval(open(d_path).read())

	return default_test

def self_checkskip(tname):
	chs = tname + '.checkskip'
	if os.access(chs, os.X_OK):
		ch = subprocess.Popen([chs])
		return ch.wait() == 0 and False or True

	return False

def print_sep(title, sep = "=", width = 80):
	print (" " + title + " ").center(width, sep)

def grep_errors(fname):
	first = True
	for l in open(fname):
		if "Error" in l:
			if first:
				print_sep("grep Error", "-", 60)
				first = False
			print l,
	if not first:
		print_sep("ERROR OVER", "-", 60)

def run_tests(opts):
	excl = None
	features = {}

	if opts['all']:
		torun = all_tests(opts)
		run_all = True
	elif opts['tests']:
		r = re.compile(opts['tests'])
		torun = filter(lambda x: r.match(x), all_tests(opts))
		run_all = True
	elif opts['test']:
		torun = opts['test']
		run_all = False
	else:
		print "Specify test with -t <name> or -a"
		return

	if opts['exclude']:
		excl = re.compile(".*(" + "|".join(opts['exclude']) + ")")
		print "Compiled exclusion list"

	if opts['report']:
		init_report(opts['report'])

	l = launcher(opts, len(torun))
	try:
		for t in torun:
			global arch

			if excl and excl.match(t):
				print "Skipping %s (exclude)" % t
				continue

			tdesc = get_test_desc(t)
			if tdesc.get('arch', arch) != arch:
				print "Skipping %s (arch %s)" % (t, tdesc['arch'])
				continue

			if run_all and test_flag(tdesc, 'noauto'):
				print "Skipping test %s (manual run only)" % t
				continue

			feat = tdesc.get('feature', None)
			if feat:
				if not features.has_key(feat):
					print "Checking feature %s" % feat
					features[feat] = criu_cli.check(feat)

				if not features[feat]:
					print "Skipping %s (no %s feature)" % (t, feat)
					continue

			if self_checkskip(t):
				print "Skipping %s (self)" % t
				continue

			test_flavs = tdesc.get('flavor', 'h ns uns').split()
			opts_flavs = (opts['flavor'] or 'h,ns,uns').split(',')
			run_flavs = set(test_flavs) & set(opts_flavs)

			if run_flavs:
				l.run_test(t, tdesc, run_flavs)
	finally:
		l.finish()

sti_fmt = "%-40s%-10s%s"

def show_test_info(t):
	tdesc = get_test_desc(t)
	flavs = tdesc.get('flavor', '')
	return sti_fmt % (t, flavs, tdesc.get('flags', ''))

def list_tests(opts):
	tlist = all_tests(opts)
	if opts['info']:
		print sti_fmt % ('Name', 'Flavors', 'Flags')
		tlist = map(lambda x: show_test_info(x), tlist)
	print '\n'.join(tlist)

#
# main() starts here
#
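
# When launcher.run_test() re-executes this script inside ./zdtm_ct it sets
# CR_CT_TEST_INFO; in that mode we only run the one test described there.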
if os.environ.has_key('CR_CT_TEST_INFO'):
	# Fork here, since we're new pidns init and are supposed to
	# collect this namespace's zombies
	status = 0
	pid = os.fork()
	if pid == 0:
		tinfo = eval(os.environ['CR_CT_TEST_INFO'])
		do_run_test(tinfo[0], tinfo[1], tinfo[2], tinfo[3])
	else:
		while True:
			wpid, status = os.wait()
			if wpid == pid:
				if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 0:
					status = 1
				break

	sys.exit(status)

p = argparse.ArgumentParser("CRIU test suite")
p.add_argument("--debug", help = "Print what's being executed", action = 'store_true')
p.add_argument("--set", help = "Which set of tests to use", default = 'zdtm')

sp = p.add_subparsers(help = "Use --help for list of actions")

rp = sp.add_parser("run", help = "Run test(s)")
rp.set_defaults(action = run_tests)
rp.add_argument("-a", "--all", action = 'store_true')
rp.add_argument("-t", "--test", help = "Test name", action = 'append')
rp.add_argument("-T", "--tests", help = "Run tests matching the regexp")
rp.add_argument("-f", "--flavor", help = "Flavor to run")
rp.add_argument("-x", "--exclude", help = "Exclude tests from --all run", action = 'append')

rp.add_argument("--sibling", help = "Restore tests as siblings", action = 'store_true')
rp.add_argument("--pre", help = "Do some pre-dumps before dump (n[:pause])")
rp.add_argument("--snaps", help = "Instead of pre-dumps do full dumps", action = 'store_true')
rp.add_argument("--nocr", help = "Do not CR anything, just check test works", action = 'store_true')
rp.add_argument("--norst", help = "Don't restore tasks, leave them running after dump", action = 'store_true')
rp.add_argument("--iters", help = "Do CR cycle several times before check (n[:pause])")
rp.add_argument("--fault", help = "Test fault injection")

rp.add_argument("--page-server", help = "Use page server dump", action = 'store_true')
rp.add_argument("-p", "--parallel", help = "Run tests in parallel")

rp.add_argument("-k", "--keep-img", help = "Whether or not to keep images after test",
		choices = [ 'always', 'never', 'failed' ], default = 'failed')
rp.add_argument("--report", help = "Generate summary report in directory")

lp = sp.add_parser("list", help = "List tests")
lp.set_defaults(action = list_tests)
lp.add_argument('-i', '--info', help = "Show more info about tests", action = 'store_true')

opts = vars(p.parse_args())

if opts['debug']:
	sys.settrace(traceit)

criu_cli.available()
for tst in test_classes.values():
	tst.available()

opts['action'](opts)