#!/usr/bin/env python2
# vim: noet
import argparse
import os
import subprocess
import time
import tempfile
import shutil
import re
import stat
import signal
import atexit
import sys
import linecache
import random
import string
import imp
import socket
import fcntl
import errno

os.chdir(os.path.dirname(os.path.abspath(__file__)))

prev_line = None

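# Debug helper: when installed with sys.settrace(traceit), this callback
# prints every zdtm.py source line as it executes, collapsing runs of the
# same line into a single " ..." marker.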
def traceit(f, e, a):
	if e == "line":
		lineno = f.f_lineno
		fil = f.f_globals["__file__"]
		if fil.endswith("zdtm.py"):
			global prev_line
			line = linecache.getline(fil, lineno)
			if line == prev_line:
				print " ..."
			else:
				prev_line = line
				print "+%4d: %s" % (lineno, line.rstrip())

	return traceit

# Root dir for ns and uns flavors. All tests
# sit in the same dir
tests_root = None

def clean_tests_root():
	global tests_root
	if tests_root:
		os.rmdir(tests_root)

def make_tests_root():
	global tests_root
	if not tests_root:
		tests_root = tempfile.mkdtemp("", "criu-root-", "/tmp")
		atexit.register(clean_tests_root)
	return tests_root

# Report generation

report_dir = None

def init_report(path):
	global report_dir
	report_dir = path
	if not os.access(report_dir, os.F_OK):
		os.makedirs(report_dir)

def add_to_report(path, tgt_name):
	global report_dir
	if report_dir:
		tgt_path = os.path.join(report_dir, tgt_name)
		att = 0
		while os.access(tgt_path, os.F_OK):
			tgt_path = os.path.join(report_dir, tgt_name + ".%d" % att)
			att += 1

		if os.path.isdir(path):
			shutil.copytree(path, tgt_path)
		else:
			shutil.copy2(path, tgt_path)

# Arch we run on
arch = os.uname()[4]

#
# Flavors
# h -- host, test is run in the same set of namespaces as criu
# ns -- namespaces, test is run in its own set of namespaces
# uns -- user namespace, the same as above plus user namespace
#

class host_flavor:
	def __init__(self, opts):
		self.name = "host"
		self.ns = False
		self.root = None

	def init(self, l_bins, x_bins):
		pass

	def fini(self):
		pass

	@staticmethod
	def clean():
		pass

class ns_flavor:
	__root_dirs = ["/bin", "/sbin", "/etc", "/lib", "/lib64", "/dev", "/dev/pts", "/dev/net", "/tmp", "/usr", "/proc"]

	def __init__(self, opts):
		self.name = "ns"
		self.ns = True
		self.uns = False
		self.root = make_tests_root()
		self.root_mounted = False

	def __copy_one(self, fname):
		tfname = self.root + fname
		if not os.access(tfname, os.F_OK):
			# Copying should be atomic as tests can be
			# run in parallel
			try:
				os.makedirs(self.root + os.path.dirname(fname))
			except:
				pass
			dst = tempfile.mktemp(".tso", "", self.root + os.path.dirname(fname))
			shutil.copy2(fname, dst)
			os.rename(dst, tfname)

	def __copy_libs(self, binary):
		ldd = subprocess.Popen(["ldd", binary], stdout = subprocess.PIPE)
		xl = re.compile('^(linux-gate.so|linux-vdso(64)?.so|not a dynamic)')

		# This Mayakovsky-style code gets the list of libraries the binary
		# needs, minus the vdso and gate .so-s
		libs = map(lambda x: x[1] == '=>' and x[2] or x[0], \
				map(lambda x: x.split(), \
					filter(lambda x: not xl.match(x), \
						map(lambda x: x.strip(), \
							filter(lambda x: x.startswith('\t'), ldd.stdout.readlines())))))
		ldd.wait()

		for lib in libs:
			if not os.access(lib, os.F_OK):
				raise test_fail_exc("Can't find lib %s required by %s" % (lib, binary))
			self.__copy_one(lib)

	def __mknod(self, name, rdev = None):
		name = "/dev/" + name
		if not rdev:
			if not os.access(name, os.F_OK):
				print "Skipping %s at root" % name
				return
			else:
				rdev = os.stat(name).st_rdev

		name = self.root + name
		os.mknod(name, stat.S_IFCHR, rdev)
		os.chmod(name, 0666)

	def __construct_root(self):
		for dir in self.__root_dirs:
			os.mkdir(self.root + dir)
			os.chmod(self.root + dir, 0777)

		for ldir in [ "/bin", "/sbin", "/lib", "/lib64" ]:
			os.symlink(".." + ldir, self.root + "/usr" + ldir)

		self.__mknod("tty", os.makedev(5, 0))
		self.__mknod("null", os.makedev(1, 3))
		self.__mknod("net/tun")
		self.__mknod("rtc")

	def __copy_deps(self, deps):
		for d in deps.split('|'):
			if os.access(d, os.F_OK):
				self.__copy_one(d)
				self.__copy_libs(d)
				return
		raise test_fail_exc("Deps check %s failed" % deps)

	def init(self, l_bins, x_bins):
		subprocess.check_call(["mount", "--make-slave", "--bind", ".", self.root])
		self.root_mounted = True

		if not os.access(self.root + "/.constructed", os.F_OK):
			with open(os.path.abspath(__file__)) as o:
				fcntl.flock(o, fcntl.LOCK_EX)
				if not os.access(self.root + "/.constructed", os.F_OK):
					print "Construct root for %s" % l_bins[0]
					self.__construct_root()
					os.mknod(self.root + "/.constructed", stat.S_IFREG | 0600)

		for b in l_bins:
			self.__copy_libs(b)
		for b in x_bins:
			self.__copy_deps(b)

	def fini(self):
		if self.root_mounted:
			subprocess.check_call(["mount", "--make-private", self.root])
			subprocess.check_call(["umount", "-l", self.root])
			self.root_mounted = False

	@staticmethod
	def clean():
		for d in ns_flavor.__root_dirs:
			p = './' + d
			print 'Remove %s' % p
			if os.access(p, os.F_OK):
				shutil.rmtree('./' + d)

		if os.access('./.constructed', os.F_OK):
			os.unlink('./.constructed')

class userns_flavor(ns_flavor):
	def __init__(self, opts):
		ns_flavor.__init__(self, opts)
		self.name = "userns"
		self.uns = True

	def init(self, l_bins, x_bins):
		# To be able to create roots_yard in CRIU
		os.chmod(".", os.stat(".").st_mode | 0077)
		ns_flavor.init(self, l_bins, x_bins)

	@staticmethod
	def clean():
		pass

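# Short flavor names mapped to the flavor classes above; do_run_test()
# instantiates one per requested flavor.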
flavors = { 'h': host_flavor, 'ns': ns_flavor, 'uns': userns_flavor }

#
# Helpers
#

def tail(path):
	p = subprocess.Popen(['tail', '-n1', path],
			stdout = subprocess.PIPE)
	return p.stdout.readline()

def rpidfile(path):
	return open(path).readline().strip()

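# Poll for a task to disappear: probe it with kill(pid, 0), sleeping with an
# exponentially growing interval (0.1s, 0.2s, ...) between probes, and fail
# once the interval reaches tmo seconds without the task dying.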
def wait_pid_die(pid, who, tmo = 30):
	stime = 0.1
	while stime < tmo:
		try:
			os.kill(int(pid), 0)
		except: # Died
			break

		print "Wait for %s to die for %f" % (who, stime)
		time.sleep(stime)
		stime *= 2
	else:
		raise test_fail_exc("%s die" % who)

def test_flag(tdesc, flag):
	return flag in tdesc.get('flags', '').split()

#
# Exception thrown when something inside the test goes wrong,
# e.g. test doesn't start, criu returns with non zero code or
# test checks fail
#

class test_fail_exc:
	def __init__(self, step):
		self.step = step

class test_fail_expected_exc:
	def __init__(self, cr_action):
		self.cr_action = cr_action

#
# A test from zdtm/ directory.
#

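# A zdtm test is driven through its makefile: the '<name>.cleanout' target
# cleans old results, '<name>.pid' starts the test in the background, the pid
# is then read back from the pidfile and the verdict from '<name>.out'.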
class zdtm_test:
	def __init__(self, name, desc, flavor, freezer):
		self.__name = name
		self.__desc = desc
		self.__freezer = None
		self.__make_action('cleanout')
		self.__pid = 0
		self.__flavor = flavor
		self.__freezer = freezer
		self._bins = [ name ]
		self._env = {}
		self._deps = desc.get('deps', [])
		self.auto_reap = True

	def __make_action(self, act, env = None, root = None):
		sys.stdout.flush() # Not to let make's messages appear before ours
		tpath = self.__name + '.' + act
		s_args = ['make', '--no-print-directory', \
				'-C', os.path.dirname(tpath), \
				os.path.basename(tpath)]

		if env:
			env = dict(os.environ, **env)

		s = subprocess.Popen(s_args, env = env, cwd = root, close_fds = True,
				preexec_fn = self.__freezer and self.__freezer.attach or None)
		s.wait()

		if self.__freezer:
			self.__freezer.freeze()

	def __pidfile(self):
		if self.__flavor.ns:
			return self.__name + '.init.pid'
		else:
			return self.__name + '.pid'

	def __wait_task_die(self):
		wait_pid_die(int(self.__pid), self.__name)

	def __add_wperms(self):
		# Add write perms for .out and .pid files
		for b in self._bins:
			p = os.path.dirname(b)
			os.chmod(p, os.stat(p).st_mode | 0222)

	def start(self):
		self.__flavor.init(self._bins, self._deps)

		print "Start test"

		env = self._env
		if not self.__freezer.kernel:
			env['ZDTM_THREAD_BOMB'] = "5"

		if not test_flag(self.__desc, 'suid'):
			# Numbers should match those in criu_cli
			env['ZDTM_UID'] = "18943"
			env['ZDTM_GID'] = "58467"
			env['ZDTM_GROUPS'] = "27495 48244"
			self.__add_wperms()
		else:
			print "Test is SUID"

		if self.__flavor.ns:
			env['ZDTM_NEWNS'] = "1"
			env['ZDTM_PIDFILE'] = os.path.realpath(self.__name + '.init.pid')
			env['ZDTM_ROOT'] = self.__flavor.root

			if self.__flavor.uns:
				env['ZDTM_USERNS'] = "1"
				self.__add_wperms()

		self.__make_action('pid', env, self.__flavor.root)

		try:
			os.kill(int(self.getpid()), 0)
		except:
			raise test_fail_exc("start")

	def kill(self, sig = signal.SIGKILL):
		self.__freezer.thaw()
		if self.__pid:
			os.kill(int(self.__pid), sig)
			self.gone(sig == signal.SIGKILL)

		self.__flavor.fini()

	def stop(self):
		self.__freezer.thaw()
		self.getpid() # Read the pid from pidfile back
		self.kill(signal.SIGTERM)

		res = tail(self.__name + '.out')
		if not 'PASS' in res.split():
			raise test_fail_exc("result check")

	def getpid(self):
		if self.__pid == 0:
			self.__pid = rpidfile(self.__pidfile())

		return self.__pid

	def getname(self):
		return self.__name

	def __getcropts(self):
		opts = self.__desc.get('opts', '').split() + ["--pidfile", os.path.realpath(self.__pidfile())]
		if self.__flavor.ns:
			opts += ["--root", self.__flavor.root]
		if test_flag(self.__desc, 'crlib'):
			opts += ["-L", os.path.dirname(os.path.realpath(self.__name)) + '/lib']
		return opts

	def getdopts(self):
		return self.__getcropts() + self.__freezer.getdopts()

	def getropts(self):
		return self.__getcropts() + self.__freezer.getropts()

	def gone(self, force = True):
		if not self.auto_reap:
			pid, status = os.waitpid(int(self.__pid), 0)
			if pid != int(self.__pid):
				raise test_fail_exc("kill pid mess")

		self.__wait_task_die()
		self.__pid = 0
		if force or self.__flavor.ns:
			os.unlink(self.__pidfile())

	def print_output(self):
		if os.access(self.__name + '.out', os.R_OK):
			print "Test output: " + "=" * 32
			print open(self.__name + '.out').read()
			print " <<< " + "=" * 32

	def static(self):
		return self.__name.split('/')[1] == 'static'

	def ns(self):
		return self.__flavor.ns

	def blocking(self):
		return test_flag(self.__desc, 'crfail')

	@staticmethod
	def available():
		if not os.access("zdtm_ct", os.X_OK):
			subprocess.check_call(["make", "zdtm_ct"])
		if not os.access("zdtm/lib/libzdtmtst.a", os.F_OK):
			subprocess.check_call(["make", "-C", "zdtm/"])
		subprocess.check_call(["flock", "zdtm_mount_cgroups", "./zdtm_mount_cgroups"])

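# Inherit-fd test: the test body is a separate python module (loaded with
# imp.load_source) that provides create_fds(), filename() and dump_opts().
# A forked child keeps the peer end of the fd pair, and restore is asked to
# pass a freshly created fd into it via --inherit-fd.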
class inhfd_test:
	def __init__(self, name, desc, flavor, freezer):
		self.__name = os.path.basename(name)
		print "Load %s" % name
		self.__fdtyp = imp.load_source(self.__name, name)
		self.__my_file = None
		self.__peer_pid = 0
		self.__peer_file = None
		self.__peer_file_name = None
		self.__dump_opts = None

	def start(self):
		self.__message = "".join([random.choice(string.ascii_letters) for _ in range(16)])
		(self.__my_file, peer_file) = self.__fdtyp.create_fds()

		# Check FDs returned for inter-connection
		self.__my_file.write(self.__message)
		self.__my_file.flush()
		if peer_file.read(16) != self.__message:
			raise test_fail_exc("FDs screwup")

		start_pipe = os.pipe()
		self.__peer_pid = os.fork()
		if self.__peer_pid == 0:
			os.setsid()

			getattr(self.__fdtyp, "child_prep", lambda fd : None)(peer_file)

			os.close(0)
			os.close(1)
			os.close(2)
			self.__my_file.close()
			os.close(start_pipe[0])
			os.close(start_pipe[1])
			try:
				data = peer_file.read(16)
			except:
				sys.exit(1)

			sys.exit(data == self.__message and 42 or 2)

		os.close(start_pipe[1])
		os.read(start_pipe[0], 12)
		os.close(start_pipe[0])

		self.__peer_file_name = self.__fdtyp.filename(peer_file)
		self.__dump_opts = self.__fdtyp.dump_opts(peer_file)

	def stop(self):
		self.__my_file.write(self.__message)
		self.__my_file.flush()
		pid, status = os.waitpid(self.__peer_pid, 0)
		if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 42:
			raise test_fail_exc("test failed with %d" % status)

	def kill(self):
		if self.__peer_pid:
			os.kill(self.__peer_pid, signal.SIGKILL)

	def getname(self):
		return self.__name

	def getpid(self):
		return "%s" % self.__peer_pid

	def gone(self, force = True):
		os.waitpid(self.__peer_pid, 0)
		wait_pid_die(self.__peer_pid, self.__name)
		self.__my_file = None
		self.__peer_file = None

	def getdopts(self):
		return self.__dump_opts

	def getropts(self):
		(self.__my_file, self.__peer_file) = self.__fdtyp.create_fds()
		return ["--restore-sibling", "--inherit-fd", "fd[%d]:%s" % (self.__peer_file.fileno(), self.__peer_file_name)]

	def print_output(self):
		pass

	def static(self):
		return True

	def blocking(self):
		return False

	@staticmethod
	def available():
		pass

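# A "group" test is the zdtm/lib/groups helper running several zdtm tests at
# once (ns flavor only): the group file lists the sub-tests, and generated
# .start/.stop scripts launch and terminate them.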
class groups_test(zdtm_test):
	def __init__(self, name, desc, flavor, freezer):
		zdtm_test.__init__(self, 'zdtm/lib/groups', desc, flavor, freezer)
		if flavor.ns:
			self.__real_name = name
			self.__subs = map(lambda x: x.strip(), open(name).readlines())
			print "Subs:\n%s" % '\n'.join(self.__subs)
		else:
			self.__real_name = ''
			self.__subs = []

		self._bins += self.__subs
		self._deps += get_test_desc('zdtm/lib/groups')['deps']
		self._env = { 'ZDTM_TESTS': self.__real_name }

	def __get_start_cmd(self, name):
		tdir = os.path.dirname(name)
		tname = os.path.basename(name)

		s_args = ['make', '--no-print-directory', '-C', tdir]
		subprocess.check_call(s_args + [ tname + '.cleanout' ])
		s = subprocess.Popen(s_args + [ '--dry-run', tname + '.pid' ], stdout = subprocess.PIPE)
		cmd = s.stdout.readlines().pop().strip()
		s.wait()

		return 'cd /' + tdir + ' && ' + cmd

	def start(self):
		if (self.__subs):
			with open(self.__real_name + '.start', 'w') as f:
				for test in self.__subs:
					cmd = self.__get_start_cmd(test)
					f.write(cmd + '\n')

			with open(self.__real_name + '.stop', 'w') as f:
				for test in self.__subs:
					f.write('kill -TERM `cat /%s.pid`\n' % test)

		zdtm_test.start(self)

	def stop(self):
		zdtm_test.stop(self)

		for test in self.__subs:
			res = tail(test + '.out')
			if not 'PASS' in res.split():
				raise test_fail_exc("sub %s result check" % test)


test_classes = { 'zdtm': zdtm_test, 'inhfd': inhfd_test, 'groups': groups_test }

#
# CRIU when launched using CLI
#

criu_bin = "../criu/criu"

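# Wrapper around the criu binary: images and logs for every action go to
# dump/<test-name>/<pid>/<iteration>, and each action's command line is
# saved next to them in an <action>.cropt file.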
class criu_cli:
	def __init__(self, opts):
		self.__test = None
		self.__dump_path = None
		self.__iter = 0
		self.__prev_dump_iter = None
		self.__page_server = (opts['page_server'] and True or False)
		self.__restore_sibling = (opts['sibling'] and True or False)
		self.__fault = (opts['fault'])
		self.__sat = (opts['sat'] and True or False)
		self.__dedup = (opts['dedup'] and True or False)
		self.__user = (opts['user'] and True or False)

	def logs(self):
		return self.__dump_path

	def set_test(self, test):
		self.__test = test
		self.__dump_path = "dump/" + test.getname() + "/" + test.getpid()
		if os.path.exists(self.__dump_path):
			for i in xrange(100):
				newpath = self.__dump_path + "." + str(i)
				if not os.path.exists(newpath):
					os.rename(self.__dump_path, newpath)
					break
			else:
				raise test_fail_exc("couldn't find dump dir %s" % self.__dump_path)

		os.makedirs(self.__dump_path)

	def cleanup(self):
		if self.__dump_path:
			print "Removing %s" % self.__dump_path
			shutil.rmtree(self.__dump_path)

	def __ddir(self):
		return os.path.join(self.__dump_path, "%d" % self.__iter)

	@staticmethod
	def __criu(action, args, fault = None, strace = [], preexec = None):
		env = None
		if fault:
			print "Forcing %s fault" % fault
			env = dict(os.environ, CRIU_FAULT = fault)
		cr = subprocess.Popen(strace + [criu_bin, action] + args, env = env, preexec_fn = preexec)
		return cr.wait()

	def set_user_id(self):
		# Numbers should match those in zdtm_test
		os.setresgid(58467, 58467, 58467)
		os.setresuid(18943, 18943, 18943)

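	# Run one criu action against the current dump directory: add the common
	# "-o <log> -D <dir> -v4" prefix, record the full option list in
	# <action>.cropt, optionally wrap criu into strace, and decide whether a
	# non-zero exit code was expected.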
	def __criu_act(self, action, opts, log = None):
		if not log:
			log = action + ".log"

		s_args = ["-o", log, "-D", self.__ddir(), "-v4"] + opts

		with open(os.path.join(self.__ddir(), action + '.cropt'), 'w') as f:
			f.write(' '.join(s_args) + '\n')
		print "Run criu " + action

		strace = []
		if self.__sat:
			fname = os.path.join(self.__ddir(), action + '.strace')
			print_fname(fname, 'strace')
			strace = ["strace", "-o", fname, '-T']
			if action == 'restore':
				strace += [ '-f' ]
				s_args += [ '--action-script', os.getcwd() + '/../scripts/fake-restore.sh' ]

		preexec = self.__user and self.set_user_id or None

		ret = self.__criu(action, s_args, self.__fault, strace, preexec)
		grep_errors(os.path.join(self.__ddir(), log))
		if ret != 0:
			if self.__fault or self.__test.blocking() or (self.__sat and action == 'restore'):
				raise test_fail_expected_exc(action)
			else:
				raise test_fail_exc("CRIU %s" % action)

	def dump(self, action, opts = []):
		self.__iter += 1
		os.mkdir(self.__ddir())
		os.chmod(self.__ddir(), 0777)

		a_opts = ["-t", self.__test.getpid()]
		if self.__prev_dump_iter:
			a_opts += ["--prev-images-dir", "../%d" % self.__prev_dump_iter, "--track-mem"]
		self.__prev_dump_iter = self.__iter

		if self.__page_server:
			print "Adding page server"

			ps_opts = [ "--port", "12345", "--daemon", "--pidfile", "ps.pid" ]
			if self.__dedup:
				ps_opts += [ "--auto-dedup" ]

			self.__criu_act("page-server", opts = ps_opts)
			a_opts += ["--page-server", "--address", "127.0.0.1", "--port", "12345"]

		a_opts += self.__test.getdopts()

		if self.__dedup:
			a_opts += [ "--auto-dedup" ]

		self.__criu_act(action, opts = a_opts + opts)

		if self.__page_server:
			wait_pid_die(int(rpidfile(self.__ddir() + "/ps.pid")), "page server")

	def restore(self):
		r_opts = []
		if self.__restore_sibling:
			r_opts = ["--restore-sibling"]
			self.__test.auto_reap = False
		r_opts += self.__test.getropts()

		self.__prev_dump_iter = None
		self.__criu_act("restore", opts = r_opts + ["--restore-detached"])

	@staticmethod
	def check(feature):
		return criu_cli.__criu("check", ["-v0", "--feature", feature]) == 0

	@staticmethod
	def available():
		if not os.access(criu_bin, os.X_OK):
			print "CRIU binary not built"
			sys.exit(1)

def try_run_hook(test, args):
	hname = test.getname() + '.hook'
	if os.access(hname, os.X_OK):
		print "Running %s(%s)" % (hname, ', '.join(args))
		hook = subprocess.Popen([hname] + args)
		if hook.wait() != 0:
			raise test_fail_exc("hook " + " ".join(args))

#
# Step by step execution
#

do_sbs = False

def init_sbs():
	if sys.stdout.isatty():
		global do_sbs
		do_sbs = True
	else:
		print "Can't do step-by-step in this runtime"

def sbs(what):
	if do_sbs:
		raw_input("Pause at %s. Press Enter to continue." % what)

#
# Main testing entity -- dump (probably with pre-dumps) and restore
#

def iter_parm(opt, dflt):
	x = ((opt or str(dflt)) + ":0").split(':')
	return (xrange(0, int(x[0])), float(x[1]))

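# The 'iters' and 'pre' options come in "N[:pause]" form, e.g. "3:0.5" means
# three rounds with a 0.5 second pause after each one; iter_parm() above
# turns that into (xrange(N), pause).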
def cr(cr_api, test, opts):
	if opts['nocr']:
		return

	cr_api.set_test(test)

	iters = iter_parm(opts['iters'], 1)
	for i in iters[0]:
		pres = iter_parm(opts['pre'], 0)
		for p in pres[0]:
			if opts['snaps']:
				cr_api.dump("dump", opts = ["--leave-running"])
			else:
				cr_api.dump("pre-dump")
			time.sleep(pres[1])

		sbs('pre-dump')

		if opts['norst']:
			cr_api.dump("dump", opts = ["--leave-running"])
		else:
			cr_api.dump("dump")
			test.gone()
			sbs('pre-restore')
			try_run_hook(test, ["--pre-restore"])
			cr_api.restore()
			sbs('post-restore')

		time.sleep(iters[1])


# Additional checks that can be done outside of test process

def get_visible_state(test):
	maps = {}
	files = {}
	mounts = {}

	if not getattr(test, "static", lambda : False)() or \
	   not getattr(test, "ns", lambda : False)():
		return ({}, {}, {})

	r = re.compile('^[0-9]+$')
	pids = filter(lambda p: r.match(p), os.listdir("/proc/%s/root/proc/" % test.getpid()))
	for pid in pids:
		files[pid] = set(os.listdir("/proc/%s/root/proc/%s/fd" % (test.getpid(), pid)))

		cmaps = [[0, 0]]
		last = 0
		for mp in open("/proc/%s/root/proc/%s/maps" % (test.getpid(), pid)):
			m = map(lambda x: int('0x' + x, 0), mp.split()[0].split('-'))
			if cmaps[last][1] == m[0]:
				cmaps[last][1] = m[1]
			else:
				cmaps.append(m)
				last += 1
		maps[pid] = set(map(lambda x: '%x-%x' % (x[0], x[1]), cmaps))

		cmounts = []
		try:
			r = re.compile("^\S+\s\S+\s\S+\s(\S+)\s(\S+)")
			for m in open("/proc/%s/root/proc/%s/mountinfo" % (test.getpid(), pid)):
				cmounts.append(r.match(m).groups())
		except IOError, e:
			if e.errno != errno.EINVAL:
				raise e
		mounts[pid] = set(cmounts)
	return files, maps, mounts

def check_visible_state(test, state):
	new = get_visible_state(test)

	for pid in state[0].keys():
		fnew = new[0][pid]
		fold = state[0][pid]
		if fnew != fold:
			print "%s: Old files lost: %s" % (pid, fold - fnew)
			print "%s: New files appeared: %s" % (pid, fnew - fold)
			raise test_fail_exc("fds compare")

		old_maps = state[1][pid]
		new_maps = new[1][pid]
		if old_maps != new_maps:
			print "%s: Old maps lost: %s" % (pid, old_maps - new_maps)
			print "%s: New maps appeared: %s" % (pid, new_maps - old_maps)
			raise test_fail_exc("maps compare")

		old_mounts = state[2][pid]
		new_mounts = new[2][pid]
		if old_mounts != new_mounts:
			print "%s: Old mounts lost: %s" % (pid, old_mounts - new_mounts)
			print "%s: New mounts appeared: %s" % (pid, new_mounts - old_mounts)
			raise test_fail_exc("mounts compare")

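# Freezer helpers: with the freezecg option the test is put into a freezer
# cgroup and criu dumps it in the frozen state; without it the no-op variant
# below is used and nothing is frozen.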
class noop_freezer:
	def __init__(self):
		self.kernel = False

	def attach(self):
		pass

	def freeze(self):
		pass

	def thaw(self):
		pass

	def getdopts(self):
		return []

	def getropts(self):
		return []


class cg_freezer:
	def __init__(self, path, state):
		self.__path = '/sys/fs/cgroup/freezer/' + path
		self.__state = state
		self.kernel = True

	def attach(self):
		if not os.access(self.__path, os.F_OK):
			os.makedirs(self.__path)
		with open(self.__path + '/tasks', 'w') as f:
			f.write('0')

	def __set_state(self, state):
		with open(self.__path + '/freezer.state', 'w') as f:
			f.write(state)

	def freeze(self):
		if self.__state.startswith('f'):
			self.__set_state('FROZEN')

	def thaw(self):
		if self.__state.startswith('f'):
			self.__set_state('THAWED')

	def getdopts(self):
		return [ '--freeze-cgroup', self.__path, '--manage-cgroups' ]

	def getropts(self):
		return [ '--manage-cgroups' ]


def get_freezer(desc):
	if not desc:
		return noop_freezer()

	fd = desc.split(':')
	fr = cg_freezer(path = fd[0], state = fd[1])
	return fr

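# Run a single test in every requested flavor. This function executes in the
# re-invoked zdtm.py child (see launcher.run_test below), so a failure simply
# exits that child with a non-zero status.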
def do_run_test(tname, tdesc, flavs, opts):
	tcname = tname.split('/')[0]
	tclass = test_classes.get(tcname, None)
	if not tclass:
		print "Unknown test class %s" % tcname
		return

	if opts['report']:
		init_report(opts['report'])
	if opts['sbs']:
		init_sbs()

	fcg = get_freezer(opts['freezecg'])

	for f in flavs:
		print
		print_sep("Run %s in %s" % (tname, f))
		flav = flavors[f](opts)
		t = tclass(tname, tdesc, flav, fcg)
		cr_api = criu_cli(opts)

		try:
			t.start()
			s = get_visible_state(t)
			try:
				cr(cr_api, t, opts)
			except test_fail_expected_exc as e:
				if e.cr_action == "dump":
					t.stop()
				try_run_hook(t, ["--fault", e.cr_action])
			else:
				check_visible_state(t, s)
				t.stop()
				try_run_hook(t, ["--clean"])
		except test_fail_exc as e:
			print_sep("Test %s FAIL at %s" % (tname, e.step), '#')
			t.print_output()
			t.kill()
			add_to_report(cr_api.logs(), "cr_logs")
			if opts['keep_img'] == 'never':
				cr_api.cleanup()
			# This exit does two things -- exits from subprocess and
			# aborts the main script execution on the 1st error met
			sys.exit(1)
		else:
			if opts['keep_img'] != 'always':
				cr_api.cleanup()
			print_sep("Test %s PASS" % tname)

class launcher:
	def __init__(self, opts, nr_tests):
		self.__opts = opts
		self.__total = nr_tests
		self.__nr = 0
		self.__max = int(opts['parallel'] or 1)
		self.__subs = {}
		self.__fail = False
		if self.__max > 1 and self.__total > 1:
			self.__use_log = True
		elif opts['report']:
			self.__use_log = True
		else:
			self.__use_log = False

	def __show_progress(self):
		perc = self.__nr * 16 / self.__total
		print "=== Run %d/%d %s" % (self.__nr, self.__total, '=' * perc + '-' * (16 - perc))

	def skip(self, name, reason):
		print "Skipping %s (%s)" % (name, reason)
		self.__nr += 1

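	# Each test runs in its own "./zdtm_ct zdtm.py" child; the test name,
	# descriptor, flavor and the relevant options are passed to it through
	# the CR_CT_TEST_INFO environment variable.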
	def run_test(self, name, desc, flavor):
		if len(self.__subs) >= self.__max:
			self.wait()

		if test_flag(desc, 'excl'):
			self.wait_all()

		self.__nr += 1
		self.__show_progress()

		nd = ('nocr', 'norst', 'pre', 'iters', 'page_server', 'sibling', \
				'fault', 'keep_img', 'report', 'snaps', 'sat', \
				'dedup', 'sbs', 'freezecg', 'user')
		arg = repr((name, desc, flavor, { d: self.__opts[d] for d in nd }))

		if self.__use_log:
			logf = name.replace('/', '_') + ".log"
			log = open(logf, "w")
		else:
			logf = None
			log = None

		sub = subprocess.Popen(["./zdtm_ct", "zdtm.py"], \
				env = dict(os.environ, CR_CT_TEST_INFO = arg), \
				stdout = log, stderr = subprocess.STDOUT)
		self.__subs[sub.pid] = { 'sub': sub, 'log': logf }

		if test_flag(desc, 'excl'):
			self.wait()

	def __wait_one(self, flags):
		pid, status = os.waitpid(0, flags)
		if pid != 0:
			sub = self.__subs.pop(pid)
			if status != 0:
				self.__fail = True
				if sub['log']:
					add_to_report(sub['log'], "output")

			if sub['log']:
				print open(sub['log']).read()
				os.unlink(sub['log'])

			return True

		return False

	def __wait_all(self):
		while self.__subs:
			self.__wait_one(0)

	def wait(self):
		self.__wait_one(0)
		while self.__subs:
			if not self.__wait_one(os.WNOHANG):
				break
		if self.__fail:
			raise test_fail_exc('')

	def wait_all(self):
		self.__wait_all()
		if self.__fail:
			raise test_fail_exc('')

	def finish(self):
		self.__wait_all()
		if self.__fail:
			print_sep("FAIL", "#")
			sys.exit(1)

def all_tests(opts):
	desc = eval(open(opts['set'] + '.desc').read())
	lst = subprocess.Popen(['find', desc['dir'], '-type', 'f', '-executable' ], \
			stdout = subprocess.PIPE)
	excl = map(lambda x: os.path.join(desc['dir'], x), desc['exclude'])
	tlist = filter(lambda x: \
			not x.endswith('.checkskip') and \
			not x.endswith('.hook') and \
			not x in excl, \
			map(lambda x: x.strip(), lst.stdout.readlines()) \
		)
	lst.wait()
	return tlist

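# A test descriptor is a python literal eval()-ed from a .desc file. A purely
# illustrative example (all keys are optional, taken from how they are used
# in this script):
#
#   {'flags': 'suid excl', 'opts': '--link-remap', 'deps': ['/bin/true'],
#    'feature': 'mnt_id', 'arch': 'x86_64'}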
# Descriptor for abstract test not in list
default_test = { }


def get_test_desc(tname):
	d_path = tname + '.desc'
	if os.access(d_path, os.F_OK):
		return eval(open(d_path).read())

	return default_test


def self_checkskip(tname):
	chs = tname + '.checkskip'
	if os.access(chs, os.X_OK):
		ch = subprocess.Popen([chs])
		return not ch.wait() == 0

	return False

def print_fname(fname, typ):
	print "=[%s]=> %s" % (typ, fname)

def print_sep(title, sep = "=", width = 80):
	print (" " + title + " ").center(width, sep)

def grep_errors(fname):
	first = True
	for l in open(fname):
		if "Error" in l:
			if first:
				print_fname(fname, 'log')
				print_sep("grep Error", "-", 60)
				first = False
			print l,
	if not first:
		print_sep("ERROR OVER", "-", 60)

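# Top-level driver: build the list of tests to run (all of the set with -a,
# an explicit list with -t, or a regexp match via the 'tests' option), apply
# exclusions and per-test descriptors, then hand the tests to the launcher.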
def run_tests(opts):
|
2015-10-05 21:55:00 +03:00
|
|
|
excl = None
|
|
|
|
features = {}
|
|
|
|
|
|
|
|
if opts['all']:
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
torun = all_tests(opts)
|
|
|
|
run_all = True
|
2015-10-15 17:25:00 +03:00
|
|
|
elif opts['tests']:
|
|
|
|
r = re.compile(opts['tests'])
|
|
|
|
torun = filter(lambda x: r.match(x), all_tests(opts))
|
|
|
|
run_all = True
|
2015-10-05 21:55:00 +03:00
|
|
|
elif opts['test']:
|
|
|
|
torun = opts['test']
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
run_all = False
|
2015-10-05 21:55:00 +03:00
|
|
|
else:
|
|
|
|
print "Specify test with -t <name> or -a"
|
|
|
|
return
|
|
|
|
|
|
|
|
if opts['exclude']:
|
|
|
|
excl = re.compile(".*(" + "|".join(opts['exclude']) + ")")
|
|
|
|
print "Compiled exclusion list"
|
|
|
|
|
2015-10-28 16:39:00 +03:00
|
|
|
if opts['report']:
|
|
|
|
init_report(opts['report'])
|
|
|
|
|
2015-12-21 14:03:00 +03:00
|
|
|
if opts['parallel'] and opts['freezecg']:
|
|
|
|
print "Parallel launch with freezer not supported"
|
|
|
|
opts['parallel'] = None
|
|
|
|
|
2015-10-30 18:50:00 +03:00
|
|
|
l = launcher(opts, len(torun))

	try:
		for t in torun:
			global arch

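			# A test is skipped (and reported as such) if it is excluded,
			# built for another arch, marked 'noauto' on an --all run,
			# needs a CRIU feature this kernel lacks, fails its own
			# checkskip script, or cannot run in --user mode.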
			if excl and excl.match(t):
				l.skip(t, "exclude")
				continue

			tdesc = get_test_desc(t)
			if tdesc.get('arch', arch) != arch:
				l.skip(t, "arch %s" % tdesc['arch'])
				continue

			if run_all and test_flag(tdesc, 'noauto'):
				l.skip(t, "manual run only")
				continue

			feat = tdesc.get('feature', None)
			if feat:
				if not features.has_key(feat):
					print "Checking feature %s" % feat
					features[feat] = criu_cli.check(feat)

				if not features[feat]:
					l.skip(t, "no %s feature" % feat)
					continue

			if self_checkskip(t):
				l.skip(t, "checkskip failed")
				continue

			if opts['user']:
				if test_flag(tdesc, 'suid'):
					l.skip(t, "suid test in user mode")
					continue
				if test_flag(tdesc, 'nouser'):
					l.skip(t, "criu root prio needed")
					continue
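
			# Intersect the flavors the test declares with the ones asked
			# for on the command line; 'best' picks the last declared
			# flavor only.  Flavors that cannot work here (no userns
			# support, --user mode) are then dropped.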
			test_flavs = tdesc.get('flavor', 'h ns uns').split()
			opts_flavs = (opts['flavor'] or 'h,ns,uns').split(',')
			if opts_flavs != ['best']:
				run_flavs = set(test_flavs) & set(opts_flavs)
			else:
				run_flavs = set([test_flavs.pop()])

			if not criu_cli.check("userns"):
				try:
					run_flavs.remove("uns")
				except KeyError:
					# don't worry if uns isn't in run_flavs
					pass

			if opts['user']:
				# FIXME -- probably uns will make sense
				try:
					run_flavs.remove("ns")
				except KeyError:
					pass
				try:
					run_flavs.remove("uns")
				except KeyError:
					pass

			if run_flavs:
				l.run_test(t, tdesc, run_flavs)
			else:
				l.skip(t, "no flavors")
	finally:
		l.finish()

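# 'list' subcommand helpers: print one line per test, optionally with
# its flavors and flags (-i/--info).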
sti_fmt = "%-40s%-10s%s"

def show_test_info(t):
	tdesc = get_test_desc(t)
	flavs = tdesc.get('flavor', '')
	return sti_fmt % (t, flavs, tdesc.get('flags', ''))

def list_tests(opts):
	tlist = all_tests(opts)
	if opts['info']:
		print sti_fmt % ('Name', 'Flavors', 'Flags')
		tlist = map(lambda x: show_test_info(x), tlist)
	print '\n'.join(tlist)

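# 'group' subcommand: pack tests whose descriptions fully match into
# group files under groups/; each group file lists its member tests
# and gets a common .desc of its own.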
class group:
	def __init__(self, tname, tdesc):
		self.__tests = [ tname ]
		self.__desc = tdesc
		self.__deps = set()

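	# Tests may share a group only when their descriptors match exactly:
	# same flags, flavor, arch, extra opts and required feature.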
	def __is_mergeable_desc(self, desc):
		# For now make it full match
		if self.__desc.get('flags') != desc.get('flags'):
			return False
		if self.__desc.get('flavor') != desc.get('flavor'):
			return False
		if self.__desc.get('arch') != desc.get('arch'):
			return False
		if self.__desc.get('opts') != desc.get('opts'):
			return False
		if self.__desc.get('feature') != desc.get('feature'):
			return False
		return True

	def merge(self, tname, tdesc):
		if not self.__is_mergeable_desc(tdesc):
			return False

		self.__deps |= set(tdesc.get('deps', []))
		self.__tests.append(tname)
		return True

	def size(self):
		return len(self.__tests)

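	# The group file lists member tests one per line and is made
	# executable; a companion .desc file holds the common descriptor
	# plus the union of all deps.  An illustrative (made-up) .desc
	# payload: {'flavor': 'h', 'flags': 'suid', 'deps': ['/bin/sh']}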
	def dump(self, fname):
		f = open(fname, "w")
		for t in self.__tests:
			f.write(t + '\n')
		f.close()
		os.chmod(fname, 0700)

		if len(self.__desc) or len(self.__deps):
			f = open(fname + '.desc', "w")
			if len(self.__deps):
				self.__desc['deps'] = list(self.__deps)
			f.write(repr(self.__desc))
			f.close()

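# Greedy grouping: shuffle the test list, then merge every test into
# the first pending group that accepts it; a group is closed once it
# reaches --max-size members.  Groups of one test are not written out.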
def group_tests(opts):
	excl = None
	groups = []
	pend_groups = []
	maxs = int(opts['max_size'])

	if not os.access("groups", os.F_OK):
		os.mkdir("groups")

	tlist = all_tests(opts)
	random.shuffle(tlist)
	if opts['exclude']:
		excl = re.compile(".*(" + "|".join(opts['exclude']) + ")")
		print "Compiled exclusion list"

	for t in tlist:
		if excl and excl.match(t):
			continue

		td = get_test_desc(t)

		for g in pend_groups:
			if g.merge(t, td):
				if g.size() == maxs:
					pend_groups.remove(g)
					groups.append(g)
				break
		else:
			g = group(t, td)
			pend_groups.append(g)

	groups += pend_groups

	nr = 0
	suf = opts['name'] or 'group'

	for g in groups:
		if g.size() == 1: # Not much point in group test for this
			continue

		fn = os.path.join("groups", "%s.%d" % (suf, nr))
		g.dump(fn)
		nr += 1

	print "Generated %d group(s)" % nr

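# 'clean' subcommand: currently only 'nsroot' is supported, which asks
# every flavor to clean() up after itself.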
def clean_stuff(opts):
	print "Cleaning %s" % opts['what']
	if opts['what'] == 'nsroot':
		for f in flavors:
			f = flavors[f]
			f.clean()

#
# main() starts here
#

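# Child mode: when CR_CT_TEST_INFO is set in the environment (the
# launcher presumably sets it when re-executing zdtm.py inside the
# test's namespaces), this copy acts as init of the new pid namespace:
# it forks the real test runner, reaps zombies until the runner exits,
# and exits with a non-zero status on any failure.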
if os.environ.has_key('CR_CT_TEST_INFO'):
	# Fork here, since we're new pidns init and are supposed to
	# collect this namespace's zombies
	status = 0
	pid = os.fork()
	if pid == 0:
		tinfo = eval(os.environ['CR_CT_TEST_INFO'])
		do_run_test(tinfo[0], tinfo[1], tinfo[2], tinfo[3])
	else:
		while True:
			wpid, status = os.wait()
			if wpid == pid:
				if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 0:
					status = 1
				break

	sys.exit(status)

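# Command-line interface: a top-level parser plus 'run', 'list',
# 'group' and 'clean' subcommands.  Each subcommand stores its handler
# as the 'action' default, dispatched at the very end of the script.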
p = argparse.ArgumentParser("CRIU test suite")
p.add_argument("--debug", help = "Print what's being executed", action = 'store_true')
p.add_argument("--set", help = "Which set of tests to use", default = 'zdtm')

sp = p.add_subparsers(help = "Use --help for list of actions")

rp = sp.add_parser("run", help = "Run test(s)")
rp.set_defaults(action = run_tests)
rp.add_argument("-a", "--all", action = 'store_true')
rp.add_argument("-t", "--test", help = "Test name", action = 'append')
rp.add_argument("-T", "--tests", help = "Regexp")
rp.add_argument("-f", "--flavor", help = "Flavor to run")
rp.add_argument("-x", "--exclude", help = "Exclude tests from --all run", action = 'append')

rp.add_argument("--sibling", help = "Restore tests as siblings", action = 'store_true')
rp.add_argument("--pre", help = "Do some pre-dumps before dump (n[:pause])")
rp.add_argument("--snaps", help = "Instead of pre-dumps do full dumps", action = 'store_true')
rp.add_argument("--dedup", help = "Auto-deduplicate images on iterations", action = 'store_true')
rp.add_argument("--nocr", help = "Do not CR anything, just check test works", action = 'store_true')
rp.add_argument("--norst", help = "Don't restore tasks, leave them running after dump", action = 'store_true')
rp.add_argument("--iters", help = "Do CR cycle several times before check (n[:pause])")
rp.add_argument("--fault", help = "Test fault injection")
rp.add_argument("--sat", help = "Generate criu strace-s for sat tool (restore is fake, images are kept)", action = 'store_true')
rp.add_argument("--sbs", help = "Do step-by-step execution, asking user for keypress to continue", action = 'store_true')
rp.add_argument("--freezecg", help = "Use freeze cgroup (path:state)")
rp.add_argument("--user", help = "Run CRIU as regular user", action = 'store_true')

rp.add_argument("--page-server", help = "Use page server dump", action = 'store_true')
rp.add_argument("-p", "--parallel", help = "Run test in parallel")

rp.add_argument("-k", "--keep-img", help = "Whether or not to keep images after test",
		choices = [ 'always', 'never', 'failed' ], default = 'failed')
rp.add_argument("--report", help = "Generate summary report in directory")

lp = sp.add_parser("list", help = "List tests")
lp.set_defaults(action = list_tests)
lp.add_argument('-i', '--info', help = "Show more info about tests", action = 'store_true')

gp = sp.add_parser("group", help = "Generate groups")
gp.set_defaults(action = group_tests)
gp.add_argument("-m", "--max-size", help = "Maximum number of tests in group")
gp.add_argument("-n", "--name", help = "Common name for group tests")
gp.add_argument("-x", "--exclude", help = "Exclude tests from --all run", action = 'append')

cp = sp.add_parser("clean", help = "Clean something")
cp.set_defaults(action = clean_stuff)
cp.add_argument("what", choices = [ 'nsroot' ])

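# Illustrative invocations (test names and paths are examples only):
#   ./zdtm.py run -a --report report
#   ./zdtm.py run -t zdtm/live/static/env00 -f h,uns
#   ./zdtm.py list -i
#   ./zdtm.py group -m 10 -n mygrp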
opts = vars(p.parse_args())
if opts.get('sat', False):
	opts['keep_img'] = 'always'

if opts['debug']:
	sys.settrace(traceit)

criu_cli.available()
for tst in test_classes.values():
	tst.available()

opts['action'](opts)