#!/usr/bin/env python
import argparse
import yaml
import os
import subprocess
import time
import tempfile
import shutil
import re
import stat
import signal
import atexit
import sys
import linecache

prev_line = None

def traceit(f, e, a):
	if e == "line":
		lineno = f.f_lineno
		fil = f.f_globals["__file__"]
		if fil.endswith("zdtm.py"):
			global prev_line
			line = linecache.getline(fil, lineno)
			if line == prev_line:
				print " ..."
			else:
				prev_line = line
				print "+%4d: %s" % (lineno, line.rstrip())

	return traceit

# Root dir for ns and uns flavors. All tests
# sit in the same dir
tests_root = None

def clean_tests_root():
	global tests_root
	if tests_root:
		os.rmdir(tests_root)

def make_tests_root():
	global tests_root
	if not tests_root:
		tests_root = tempfile.mkdtemp("", "criu-root-", "/tmp")
		atexit.register(clean_tests_root)
	return tests_root

# Arch we run on
arch = os.uname()[4]

#
# Flavors
#  h   -- host, the test is run in the same set of namespaces as criu
#  ns  -- namespaces, the test is run in its own set of namespaces
#  uns -- user namespace, the same as above plus a user namespace
#

class host_flavor:
	def __init__(self, opts):
		self.name = "host"
		self.ns = False
		self.root = None

	def init(self, test_bin):
		pass

	def fini(self):
		pass

class ns_flavor:
	def __init__(self, opts):
		self.name = "ns"
		self.ns = True
		self.uns = False
		self.root = make_tests_root()

	def init(self, test_bin):
		print "Construct root for %s" % test_bin
		subprocess.check_call(["mount", "--make-private", "--bind", ".", self.root])

		if not os.access(self.root + "/.constructed", os.F_OK):
			for dir in ["/bin", "/etc", "/lib", "/lib64", "/dev", "/tmp"]:
				os.mkdir(self.root + dir)
				os.chmod(self.root + dir, 0777)

			os.mknod(self.root + "/dev/tty", stat.S_IFCHR, os.makedev(5, 0))
			os.chmod(self.root + "/dev/tty", 0666)
			os.mknod(self.root + "/.constructed", stat.S_IFREG | 0600)

		ldd = subprocess.Popen(["ldd", test_bin], stdout = subprocess.PIPE)
		xl = re.compile('^(linux-gate.so|linux-vdso(64)?.so|not a dynamic)')

		# This Mayakovsky-style code gets the list of libraries the binary
		# needs, minus the vdso and gate .so-s
		libs = map(lambda x: x[1] == '=>' and x[2] or x[0], \
			map(lambda x: x.split(), \
			filter(lambda x: not xl.match(x), \
			map(lambda x: x.strip(), \
			filter(lambda x: x.startswith('\t'), ldd.stdout.readlines())))))
		ldd.wait()
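
		# A sketch of what the pipeline above extracts, assuming typical ldd
		# output and hypothetical paths: a line such as
		#   "\tlibc.so.6 => /lib64/libc.so.6 (0x00007f...)"
		# yields "/lib64/libc.so.6", while a loader line without '=>' like
		#   "\t/lib64/ld-linux-x86-64.so.2 (0x00007f...)"
		# is taken as-is; vdso/gate entries are dropped by the regexp.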

		for lib in libs:
			tlib = self.root + lib
			if not os.access(tlib, os.F_OK):
				# Copying should be atomic as tests can be
				# run in parallel
				dst = tempfile.mktemp(".tso", "", self.root + os.path.dirname(lib))
				shutil.copy2(lib, dst)
				os.rename(dst, tlib)

	def fini(self):
		subprocess.check_call(["mount", "--make-private", self.root])
		subprocess.check_call(["umount", "-l", self.root])

class userns_flavor(ns_flavor):
	def __init__(self, opts):
		ns_flavor.__init__(self, opts)
		self.name = "userns"
		self.uns = True

flavors = { 'h': host_flavor, 'ns': ns_flavor, 'uns': userns_flavor }
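
# A test runs in the intersection of the flavors it declares (the 'flavor'
# field of its description, defaulting to 'h ns uns') and those requested
# with -f/--flavor; e.g. a hypothetical test declaring 'h ns' that is run
# with "-f ns,uns" is executed in the 'ns' flavor only.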

#
# Helpers
#

def tail(path):
	p = subprocess.Popen(['tail', '-n1', path],
			stdout = subprocess.PIPE)
	return p.stdout.readline()

def rpidfile(path):
	return open(path).readline().strip()

def wait_pid_die(pid, who, tmo = 4):
	stime = 0.1
	while stime < tmo:
		try:
			os.kill(int(pid), 0)
		except: # Died
			break

		print "Wait for %s to die for %f" % (who, stime)
		time.sleep(stime)
		stime *= 2
	else:
		raise test_fail_exc("%s die" % who)
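
# The loop above polls the process with exponential backoff: with the default
# tmo of 4 it sleeps 0.1, 0.2, 0.4, 0.8, 1.6 and 3.2 seconds (about 6.3
# seconds overall) and raises test_fail_exc if the process is still alive.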

def test_flag(tdesc, flag):
	return flag in tdesc.get('flags', '').split()

#
# Exception thrown when something inside the test goes wrong,
# e.g. the test doesn't start, criu returns with a non-zero code or
# the test checks fail
#

class test_fail_exc:
	def __init__(self, step):
		self.step = step

#
# A test from the zdtm/ directory.
#

class zdtm_test:
	def __init__(self, name, desc, flavor):
		self.__name = name
		self.__desc = desc
		self.__make_action('cleanout')
		self.__pid = 0
		self.__flavor = flavor
		self.auto_reap = True

	def __make_action(self, act, env = None, root = None):
		tpath = self.__name + '.' + act
		s_args = ['make', '--no-print-directory', \
				'-C', os.path.dirname(tpath), \
				os.path.basename(tpath)]

		if env:
			env = dict(os.environ, **env)

		s = subprocess.Popen(s_args, env = env, cwd = root)
		s.wait()
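
	# Roughly, for a hypothetical test zdtm/live/static/env00 and act 'pid'
	# the call above runs:
	#   make --no-print-directory -C zdtm/live/static env00.pid
	# i.e. the per-test targets ('cleanout', 'pid', ...) are expected to be
	# provided by the zdtm Makefiles.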

	def __pidfile(self):
		if self.__flavor.ns:
			return self.__name + '.init.pid'
		else:
			return self.__name + '.pid'

	def __wait_task_die(self):
		wait_pid_die(int(self.__pid), self.__name)

	def start(self):
		env = {}
		self.__flavor.init(self.__name)

		print "Start test"

		env['ZDTM_THREAD_BOMB'] = "100"
		if not test_flag(self.__desc, 'suid'):
			env['ZDTM_UID'] = "18943"
			env['ZDTM_GID'] = "58467"
			env['ZDTM_GROUPS'] = "27495 48244"
		else:
			print "Test is SUID"

		if self.__flavor.ns:
			env['ZDTM_NEWNS'] = "1"
			env['ZDTM_PIDFILE'] = os.path.realpath(self.__name + '.init.pid')
			env['ZDTM_ROOT'] = self.__flavor.root

			if self.__flavor.uns:
				env['ZDTM_USERNS'] = "1"

		self.__make_action('pid', env, self.__flavor.root)

		try:
			os.kill(int(self.getpid()), 0)
		except:
			raise test_fail_exc("start")

	def kill(self, sig = signal.SIGKILL):
		if self.__pid:
			os.kill(int(self.__pid), sig)
			self.gone(sig == signal.SIGKILL)

		self.__flavor.fini()

	def stop(self):
		print "Stop test"
		self.kill(signal.SIGTERM)

		res = tail(self.__name + '.out')
		if 'PASS' not in res.split():
			raise test_fail_exc("result check")

	def getpid(self):
		if self.__pid == 0:
			self.__pid = rpidfile(self.__pidfile())

		return self.__pid

	def getname(self):
		return self.__name

	def __getcropts(self):
		opts = self.__desc.get('opts', '').split() + ["--pidfile", os.path.realpath(self.__pidfile())]
		if self.__flavor.ns:
			opts += ["--root", self.__flavor.root]
		if test_flag(self.__desc, 'crlib'):
			opts += ["-L", os.path.dirname(os.path.realpath(self.__name)) + '/lib']
		return opts

	def getdopts(self):
		return self.__getcropts()

	def getropts(self):
		return self.__getcropts()

	def gone(self, force = True):
		if not self.auto_reap:
			pid, status = os.waitpid(int(self.__pid), 0)
			if pid != int(self.__pid):
				raise test_fail_exc("kill pid mess")

		self.__wait_task_die()
		self.__pid = 0
		if force or self.__flavor.ns:
			os.unlink(self.__pidfile())

	def print_output(self):
		if os.access(self.__name + '.out', os.R_OK):
			print "Test output: " + "=" * 32
			print open(self.__name + '.out').read()
			print " <<< " + "=" * 32

test_classes = { 'zdtm': zdtm_test }

#
# CRIU when launched using CLI
#

class criu_cli:
	def __init__(self, opts):
		self.__test = None
		self.__dump_path = None
		self.__iter = 0
		self.__page_server = (opts['page_server'] and True or False)
		self.__restore_sibling = (opts['sibling'] and True or False)

	def set_test(self, test):
		self.__test = test
		self.__dump_path = "dump/" + test.getname() + "/" + test.getpid()
		os.makedirs(self.__dump_path)

	def cleanup(self):
		if self.__dump_path:
			print "Removing %s" % self.__dump_path
			shutil.rmtree(self.__dump_path)

	def __ddir(self):
		return os.path.join(self.__dump_path, "%d" % self.__iter)

	@staticmethod
	def __criu(action, args):
		cr = subprocess.Popen(["../criu", action] + args)
		return cr.wait()

	def __criu_act(self, action, opts, log = None):
		if not log:
			log = action + ".log"

		s_args = ["-o", log, "-D", self.__ddir(), "-v4"] + opts
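		# For a hypothetical pid 1234 of test zdtm/live/static/env00 the
		# first dump therefore runs something like:
		#   ../criu dump -o dump.log -D dump/zdtm/live/static/env00/1234/1 \
		#          -v4 -t 1234 --pidfile ... [--root ...]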

		print "Run CRIU: [" + action + " " + " ".join(s_args) + "]"
		ret = self.__criu(action, s_args)
		if ret != 0:
			grep_errors(os.path.join(self.__ddir(), log))
			raise test_fail_exc("CRIU %s" % action)

	def dump(self, action, opts = []):
		self.__iter += 1
		os.mkdir(self.__ddir())

		a_opts = ["-t", self.__test.getpid()]
		if self.__iter > 1:
			a_opts += ["--prev-images-dir", "../%d" % (self.__iter - 1), "--track-mem"]

		if self.__page_server:
			print "Adding page server"
			self.__criu_act("page-server", opts = ["--port", "12345", \
					"--daemon", "--pidfile", "ps.pid"])
			a_opts += ["--page-server", "--address", "127.0.0.1", "--port", "12345"]

		a_opts += self.__test.getdopts()

		self.__criu_act(action, opts = a_opts + opts)

		if self.__page_server:
			wait_pid_die(int(rpidfile(self.__ddir() + "/ps.pid")), "page server")

	def restore(self):
		r_opts = []
		if self.__restore_sibling:
			r_opts = ["--restore-sibling"]
			self.__test.auto_reap = False
		r_opts += self.__test.getropts()

		self.__criu_act("restore", opts = r_opts + ["--restore-detached"])

	@staticmethod
	def check(feature):
		return criu_cli.__criu("check", ["-v0", "--feature", feature]) == 0

def try_run_hook(test, args):
	hname = test.getname() + '.hook'
	if os.access(hname, os.X_OK):
		print "Running %s(%s)" % (hname, ', '.join(args))
		hook = subprocess.Popen([hname] + args)
		if hook.wait() != 0:
			raise test_fail_exc("hook " + " ".join(args))

#
# Main testing entity -- dump (probably with pre-dumps) and restore
#

def cr(cr_api, test, opts):
	if opts['nocr']:
		return

	cr_api.set_test(test)

	for i in xrange(0, int(opts['iters'] or 1)):
		for p in xrange(0, int(opts['pre'] or 0)):
			cr_api.dump("pre-dump")

		if opts['norst']:
			cr_api.dump("dump", opts = ["--leave-running"])
		else:
			cr_api.dump("dump")
			test.gone()
			try_run_hook(test, ["--pre-restore"])
			cr_api.restore()
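
# For example, "--pre 2" makes every iteration issue two pre-dumps before the
# final dump (the second and later dumps in a series reuse the previous
# images via --prev-images-dir/--track-mem), and unless --norst is given the
# test is restored afterwards; with "--iters N" the whole cycle repeats N
# times against the restored tree.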

# Additional checks that can be done outside of the test process

def get_maps(test):
	maps = [[0, 0]]
	last = 0
	for mp in open("/proc/%s/maps" % test.getpid()).readlines():
		m = map(lambda x: int('0x' + x, 0), mp.split()[0].split('-'))
		if maps[last][1] == m[0]:
			maps[last][1] = m[1]
		else:
			maps.append(m)
			last += 1
	maps.pop(0)
	return maps
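
# Adjacent ranges are merged above, so e.g. mappings 0x1000-0x2000 and
# 0x2000-0x3000 end up as a single [0x1000, 0x3000] entry; the before/after
# comparison in check_visible_state() therefore does not depend on how
# neighbouring areas happen to be split.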

def get_fds(test):
	return map(lambda x: int(x), os.listdir("/proc/%s/fdinfo" % test.getpid()))

def cmp_lists(m1, m2):
	return filter(lambda x: x[0] != x[1], zip(m1, m2))

def get_visible_state(test):
	fds = get_fds(test)
	maps = get_maps(test)
	return (fds, maps)

def check_visible_state(test, state):
	new = get_visible_state(test)
	if cmp_lists(new[0], state[0]):
		raise test_fail_exc("fds compare")
	if cmp_lists(new[1], state[1]):
		raise test_fail_exc("maps compare")

def do_run_test(tname, tdesc, flavs, opts):
	tcname = tname.split('/')[0]
	tclass = test_classes.get(tcname, None)
	if not tclass:
		print "Unknown test class %s" % tcname
		return

	for f in flavs:
		print
		print_sep("Run %s in %s" % (tname, f))
		flav = flavors[f](opts)
		t = tclass(tname, tdesc, flav)
		cr_api = criu_cli(opts)

		try:
			t.start()
			s = get_visible_state(t)
			cr(cr_api, t, opts)
			check_visible_state(t, s)
			t.stop()
			try_run_hook(t, ["--clean"])
		except test_fail_exc as e:
			print "Test %s FAIL at %s" % (tname, e.step)
			t.print_output()
			t.kill()
			if opts['keep_img'] == 'never':
				cr_api.cleanup()
			# This exit does two things -- exits from the subprocess and
			# aborts the main script execution on the 1st error met
			sys.exit(1)
		else:
			if opts['keep_img'] != 'always':
				cr_api.cleanup()
			print_sep("Test %s PASS" % tname)

class launcher:
	def __init__(self, opts):
		self.__opts = opts
		self.__max = int(opts['parallel'] or 0)
		self.__subs = {}
		self.__fail = False

	def run_test(self, name, desc, flavor):
		if self.__max == 0:
			do_run_test(name, desc, flavor, self.__opts)
			return

		if len(self.__subs) >= self.__max:
			self.wait()
			if self.__fail:
				raise test_fail_exc('')

		nd = ('nocr', 'norst', 'pre', 'iters', 'page_server', 'sibling')
		arg = repr((name, desc, flavor, { d: self.__opts[d] for d in nd }))
		log = name.replace('/', '_') + ".log"
		sub = subprocess.Popen(["./zdtm_ct", "zdtm.py"], \
				env = dict(os.environ, CR_CT_TEST_INFO = arg), \
				stdout = open(log, "w"), stderr = subprocess.STDOUT)
		self.__subs[sub.pid] = { 'sub': sub, 'log': log }
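
	# In the parallel case the test arguments travel to a re-executed copy of
	# this script via the CR_CT_TEST_INFO environment variable (repr() here,
	# eval() in the main section at the bottom of this file), wrapped by the
	# ./zdtm_ct helper; the re-executed script acts as the new pidns init.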

	def __wait_one(self, flags):
		pid, status = os.waitpid(0, flags)
		if pid != 0:
			sub = self.__subs.pop(pid)
			if status != 0:
				self.__fail = True

			print open(sub['log']).read()
			os.unlink(sub['log'])
			return True

		return False

	def wait(self):
		self.__wait_one(0)
		while self.__subs:
			if not self.__wait_one(os.WNOHANG):
				break

	def finish(self):
		while self.__subs:
			self.__wait_one(0)
		if self.__fail:
			sys.exit(1)

def all_tests(opts):
	desc = eval(open(opts['set'] + '.desc').read())
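	# The set descriptor is a python dict literal; a minimal, hypothetical
	# zdtm.desc could be:
	#   {'dir': 'zdtm/live', 'exclude': ['static/some_manual_test']}
	# 'dir' is scanned for executables and 'exclude' entries are dropped.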
	lst = subprocess.Popen(['find', desc['dir'], '-type', 'f', '-executable'], \
			stdout = subprocess.PIPE)
	excl = map(lambda x: os.path.join(desc['dir'], x), desc['exclude'])
	tlist = filter(lambda x: \
			not x.endswith('.checkskip') and \
			not x.endswith('.hook') and \
			x not in excl, \
			map(lambda x: x.strip(), lst.stdout.readlines()) \
		)
	lst.wait()
	return tlist

# Descriptor for an abstract test not in the list
default_test = { }

def get_test_desc(tname):
	d_path = tname + '.desc'
	if os.access(d_path, os.F_OK):
		return eval(open(d_path).read())

	return default_test
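
# Per-test ${test}.desc files use the same dict-literal format; a purely
# hypothetical example:
#   {'flavor': 'h', 'flags': 'suid crlib', 'opts': '--tcp-established',
#    'feature': 'mem_dirty_track', 'arch': 'x86_64'}
# Every key is optional -- callers of get_test_desc() fall back to defaults
# for the ones that are missing.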

def self_checkskip(tname):
	chs = tname + '.checkskip'
	if os.access(chs, os.X_OK):
		ch = subprocess.Popen([chs])
		return ch.wait() != 0

	return False

def print_sep(title, sep = "="):
	sep_len = (80 - len(title) - 2) / 2
	sep = sep * sep_len
	print "%s %s %s" % (sep, title, sep)

def grep_errors(fname):
	print_sep("grep Error")
	for l in open(fname):
		if "Error" in l:
			print l,
	print_sep("ERROR OVER")

def run_tests(opts):
	excl = None
	features = {}

	if opts['all']:
		torun = all_tests(opts)
		run_all = True
	elif opts['tests']:
		r = re.compile(opts['tests'])
		torun = filter(lambda x: r.match(x), all_tests(opts))
		run_all = True
	elif opts['test']:
		torun = opts['test']
		run_all = False
	else:
		print "Specify test with -t <name> or -a"
		return

	if opts['exclude']:
		excl = re.compile(".*(" + "|".join(opts['exclude']) + ")")
		print "Compiled exclusion list"

	l = launcher(opts)
	try:
		for t in torun:
			global arch

			if excl and excl.match(t):
				print "Skipping %s (exclude)" % t
				continue

			tdesc = get_test_desc(t)
			if tdesc.get('arch', arch) != arch:
				print "Skipping %s (arch %s)" % (t, tdesc['arch'])
				continue

			if run_all and test_flag(tdesc, 'noauto'):
				print "Skipping test %s (manual run only)" % t
				continue

			feat = tdesc.get('feature', None)
			if feat:
				if not features.has_key(feat):
					print "Checking feature %s" % feat
					features[feat] = criu_cli.check(feat)

				if not features[feat]:
					print "Skipping %s (no %s feature)" % (t, feat)
					continue

			if self_checkskip(t):
				print "Skipping %s (self)" % t
				continue

			test_flavs = tdesc.get('flavor', 'h ns uns').split()
			opts_flavs = (opts['flavor'] or 'h,ns,uns').split(',')
			run_flavs = set(test_flavs) & set(opts_flavs)

			if run_flavs:
				l.run_test(t, tdesc, run_flavs)
	finally:
		l.finish()

sti_fmt = "%-40s%-10s%s"

def show_test_info(t):
	tdesc = get_test_desc(t)
	flavs = tdesc.get('flavor', '')
	return sti_fmt % (t, flavs, tdesc.get('flags', ''))

def list_tests(opts):
	tlist = all_tests(opts)
	if opts['info']:
		print sti_fmt % ('Name', 'Flavors', 'Flags')
		tlist = map(lambda x: show_test_info(x), tlist)
	print '\n'.join(tlist)

#
# main() starts here
#

if os.environ.has_key('CR_CT_TEST_INFO'):
	# Fork here, since we're the new pidns init and are supposed to
	# collect this namespace's zombies
	pid = os.fork()
	if pid == 0:
		tinfo = eval(os.environ['CR_CT_TEST_INFO'])
		do_run_test(tinfo[0], tinfo[1], tinfo[2], tinfo[3])
	else:
		while True:
			wpid, status = os.wait()
			if wpid == pid:
				break

	sys.exit(0)

p = argparse.ArgumentParser("CRIU test suite")
p.add_argument("--debug", help = "Print what's being executed", action = 'store_true')
p.add_argument("--set", help = "Which set of tests to use", default = 'zdtm')

sp = p.add_subparsers(help = "Use --help for list of actions")

rp = sp.add_parser("run", help = "Run test(s)")
rp.set_defaults(action = run_tests)
rp.add_argument("-a", "--all", action = 'store_true')
rp.add_argument("-t", "--test", help = "Test name", action = 'append')
rp.add_argument("-T", "--tests", help = "Regexp")
rp.add_argument("-f", "--flavor", help = "Flavor to run")
rp.add_argument("-x", "--exclude", help = "Exclude tests from --all run", action = 'append')

rp.add_argument("--sibling", help = "Restore tests as siblings", action = 'store_true')
rp.add_argument("--pre", help = "Do some pre-dumps before dump")
rp.add_argument("--nocr", help = "Do not CR anything, just check the test works", action = 'store_true')
rp.add_argument("--norst", help = "Don't restore tasks, leave them running after dump", action = 'store_true')
rp.add_argument("--iters", help = "Do CR cycle several times before check")

rp.add_argument("--page-server", help = "Use page server dump", action = 'store_true')
rp.add_argument("-p", "--parallel", help = "Run tests in parallel")

rp.add_argument("-k", "--keep-img", help = "Whether or not to keep images after test",
		choices = [ 'always', 'never', 'failed' ], default = 'failed')

lp = sp.add_parser("list", help = "List tests")
lp.set_defaults(action = list_tests)
lp.add_argument('-i', '--info', help = "Show more info about tests", action = 'store_true')

opts = vars(p.parse_args())

if opts['debug']:
	sys.settrace(traceit)

opts['action'](opts)