2015-11-02 22:04:00 +03:00
|
|
|
#!/usr/bin/env python
|
2015-11-03 14:20:34 -07:00
|
|
|
# vim: noet
|
2015-10-05 21:55:00 +03:00
|
|
|
import argparse
|
|
|
|
import os
|
|
|
|
import subprocess
|
|
|
|
import time
|
|
|
|
import tempfile
|
|
|
|
import shutil
|
|
|
|
import re
|
|
|
|
import stat
|
|
|
|
import signal
|
|
|
|
import atexit
|
|
|
|
import sys
|
|
|
|
import linecache
|
2015-10-12 21:54:33 +03:00
|
|
|
import random
|
|
|
|
import string
|
|
|
|
import imp
|
|
|
|
import socket
|
2015-11-12 21:21:00 +03:00
|
|
|
import fcntl
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-29 14:07:00 +03:00
|
|
|
os.chdir(os.path.dirname(os.path.abspath(__file__)))
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
prev_line = None
|
|
|
|
def traceit(f, e, a):
|
|
|
|
if e == "line":
|
|
|
|
lineno = f.f_lineno
|
|
|
|
fil = f.f_globals["__file__"]
|
|
|
|
if fil.endswith("zdtm.py"):
|
|
|
|
global prev_line
|
|
|
|
line = linecache.getline(fil, lineno)
|
|
|
|
if line == prev_line:
|
|
|
|
print " ..."
|
|
|
|
else:
|
|
|
|
prev_line = line
|
|
|
|
print "+%4d: %s" % (lineno, line.rstrip())
|
|
|
|
|
|
|
|
return traceit
|
|
|
|
|
|
|
|
|
|
|
|
# Root dir for ns and uns flavors. All tests
|
|
|
|
# sit in the same dir
|
2015-10-08 17:28:55 +03:00
|
|
|
tests_root = None
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-08 17:28:55 +03:00
|
|
|
def clean_tests_root():
|
|
|
|
global tests_root
|
|
|
|
if tests_root:
|
|
|
|
os.rmdir(tests_root)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-08 17:28:55 +03:00
|
|
|
def make_tests_root():
|
|
|
|
global tests_root
|
|
|
|
if not tests_root:
|
|
|
|
tests_root = tempfile.mkdtemp("", "criu-root-", "/tmp")
|
|
|
|
atexit.register(clean_tests_root)
|
|
|
|
return tests_root
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-28 16:39:00 +03:00
|
|
|
# Report generation
|
|
|
|
|
|
|
|
report_dir = None
|
|
|
|
|
|
|
|
def init_report(path):
|
|
|
|
global report_dir
|
|
|
|
report_dir = path
|
|
|
|
if not os.access(report_dir, os.F_OK):
|
|
|
|
os.makedirs(report_dir)
|
|
|
|
|
|
|
|
def add_to_report(path, tgt_name):
|
|
|
|
global report_dir
|
|
|
|
if report_dir:
|
|
|
|
tgt_path = os.path.join(report_dir, tgt_name)
|
|
|
|
att = 0
|
|
|
|
while os.access(tgt_path, os.F_OK):
|
|
|
|
tgt_path = os.path.join(report_dir, tgt_name + ".%d" % att)
|
|
|
|
att += 1
|
|
|
|
|
|
|
|
if os.path.isdir(path):
|
|
|
|
shutil.copytree(path, tgt_path)
|
|
|
|
else:
|
|
|
|
shutil.copy2(path, tgt_path)
|
|
|
|
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
# Arch we run on
|
|
|
|
arch = os.uname()[4]
|
|
|
|
|
|
|
|
#
|
|
|
|
# Flavors
|
|
|
|
# h -- host, test is run in the same set of namespaces as criu
|
|
|
|
# ns -- namespaces, test is run in itw own set of namespaces
|
|
|
|
# uns -- user namespace, the same as above plus user namespace
|
|
|
|
#
|
|
|
|
|
|
|
|
class host_flavor:
|
|
|
|
def __init__(self, opts):
|
|
|
|
self.name = "host"
|
|
|
|
self.ns = False
|
|
|
|
self.root = None
|
|
|
|
|
2015-12-03 16:08:41 +03:00
|
|
|
def init(self, l_bins, x_bins):
|
2015-10-05 21:55:00 +03:00
|
|
|
pass
|
|
|
|
|
|
|
|
def fini(self):
|
|
|
|
pass
|
|
|
|
|
2015-12-11 19:05:00 +03:00
|
|
|
@staticmethod
|
|
|
|
def clean():
|
|
|
|
pass
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
class ns_flavor:
|
2015-12-11 19:05:00 +03:00
|
|
|
__root_dirs = ["/bin", "/sbin", "/etc", "/lib", "/lib64", "/dev", "/dev/pts", "/dev/net", "/tmp", "/usr", "/proc"]
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
def __init__(self, opts):
|
|
|
|
self.name = "ns"
|
|
|
|
self.ns = True
|
|
|
|
self.uns = False
|
2015-10-08 17:28:55 +03:00
|
|
|
self.root = make_tests_root()
|
zdtm.py: umount root only if it was mounted
We get the stack trace if a test failed:
Test output: ================================
09:11:34.584: 4: PASS
<<< ================================
Traceback (most recent call last):
File "zdtm.py", line 922, in <module>
do_run_test(tinfo[0], tinfo[1], tinfo[2], tinfo[3])
File "zdtm.py", line 696, in do_run_test
t.kill()
File "zdtm.py", line 302, in kill
self.__flavor.fini()
File "zdtm.py", line 168, in fini
subprocess.check_call(["mount", "--make-private", self.root])
File "/usr/lib64/python2.7/subprocess.py", line 540, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['mount', '--make-private', '/tmp/criu-root-YN1t3X']' returned non-zero exit status 32
umount: /tmp/criu-root-YN1t3X: not mounted
Cc: Tycho Andersen <tycho.andersen@canonical.com>
Signed-off-by: Andrew Vagin <avagin@virtuozzo.com>
Acked-by: Tycho Andersen <tycho.andersen@canonical.com>
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-11-24 12:13:00 +03:00
|
|
|
self.root_mounted = False
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-26 13:41:32 +03:00
|
|
|
def __copy_one(self, fname):
|
2015-11-12 11:11:00 +03:00
|
|
|
if not os.access(fname, os.F_OK):
|
|
|
|
raise test_fail_exc("Deps check (%s doesn't exist)" % fname)
|
|
|
|
|
2015-10-26 13:41:32 +03:00
|
|
|
tfname = self.root + fname
|
|
|
|
if not os.access(tfname, os.F_OK):
|
|
|
|
# Copying should be atomic as tests can be
|
|
|
|
# run in parallel
|
|
|
|
try:
|
|
|
|
os.makedirs(self.root + os.path.dirname(fname))
|
|
|
|
except:
|
|
|
|
pass
|
|
|
|
dst = tempfile.mktemp(".tso", "", self.root + os.path.dirname(fname))
|
|
|
|
shutil.copy2(fname, dst)
|
|
|
|
os.rename(dst, tfname)
|
|
|
|
|
|
|
|
def __copy_libs(self, binary):
|
|
|
|
ldd = subprocess.Popen(["ldd", binary], stdout = subprocess.PIPE)
|
2015-10-05 21:55:00 +03:00
|
|
|
xl = re.compile('^(linux-gate.so|linux-vdso(64)?.so|not a dynamic)')
|
|
|
|
|
|
|
|
# This Mayakovsky-style code gets list of libraries a binary
|
|
|
|
# needs minus vdso and gate .so-s
|
|
|
|
libs = map(lambda x: x[1] == '=>' and x[2] or x[0], \
|
|
|
|
map(lambda x: x.split(), \
|
|
|
|
filter(lambda x: not xl.match(x), \
|
|
|
|
map(lambda x: x.strip(), \
|
|
|
|
filter(lambda x: x.startswith('\t'), ldd.stdout.readlines())))))
|
|
|
|
ldd.wait()
|
|
|
|
|
|
|
|
for lib in libs:
|
2015-10-26 13:41:32 +03:00
|
|
|
self.__copy_one(lib)
|
|
|
|
|
2015-12-01 13:03:10 +03:00
|
|
|
def __mknod(self, name, rdev = None):
|
|
|
|
name = "/dev/" + name
|
|
|
|
if not rdev:
|
|
|
|
if not os.access(name, os.F_OK):
|
|
|
|
print "Skipping %s at root" % name
|
|
|
|
return
|
|
|
|
else:
|
|
|
|
rdev = os.stat(name).st_rdev
|
|
|
|
|
|
|
|
name = self.root + name
|
|
|
|
os.mknod(name, stat.S_IFCHR, rdev)
|
|
|
|
os.chmod(name, 0666)
|
|
|
|
|
|
|
|
def __construct_root(self):
|
2015-12-11 19:05:00 +03:00
|
|
|
for dir in self.__root_dirs:
|
2015-12-01 13:03:10 +03:00
|
|
|
os.mkdir(self.root + dir)
|
|
|
|
os.chmod(self.root + dir, 0777)
|
|
|
|
|
|
|
|
for ldir in [ "/bin", "/sbin", "/lib", "/lib64" ]:
|
|
|
|
os.symlink(".." + ldir, self.root + "/usr" + ldir)
|
|
|
|
|
|
|
|
self.__mknod("tty", os.makedev(5, 0))
|
|
|
|
self.__mknod("null", os.makedev(1, 3))
|
|
|
|
self.__mknod("net/tun")
|
|
|
|
self.__mknod("rtc")
|
|
|
|
|
2015-12-03 16:08:41 +03:00
|
|
|
def init(self, l_bins, x_bins):
|
2015-10-26 13:41:32 +03:00
|
|
|
subprocess.check_call(["mount", "--make-private", "--bind", ".", self.root])
|
zdtm.py: umount root only if it was mounted
We get the stack trace if a test failed:
Test output: ================================
09:11:34.584: 4: PASS
<<< ================================
Traceback (most recent call last):
File "zdtm.py", line 922, in <module>
do_run_test(tinfo[0], tinfo[1], tinfo[2], tinfo[3])
File "zdtm.py", line 696, in do_run_test
t.kill()
File "zdtm.py", line 302, in kill
self.__flavor.fini()
File "zdtm.py", line 168, in fini
subprocess.check_call(["mount", "--make-private", self.root])
File "/usr/lib64/python2.7/subprocess.py", line 540, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['mount', '--make-private', '/tmp/criu-root-YN1t3X']' returned non-zero exit status 32
umount: /tmp/criu-root-YN1t3X: not mounted
Cc: Tycho Andersen <tycho.andersen@canonical.com>
Signed-off-by: Andrew Vagin <avagin@virtuozzo.com>
Acked-by: Tycho Andersen <tycho.andersen@canonical.com>
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-11-24 12:13:00 +03:00
|
|
|
self.root_mounted = True
|
2015-10-26 13:41:32 +03:00
|
|
|
|
|
|
|
if not os.access(self.root + "/.constructed", os.F_OK):
|
2015-11-12 21:21:00 +03:00
|
|
|
with open(os.path.abspath(__file__)) as o:
|
|
|
|
fcntl.flock(o, fcntl.LOCK_EX)
|
|
|
|
if not os.access(self.root + "/.constructed", os.F_OK):
|
2015-12-03 16:08:41 +03:00
|
|
|
print "Construct root for %s" % l_bins[0]
|
2015-12-01 13:03:10 +03:00
|
|
|
self.__construct_root()
|
2015-11-12 21:21:00 +03:00
|
|
|
os.mknod(self.root + "/.constructed", stat.S_IFREG | 0600)
|
2015-10-26 13:41:32 +03:00
|
|
|
|
2015-12-03 16:08:41 +03:00
|
|
|
for b in l_bins:
|
|
|
|
self.__copy_libs(b)
|
|
|
|
for b in x_bins:
|
|
|
|
self.__copy_one(b)
|
|
|
|
self.__copy_libs(b)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
def fini(self):
|
zdtm.py: umount root only if it was mounted
We get the stack trace if a test failed:
Test output: ================================
09:11:34.584: 4: PASS
<<< ================================
Traceback (most recent call last):
File "zdtm.py", line 922, in <module>
do_run_test(tinfo[0], tinfo[1], tinfo[2], tinfo[3])
File "zdtm.py", line 696, in do_run_test
t.kill()
File "zdtm.py", line 302, in kill
self.__flavor.fini()
File "zdtm.py", line 168, in fini
subprocess.check_call(["mount", "--make-private", self.root])
File "/usr/lib64/python2.7/subprocess.py", line 540, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['mount', '--make-private', '/tmp/criu-root-YN1t3X']' returned non-zero exit status 32
umount: /tmp/criu-root-YN1t3X: not mounted
Cc: Tycho Andersen <tycho.andersen@canonical.com>
Signed-off-by: Andrew Vagin <avagin@virtuozzo.com>
Acked-by: Tycho Andersen <tycho.andersen@canonical.com>
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-11-24 12:13:00 +03:00
|
|
|
if self.root_mounted:
|
|
|
|
subprocess.check_call(["mount", "--make-private", self.root])
|
|
|
|
subprocess.check_call(["umount", "-l", self.root])
|
|
|
|
self.root_mounted = False
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-12-11 19:05:00 +03:00
|
|
|
@staticmethod
|
|
|
|
def clean():
|
|
|
|
for d in ns_flavor.__root_dirs:
|
|
|
|
p = './' + d
|
|
|
|
print 'Remove %s' % p
|
|
|
|
if os.access(p, os.F_OK):
|
|
|
|
shutil.rmtree('./' + d)
|
|
|
|
|
|
|
|
if os.access('./.constructed', os.F_OK):
|
|
|
|
os.unlink('./.constructed')
|
|
|
|
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
class userns_flavor(ns_flavor):
|
|
|
|
def __init__(self, opts):
|
|
|
|
ns_flavor.__init__(self, opts)
|
|
|
|
self.name = "userns"
|
|
|
|
self.uns = True
|
|
|
|
|
2015-12-03 16:08:41 +03:00
|
|
|
def init(self, l_bins, x_bins):
|
2015-10-28 12:36:40 +04:00
|
|
|
# To be able to create roots_yard in CRIU
|
|
|
|
os.chmod(".", os.stat(".").st_mode | 0077)
|
2015-12-03 16:08:41 +03:00
|
|
|
ns_flavor.init(self, l_bins, x_bins)
|
2015-10-28 12:36:40 +04:00
|
|
|
|
2015-12-11 19:05:00 +03:00
|
|
|
@staticmethod
|
|
|
|
def clean():
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
flavors = { 'h': host_flavor, 'ns': ns_flavor, 'uns': userns_flavor }
|
|
|
|
|
|
|
|
#
|
|
|
|
# Helpers
|
|
|
|
#
|
|
|
|
|
|
|
|
def tail(path):
|
|
|
|
p = subprocess.Popen(['tail', '-n1', path],
|
|
|
|
stdout = subprocess.PIPE)
|
|
|
|
return p.stdout.readline()
|
|
|
|
|
|
|
|
def rpidfile(path):
|
|
|
|
return open(path).readline().strip()
|
|
|
|
|
2015-10-28 10:42:27 +03:00
|
|
|
def wait_pid_die(pid, who, tmo = 30):
|
2015-10-05 21:55:00 +03:00
|
|
|
stime = 0.1
|
|
|
|
while stime < tmo:
|
|
|
|
try:
|
|
|
|
os.kill(int(pid), 0)
|
|
|
|
except: # Died
|
|
|
|
break
|
|
|
|
|
|
|
|
print "Wait for %s to die for %f" % (who, stime)
|
|
|
|
time.sleep(stime)
|
|
|
|
stime *= 2
|
|
|
|
else:
|
|
|
|
raise test_fail_exc("%s die" % who)
|
|
|
|
|
|
|
|
def test_flag(tdesc, flag):
|
|
|
|
return flag in tdesc.get('flags', '').split()
|
|
|
|
|
|
|
|
#
|
|
|
|
# Exception thrown when something inside the test goes wrong,
|
|
|
|
# e.g. test doesn't start, criu returns with non zero code or
|
|
|
|
# test checks fail
|
|
|
|
#
|
|
|
|
|
|
|
|
class test_fail_exc:
|
|
|
|
def __init__(self, step):
|
|
|
|
self.step = step
|
|
|
|
|
2015-10-20 17:10:22 +04:00
|
|
|
class test_fail_expected_exc:
|
2015-10-13 18:06:44 +03:00
|
|
|
def __init__(self, cr_action):
|
|
|
|
self.cr_action = cr_action
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
#
|
|
|
|
# A test from zdtm/ directory.
|
|
|
|
#
|
|
|
|
|
|
|
|
class zdtm_test:
|
|
|
|
def __init__(self, name, desc, flavor):
|
|
|
|
self.__name = name
|
|
|
|
self.__desc = desc
|
|
|
|
self.__make_action('cleanout')
|
|
|
|
self.__pid = 0
|
|
|
|
self.__flavor = flavor
|
2015-12-03 16:08:41 +03:00
|
|
|
self._bins = [ name ]
|
2015-12-08 15:19:25 +03:00
|
|
|
self._env = {}
|
2015-12-09 15:57:08 +03:00
|
|
|
self._deps = desc.get('deps', [])
|
2015-10-08 15:22:53 +03:00
|
|
|
self.auto_reap = True
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
def __make_action(self, act, env = None, root = None):
|
2015-10-30 18:50:00 +03:00
|
|
|
sys.stdout.flush() # Not to let make's messages appear before ours
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
tpath = self.__name + '.' + act
|
2015-10-05 21:55:00 +03:00
|
|
|
s_args = ['make', '--no-print-directory', \
|
|
|
|
'-C', os.path.dirname(tpath), \
|
|
|
|
os.path.basename(tpath)]
|
|
|
|
|
|
|
|
if env:
|
|
|
|
env = dict(os.environ, **env)
|
|
|
|
|
|
|
|
s = subprocess.Popen(s_args, env = env, cwd = root)
|
|
|
|
s.wait()
|
|
|
|
|
|
|
|
def __pidfile(self):
|
|
|
|
if self.__flavor.ns:
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
return self.__name + '.init.pid'
|
2015-10-05 21:55:00 +03:00
|
|
|
else:
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
return self.__name + '.pid'
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
def __wait_task_die(self):
|
|
|
|
wait_pid_die(int(self.__pid), self.__name)
|
|
|
|
|
2015-12-11 17:39:05 +03:00
|
|
|
def __add_wperms(self):
|
|
|
|
# Add write perms for .out and .pid files
|
|
|
|
for b in self._bins:
|
|
|
|
p = os.path.dirname(b)
|
|
|
|
os.chmod(p, os.stat(p).st_mode | 0222)
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
def start(self):
|
2015-12-09 15:57:08 +03:00
|
|
|
self.__flavor.init(self._bins, self._deps)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
print "Start test"
|
|
|
|
|
2015-12-03 16:08:41 +03:00
|
|
|
env = self._env
|
2015-11-11 19:23:00 +03:00
|
|
|
env['ZDTM_THREAD_BOMB'] = "5"
|
2015-10-05 21:55:00 +03:00
|
|
|
if not test_flag(self.__desc, 'suid'):
|
|
|
|
env['ZDTM_UID'] = "18943"
|
|
|
|
env['ZDTM_GID'] = "58467"
|
|
|
|
env['ZDTM_GROUPS'] = "27495 48244"
|
2015-12-11 17:39:05 +03:00
|
|
|
self.__add_wperms()
|
2015-10-05 21:55:00 +03:00
|
|
|
else:
|
|
|
|
print "Test is SUID"
|
|
|
|
|
|
|
|
if self.__flavor.ns:
|
|
|
|
env['ZDTM_NEWNS'] = "1"
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
env['ZDTM_PIDFILE'] = os.path.realpath(self.__name + '.init.pid')
|
2015-10-05 21:55:00 +03:00
|
|
|
env['ZDTM_ROOT'] = self.__flavor.root
|
|
|
|
|
|
|
|
if self.__flavor.uns:
|
|
|
|
env['ZDTM_USERNS'] = "1"
|
2015-12-11 17:39:05 +03:00
|
|
|
self.__add_wperms()
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
self.__make_action('pid', env, self.__flavor.root)
|
|
|
|
|
|
|
|
try:
|
|
|
|
os.kill(int(self.getpid()), 0)
|
|
|
|
except:
|
|
|
|
raise test_fail_exc("start")
|
|
|
|
|
|
|
|
def kill(self, sig = signal.SIGKILL):
|
|
|
|
if self.__pid:
|
|
|
|
os.kill(int(self.__pid), sig)
|
|
|
|
self.gone(sig == signal.SIGKILL)
|
|
|
|
|
|
|
|
self.__flavor.fini()
|
|
|
|
|
|
|
|
def stop(self):
|
2015-10-20 15:52:59 +04:00
|
|
|
self.getpid() # Read the pid from pidfile back
|
2015-10-05 21:55:00 +03:00
|
|
|
self.kill(signal.SIGTERM)
|
|
|
|
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
res = tail(self.__name + '.out')
|
2015-10-05 21:55:00 +03:00
|
|
|
if not 'PASS' in res.split():
|
|
|
|
raise test_fail_exc("result check")
|
|
|
|
|
|
|
|
def getpid(self):
|
|
|
|
if self.__pid == 0:
|
|
|
|
self.__pid = rpidfile(self.__pidfile())
|
|
|
|
|
|
|
|
return self.__pid
|
|
|
|
|
|
|
|
def getname(self):
|
|
|
|
return self.__name
|
|
|
|
|
2015-10-12 21:53:13 +03:00
|
|
|
def __getcropts(self):
|
2015-10-05 21:55:00 +03:00
|
|
|
opts = self.__desc.get('opts', '').split() + ["--pidfile", os.path.realpath(self.__pidfile())]
|
|
|
|
if self.__flavor.ns:
|
|
|
|
opts += ["--root", self.__flavor.root]
|
2015-10-08 23:09:32 +03:00
|
|
|
if test_flag(self.__desc, 'crlib'):
|
|
|
|
opts += ["-L", os.path.dirname(os.path.realpath(self.__name)) + '/lib']
|
2015-10-05 21:55:00 +03:00
|
|
|
return opts
|
|
|
|
|
2015-10-12 21:53:13 +03:00
|
|
|
def getdopts(self):
|
|
|
|
return self.__getcropts()
|
|
|
|
|
|
|
|
def getropts(self):
|
|
|
|
return self.__getcropts()
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
def gone(self, force = True):
|
2015-10-08 15:22:53 +03:00
|
|
|
if not self.auto_reap:
|
|
|
|
pid, status = os.waitpid(int(self.__pid), 0)
|
|
|
|
if pid != int(self.__pid):
|
|
|
|
raise test_fail_exc("kill pid mess")
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
self.__wait_task_die()
|
|
|
|
self.__pid = 0
|
|
|
|
if force or self.__flavor.ns:
|
|
|
|
os.unlink(self.__pidfile())
|
|
|
|
|
|
|
|
def print_output(self):
|
2015-10-08 23:07:56 +03:00
|
|
|
if os.access(self.__name + '.out', os.R_OK):
|
|
|
|
print "Test output: " + "=" * 32
|
|
|
|
print open(self.__name + '.out').read()
|
|
|
|
print " <<< " + "=" * 32
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-20 15:52:59 +04:00
|
|
|
def static(self):
|
|
|
|
return self.__name.split('/')[2] == 'static'
|
|
|
|
|
2015-10-20 17:10:22 +04:00
|
|
|
def blocking(self):
|
|
|
|
return test_flag(self.__desc, 'crfail')
|
|
|
|
|
2015-10-29 14:15:00 +03:00
|
|
|
@staticmethod
|
|
|
|
def available():
|
|
|
|
if not os.access("zdtm_ct", os.X_OK):
|
|
|
|
subprocess.check_call(["make", "zdtm_ct"])
|
|
|
|
if not os.access("zdtm/lib/libzdtmtst.a", os.F_OK):
|
|
|
|
subprocess.check_call(["make", "-C", "zdtm/"])
|
2015-10-30 15:55:41 +04:00
|
|
|
subprocess.check_call(["flock", "zdtm_mount_cgroups", "./zdtm_mount_cgroups"])
|
2015-10-20 15:52:59 +04:00
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-12 21:54:33 +03:00
|
|
|
class inhfd_test:
|
|
|
|
def __init__(self, name, desc, flavor):
|
|
|
|
self.__name = os.path.basename(name)
|
|
|
|
print "Load %s" % name
|
|
|
|
self.__fdtyp = imp.load_source(self.__name, name)
|
|
|
|
self.__my_file = None
|
|
|
|
self.__peer_pid = 0
|
|
|
|
self.__peer_file = None
|
|
|
|
self.__peer_file_name = None
|
|
|
|
self.__dump_opts = None
|
|
|
|
|
|
|
|
def start(self):
|
|
|
|
self.__message = "".join([random.choice(string.ascii_letters) for _ in range(16)])
|
|
|
|
(self.__my_file, peer_file) = self.__fdtyp.create_fds()
|
|
|
|
|
|
|
|
# Check FDs returned for inter-connection
|
|
|
|
self.__my_file.write(self.__message)
|
|
|
|
self.__my_file.flush()
|
|
|
|
if peer_file.read(16) != self.__message:
|
|
|
|
raise test_fail_exc("FDs screwup")
|
|
|
|
|
|
|
|
start_pipe = os.pipe()
|
|
|
|
self.__peer_pid = os.fork()
|
|
|
|
if self.__peer_pid == 0:
|
|
|
|
os.setsid()
|
|
|
|
os.close(0)
|
|
|
|
os.close(1)
|
|
|
|
os.close(2)
|
|
|
|
self.__my_file.close()
|
|
|
|
os.close(start_pipe[0])
|
|
|
|
os.close(start_pipe[1])
|
|
|
|
try:
|
|
|
|
data = peer_file.read(16)
|
|
|
|
except:
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
sys.exit(data == self.__message and 42 or 2)
|
|
|
|
|
|
|
|
os.close(start_pipe[1])
|
|
|
|
os.read(start_pipe[0], 12)
|
|
|
|
os.close(start_pipe[0])
|
|
|
|
|
|
|
|
self.__peer_file_name = self.__fdtyp.filename(peer_file)
|
|
|
|
self.__dump_opts = self.__fdtyp.dump_opts(peer_file)
|
|
|
|
|
|
|
|
def stop(self):
|
|
|
|
self.__my_file.write(self.__message)
|
|
|
|
self.__my_file.flush()
|
|
|
|
pid, status = os.waitpid(self.__peer_pid, 0)
|
|
|
|
if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 42:
|
|
|
|
raise test_fail_exc("test failed with %d" % status)
|
|
|
|
|
|
|
|
def kill(self):
|
|
|
|
if self.__peer_pid:
|
|
|
|
os.kill(self.__peer_pid, signal.SIGKILL)
|
|
|
|
|
|
|
|
def getname(self):
|
|
|
|
return self.__name
|
|
|
|
|
|
|
|
def getpid(self):
|
|
|
|
return "%s" % self.__peer_pid
|
|
|
|
|
|
|
|
def gone(self, force = True):
|
|
|
|
os.waitpid(self.__peer_pid, 0)
|
|
|
|
wait_pid_die(self.__peer_pid, self.__name)
|
|
|
|
self.__my_file = None
|
|
|
|
self.__peer_file = None
|
|
|
|
|
|
|
|
def getdopts(self):
|
|
|
|
return self.__dump_opts
|
|
|
|
|
|
|
|
def getropts(self):
|
|
|
|
(self.__my_file, self.__peer_file) = self.__fdtyp.create_fds()
|
|
|
|
return ["--restore-sibling", "--inherit-fd", "fd[%d]:%s" % (self.__peer_file.fileno(), self.__peer_file_name)]
|
|
|
|
|
|
|
|
def print_output(self):
|
|
|
|
pass
|
|
|
|
|
2015-10-20 15:52:59 +04:00
|
|
|
def static(self):
|
|
|
|
return True
|
|
|
|
|
2015-10-20 17:10:22 +04:00
|
|
|
def blocking(self):
|
|
|
|
return False
|
|
|
|
|
2015-10-29 14:15:00 +03:00
|
|
|
@staticmethod
|
|
|
|
def available():
|
|
|
|
pass
|
|
|
|
|
2015-10-12 21:54:33 +03:00
|
|
|
|
2015-12-03 16:09:04 +03:00
|
|
|
class groups_test(zdtm_test):
|
|
|
|
def __init__(self, name, desc, flavor):
|
|
|
|
zdtm_test.__init__(self, 'zdtm/lib/groups', desc, flavor)
|
|
|
|
if flavor.ns:
|
|
|
|
self.__real_name = name
|
|
|
|
self.__subs = map(lambda x: x.strip(), open(name).readlines())
|
|
|
|
print "Subs:\n%s" % '\n'.join(self.__subs)
|
|
|
|
else:
|
|
|
|
self.__real_name = ''
|
|
|
|
self.__subs = []
|
|
|
|
|
|
|
|
self._bins += self.__subs
|
2015-12-09 15:57:08 +03:00
|
|
|
self._deps += get_test_desc('zdtm/lib/groups')['deps']
|
2015-12-03 16:09:04 +03:00
|
|
|
self._env = { 'ZDTM_TESTS': self.__real_name }
|
|
|
|
|
|
|
|
def __get_start_cmd(self, name):
|
|
|
|
tdir = os.path.dirname(name)
|
|
|
|
tname = os.path.basename(name)
|
|
|
|
|
|
|
|
s_args = ['make', '--no-print-directory', '-C', tdir]
|
|
|
|
subprocess.check_call(s_args + [ tname + '.cleanout' ])
|
|
|
|
s = subprocess.Popen(s_args + [ '--dry-run', tname + '.pid' ], stdout = subprocess.PIPE)
|
|
|
|
cmd = s.stdout.readlines().pop().strip()
|
|
|
|
s.wait()
|
|
|
|
|
|
|
|
return 'cd /' + tdir + ' && ' + cmd
|
|
|
|
|
|
|
|
def start(self):
|
|
|
|
if (self.__subs):
|
|
|
|
with open(self.__real_name + '.start', 'w') as f:
|
|
|
|
for test in self.__subs:
|
|
|
|
cmd = self.__get_start_cmd(test)
|
|
|
|
f.write(cmd + '\n')
|
|
|
|
|
|
|
|
with open(self.__real_name + '.stop', 'w') as f:
|
|
|
|
for test in self.__subs:
|
|
|
|
f.write('kill -TERM `cat /%s.pid`\n' % test)
|
|
|
|
|
|
|
|
zdtm_test.start(self)
|
|
|
|
|
|
|
|
def stop(self):
|
|
|
|
zdtm_test.stop(self)
|
|
|
|
|
|
|
|
for test in self.__subs:
|
|
|
|
res = tail(test + '.out')
|
|
|
|
if not 'PASS' in res.split():
|
|
|
|
raise test_fail_exc("sub %s result check" % test)
|
|
|
|
|
|
|
|
|
|
|
|
test_classes = { 'zdtm': zdtm_test, 'inhfd': inhfd_test, 'groups': groups_test }
|
2015-10-08 17:28:55 +03:00
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
#
|
|
|
|
# CRIU when launched using CLI
|
|
|
|
#
|
|
|
|
|
2015-10-29 14:15:00 +03:00
|
|
|
criu_bin = "../criu"
|
2015-10-05 21:55:00 +03:00
|
|
|
class criu_cli:
|
2015-10-15 17:27:00 +03:00
|
|
|
def __init__(self, opts):
|
|
|
|
self.__test = None
|
|
|
|
self.__dump_path = None
|
2015-10-05 21:55:00 +03:00
|
|
|
self.__iter = 0
|
2015-10-27 16:29:00 +03:00
|
|
|
self.__prev_dump_iter = None
|
2015-10-05 21:55:00 +03:00
|
|
|
self.__page_server = (opts['page_server'] and True or False)
|
2015-10-08 15:22:53 +03:00
|
|
|
self.__restore_sibling = (opts['sibling'] and True or False)
|
2015-10-13 18:06:44 +03:00
|
|
|
self.__fault = (opts['fault'])
|
2015-11-09 21:27:00 +03:00
|
|
|
self.__sat = (opts['sat'] and True or False)
|
2015-11-26 21:55:00 +03:00
|
|
|
self.__dedup = (opts['dedup'] and True or False)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-28 16:39:00 +03:00
|
|
|
def logs(self):
|
|
|
|
return self.__dump_path
|
|
|
|
|
2015-10-15 17:27:00 +03:00
|
|
|
def set_test(self, test):
|
|
|
|
self.__test = test
|
|
|
|
self.__dump_path = "dump/" + test.getname() + "/" + test.getpid()
|
2015-11-03 14:20:36 -07:00
|
|
|
if os.path.exists(self.__dump_path):
|
|
|
|
for i in xrange(100):
|
|
|
|
newpath = self.__dump_path + "." + str(i)
|
|
|
|
if not os.path.exists(newpath):
|
|
|
|
os.rename(self.__dump_path, newpath)
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
raise test_fail_exc("couldn't find dump dir %s" % self.__dump_path)
|
|
|
|
|
2015-10-15 17:27:00 +03:00
|
|
|
os.makedirs(self.__dump_path)
|
|
|
|
|
|
|
|
def cleanup(self):
|
|
|
|
if self.__dump_path:
|
2015-11-03 14:20:35 -07:00
|
|
|
print "Removing %s" % self.__dump_path
|
2015-10-15 17:27:00 +03:00
|
|
|
shutil.rmtree(self.__dump_path)
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
def __ddir(self):
|
|
|
|
return os.path.join(self.__dump_path, "%d" % self.__iter)
|
|
|
|
|
|
|
|
@staticmethod
|
2015-11-09 21:27:00 +03:00
|
|
|
def __criu(action, args, fault = None, strace = []):
|
2015-10-13 18:06:44 +03:00
|
|
|
env = None
|
|
|
|
if fault:
|
|
|
|
print "Forcing %s fault" % fault
|
|
|
|
env = dict(os.environ, CRIU_FAULT = fault)
|
2015-11-09 21:27:00 +03:00
|
|
|
cr = subprocess.Popen(strace + [criu_bin, action] + args, env = env)
|
2015-10-05 21:55:00 +03:00
|
|
|
return cr.wait()
|
|
|
|
|
|
|
|
def __criu_act(self, action, opts, log = None):
|
|
|
|
if not log:
|
|
|
|
log = action + ".log"
|
|
|
|
|
|
|
|
s_args = ["-o", log, "-D", self.__ddir(), "-v4"] + opts
|
|
|
|
|
2015-10-30 18:51:00 +03:00
|
|
|
with open(os.path.join(self.__ddir(), action + '.cropt'), 'w') as f:
|
|
|
|
f.write(' '.join(s_args) + '\n')
|
|
|
|
print "Run criu " + action
|
|
|
|
|
2015-11-09 21:27:00 +03:00
|
|
|
strace = []
|
|
|
|
if self.__sat:
|
2015-12-09 19:04:58 +03:00
|
|
|
fname = os.path.join(self.__ddir(), action + '.strace')
|
|
|
|
print_fname(fname, 'strace')
|
|
|
|
strace = ["strace", "-o", fname, '-T']
|
2015-11-09 21:27:00 +03:00
|
|
|
if action == 'restore':
|
|
|
|
strace += [ '-f' ]
|
|
|
|
s_args += [ '--action-script', os.getcwd() + '/../scripts/fake-restore.sh' ]
|
|
|
|
|
|
|
|
ret = self.__criu(action, s_args, self.__fault, strace)
|
2015-11-10 16:01:07 +03:00
|
|
|
grep_errors(os.path.join(self.__ddir(), log))
|
2015-10-05 21:55:00 +03:00
|
|
|
if ret != 0:
|
2015-11-09 21:27:00 +03:00
|
|
|
if self.__fault or self.__test.blocking() or (self.__sat and action == 'restore'):
|
2015-10-20 17:10:22 +04:00
|
|
|
raise test_fail_expected_exc(action)
|
2015-10-13 18:06:44 +03:00
|
|
|
else:
|
|
|
|
raise test_fail_exc("CRIU %s" % action)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
def dump(self, action, opts = []):
|
|
|
|
self.__iter += 1
|
|
|
|
os.mkdir(self.__ddir())
|
|
|
|
|
|
|
|
a_opts = ["-t", self.__test.getpid()]
|
2015-10-27 16:29:00 +03:00
|
|
|
if self.__prev_dump_iter:
|
|
|
|
a_opts += ["--prev-images-dir", "../%d" % self.__prev_dump_iter, "--track-mem"]
|
|
|
|
self.__prev_dump_iter = self.__iter
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
if self.__page_server:
|
|
|
|
print "Adding page server"
|
2015-11-26 21:55:00 +03:00
|
|
|
|
|
|
|
ps_opts = [ "--port", "12345", "--daemon", "--pidfile", "ps.pid" ]
|
|
|
|
if self.__dedup:
|
|
|
|
ps_opts += [ "--auto-dedup" ]
|
|
|
|
|
|
|
|
self.__criu_act("page-server", opts = ps_opts)
|
2015-10-05 21:55:00 +03:00
|
|
|
a_opts += ["--page-server", "--address", "127.0.0.1", "--port", "12345"]
|
|
|
|
|
2015-10-12 21:53:13 +03:00
|
|
|
a_opts += self.__test.getdopts()
|
|
|
|
|
2015-11-26 21:55:00 +03:00
|
|
|
if self.__dedup:
|
|
|
|
a_opts += [ "--auto-dedup" ]
|
|
|
|
|
2015-10-12 21:53:13 +03:00
|
|
|
self.__criu_act(action, opts = a_opts + opts)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
if self.__page_server:
|
|
|
|
wait_pid_die(int(rpidfile(self.__ddir() + "/ps.pid")), "page server")
|
|
|
|
|
|
|
|
def restore(self):
|
2015-10-08 15:22:53 +03:00
|
|
|
r_opts = []
|
|
|
|
if self.__restore_sibling:
|
|
|
|
r_opts = ["--restore-sibling"]
|
|
|
|
self.__test.auto_reap = False
|
2015-10-12 21:53:13 +03:00
|
|
|
r_opts += self.__test.getropts()
|
|
|
|
|
2015-10-27 16:29:00 +03:00
|
|
|
self.__prev_dump_iter = None
|
2015-10-12 21:53:13 +03:00
|
|
|
self.__criu_act("restore", opts = r_opts + ["--restore-detached"])
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def check(feature):
|
|
|
|
return criu_cli.__criu("check", ["-v0", "--feature", feature]) == 0
|
|
|
|
|
2015-10-29 14:15:00 +03:00
|
|
|
@staticmethod
|
|
|
|
def available():
|
|
|
|
if not os.access(criu_bin, os.X_OK):
|
|
|
|
print "CRIU binary not built"
|
|
|
|
sys.exit(1)
|
|
|
|
|
2015-10-08 23:08:18 +03:00
|
|
|
|
|
|
|
def try_run_hook(test, args):
|
|
|
|
hname = test.getname() + '.hook'
|
|
|
|
if os.access(hname, os.X_OK):
|
|
|
|
print "Running %s(%s)" % (hname, ', '.join(args))
|
|
|
|
hook = subprocess.Popen([hname] + args)
|
|
|
|
if hook.wait() != 0:
|
|
|
|
raise test_fail_exc("hook " + " ".join(args))
|
|
|
|
|
2015-12-09 19:05:57 +03:00
|
|
|
#
|
|
|
|
# Step by step execution
|
|
|
|
#
|
|
|
|
|
|
|
|
do_sbs = False
|
|
|
|
|
|
|
|
def init_sbs():
|
|
|
|
if sys.stdout.isatty():
|
|
|
|
global do_sbs
|
|
|
|
do_sbs = True
|
|
|
|
else:
|
|
|
|
print "Can't do step-by-step in this runtime"
|
|
|
|
|
|
|
|
def sbs(what):
|
|
|
|
if do_sbs:
|
|
|
|
raw_input("Pause at %s. Press any key to continue." % what)
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
#
|
|
|
|
# Main testing entity -- dump (probably with pre-dumps) and restore
|
|
|
|
#
|
|
|
|
|
2015-10-27 16:29:00 +03:00
|
|
|
def iter_parm(opt, dflt):
|
|
|
|
x = ((opt or str(dflt)) + ":0").split(':')
|
|
|
|
return (xrange(0, int(x[0])), float(x[1]))
|
|
|
|
|
2015-10-15 17:27:00 +03:00
|
|
|
def cr(cr_api, test, opts):
|
2015-10-05 21:55:00 +03:00
|
|
|
if opts['nocr']:
|
|
|
|
return
|
|
|
|
|
2015-10-15 17:27:00 +03:00
|
|
|
cr_api.set_test(test)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-27 16:29:00 +03:00
|
|
|
iters = iter_parm(opts['iters'], 1)
|
|
|
|
for i in iters[0]:
|
|
|
|
pres = iter_parm(opts['pre'], 0)
|
|
|
|
for p in pres[0]:
|
2015-11-12 18:11:38 +04:00
|
|
|
if opts['snaps']:
|
|
|
|
cr_api.dump("dump", opts = ["--leave-running"])
|
|
|
|
else:
|
|
|
|
cr_api.dump("pre-dump")
|
2015-10-27 16:29:00 +03:00
|
|
|
time.sleep(pres[1])
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-12-09 19:05:57 +03:00
|
|
|
sbs('pre-dump')
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
if opts['norst']:
|
|
|
|
cr_api.dump("dump", opts = ["--leave-running"])
|
|
|
|
else:
|
|
|
|
cr_api.dump("dump")
|
|
|
|
test.gone()
|
2015-12-09 19:05:57 +03:00
|
|
|
sbs('pre-restore')
|
2015-10-08 23:08:18 +03:00
|
|
|
try_run_hook(test, ["--pre-restore"])
|
2015-10-05 21:55:00 +03:00
|
|
|
cr_api.restore()
|
2015-12-09 19:05:57 +03:00
|
|
|
sbs('post-restore')
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-27 16:29:00 +03:00
|
|
|
time.sleep(iters[1])
|
|
|
|
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
# Additional checks that can be done outside of test process
|
|
|
|
|
|
|
|
def get_maps(test):
|
|
|
|
maps = [[0,0]]
|
|
|
|
last = 0
|
|
|
|
for mp in open("/proc/%s/maps" % test.getpid()).readlines():
|
|
|
|
m = map(lambda x: int('0x' + x, 0), mp.split()[0].split('-'))
|
|
|
|
if maps[last][1] == m[0]:
|
|
|
|
maps[last][1] = m[1]
|
|
|
|
else:
|
|
|
|
maps.append(m)
|
|
|
|
last += 1
|
|
|
|
maps.pop(0)
|
|
|
|
return maps
|
|
|
|
|
|
|
|
def get_fds(test):
|
|
|
|
return map(lambda x: int(x), os.listdir("/proc/%s/fdinfo" % test.getpid()))
|
|
|
|
|
|
|
|
def cmp_lists(m1, m2):
|
2015-11-03 10:38:48 +03:00
|
|
|
return len(m1) != len(m2) or filter(lambda x: x[0] != x[1], zip(m1, m2))
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
def get_visible_state(test):
|
2015-10-20 15:52:59 +04:00
|
|
|
if test.static():
|
|
|
|
fds = get_fds(test)
|
|
|
|
maps = get_maps(test)
|
|
|
|
return (fds, maps)
|
|
|
|
else:
|
|
|
|
return ([], [])
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
def check_visible_state(test, state):
|
|
|
|
new = get_visible_state(test)
|
|
|
|
if cmp_lists(new[0], state[0]):
|
|
|
|
raise test_fail_exc("fds compare")
|
|
|
|
if cmp_lists(new[1], state[1]):
|
2015-11-03 10:40:19 +03:00
|
|
|
s_new = set(map(lambda x: '%x-%x' % (x[0], x[1]), new[1]))
|
|
|
|
s_old = set(map(lambda x: '%x-%x' % (x[0], x[1]), state[1]))
|
2015-11-02 22:19:02 +04:00
|
|
|
|
|
|
|
print "Old maps lost:"
|
|
|
|
print s_old - s_new
|
|
|
|
print "New maps appeared:"
|
|
|
|
print s_new - s_old
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
raise test_fail_exc("maps compare")
|
|
|
|
|
|
|
|
def do_run_test(tname, tdesc, flavs, opts):
|
2015-10-08 17:28:55 +03:00
|
|
|
tcname = tname.split('/')[0]
|
|
|
|
tclass = test_classes.get(tcname, None)
|
|
|
|
if not tclass:
|
|
|
|
print "Unknown test class %s" % tcname
|
|
|
|
return
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-28 16:39:00 +03:00
|
|
|
if opts['report']:
|
|
|
|
init_report(opts['report'])
|
2015-12-09 19:05:57 +03:00
|
|
|
if opts['sbs']:
|
|
|
|
init_sbs()
|
2015-10-28 16:39:00 +03:00
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
for f in flavs:
|
2015-10-13 11:09:00 +03:00
|
|
|
print
|
|
|
|
print_sep("Run %s in %s" % (tname, f))
|
2015-10-05 21:55:00 +03:00
|
|
|
flav = flavors[f](opts)
|
2015-10-08 17:28:55 +03:00
|
|
|
t = tclass(tname, tdesc, flav)
|
2015-10-15 17:27:00 +03:00
|
|
|
cr_api = criu_cli(opts)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
try:
|
|
|
|
t.start()
|
|
|
|
s = get_visible_state(t)
|
2015-10-13 18:06:44 +03:00
|
|
|
try:
|
|
|
|
cr(cr_api, t, opts)
|
2015-10-20 17:10:22 +04:00
|
|
|
except test_fail_expected_exc as e:
|
2015-10-13 18:06:44 +03:00
|
|
|
if e.cr_action == "dump":
|
|
|
|
t.stop()
|
|
|
|
try_run_hook(t, ["--fault", e.cr_action])
|
|
|
|
else:
|
|
|
|
check_visible_state(t, s)
|
|
|
|
t.stop()
|
|
|
|
try_run_hook(t, ["--clean"])
|
2015-10-05 21:55:00 +03:00
|
|
|
except test_fail_exc as e:
|
2015-10-28 17:55:30 +03:00
|
|
|
print_sep("Test %s FAIL at %s" % (tname, e.step), '#')
|
2015-10-05 21:55:00 +03:00
|
|
|
t.print_output()
|
|
|
|
t.kill()
|
2015-10-28 16:39:00 +03:00
|
|
|
add_to_report(cr_api.logs(), "cr_logs")
|
2015-10-15 17:27:00 +03:00
|
|
|
if opts['keep_img'] == 'never':
|
|
|
|
cr_api.cleanup()
|
2015-10-05 21:55:00 +03:00
|
|
|
# This exit does two things -- exits from subprocess and
|
|
|
|
# aborts the main script execution on the 1st error met
|
|
|
|
sys.exit(1)
|
|
|
|
else:
|
2015-10-15 17:27:00 +03:00
|
|
|
if opts['keep_img'] != 'always':
|
|
|
|
cr_api.cleanup()
|
2015-10-13 11:09:00 +03:00
|
|
|
print_sep("Test %s PASS" % tname)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
class launcher:
|
2015-10-30 18:50:00 +03:00
|
|
|
def __init__(self, opts, nr_tests):
|
2015-10-05 21:55:00 +03:00
|
|
|
self.__opts = opts
|
2015-10-30 18:50:00 +03:00
|
|
|
self.__total = nr_tests
|
|
|
|
self.__nr = 0
|
2015-10-28 16:39:00 +03:00
|
|
|
self.__max = int(opts['parallel'] or 1)
|
2015-10-05 21:55:00 +03:00
|
|
|
self.__subs = {}
|
|
|
|
self.__fail = False
|
|
|
|
|
2015-10-30 18:50:00 +03:00
|
|
|
def __show_progress(self):
|
|
|
|
perc = self.__nr * 16 / self.__total
|
|
|
|
print "=== Run %d/%d %s" % (self.__nr, self.__total, '=' * perc + '-' * (16 - perc))
|
|
|
|
|
2015-12-04 17:06:37 +03:00
|
|
|
def skip(self, name, reason):
|
|
|
|
print "Skipping %s (%s)" % (name, reason)
|
|
|
|
self.__nr += 1
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
def run_test(self, name, desc, flavor):
|
2015-11-12 20:58:00 +03:00
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
if len(self.__subs) >= self.__max:
|
|
|
|
self.wait()
|
2015-11-12 20:58:00 +03:00
|
|
|
|
|
|
|
if test_flag(desc, 'excl'):
|
|
|
|
self.wait_all()
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-30 18:50:00 +03:00
|
|
|
self.__nr += 1
|
|
|
|
self.__show_progress()
|
|
|
|
|
2015-11-26 21:55:00 +03:00
|
|
|
nd = ('nocr', 'norst', 'pre', 'iters', 'page_server', 'sibling', \
|
2015-12-09 19:05:57 +03:00
|
|
|
'fault', 'keep_img', 'report', 'snaps', 'sat', 'dedup', 'sbs')
|
2015-10-05 21:55:00 +03:00
|
|
|
arg = repr((name, desc, flavor, { d: self.__opts[d] for d in nd }))
|
2015-12-09 19:04:11 +03:00
|
|
|
|
|
|
|
if self.__max > 1 and self.__total > 1:
|
|
|
|
logf = name.replace('/', '_') + ".log"
|
|
|
|
log = open(logf, "w")
|
|
|
|
else:
|
|
|
|
logf = None
|
|
|
|
log = None
|
|
|
|
|
2015-10-08 15:19:44 +03:00
|
|
|
sub = subprocess.Popen(["./zdtm_ct", "zdtm.py"], \
|
2015-10-08 17:28:55 +03:00
|
|
|
env = dict(os.environ, CR_CT_TEST_INFO = arg ), \
|
2015-12-09 19:04:11 +03:00
|
|
|
stdout = log, stderr = subprocess.STDOUT)
|
|
|
|
self.__subs[sub.pid] = { 'sub': sub, 'log': logf }
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-11-12 20:58:00 +03:00
|
|
|
if test_flag(desc, 'excl'):
|
|
|
|
self.wait()
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
def __wait_one(self, flags):
|
|
|
|
pid, status = os.waitpid(0, flags)
|
|
|
|
if pid != 0:
|
|
|
|
sub = self.__subs.pop(pid)
|
|
|
|
if status != 0:
|
|
|
|
self.__fail = True
|
2015-10-28 16:39:00 +03:00
|
|
|
add_to_report(sub['log'], "output")
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-12-09 19:04:11 +03:00
|
|
|
if sub['log']:
|
|
|
|
print open(sub['log']).read()
|
|
|
|
os.unlink(sub['log'])
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
return True
|
|
|
|
|
|
|
|
return False
|
|
|
|
|
2015-11-12 20:58:00 +03:00
|
|
|
def __wait_all(self):
|
|
|
|
while self.__subs:
|
|
|
|
self.__wait_one(0)
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
def wait(self):
|
|
|
|
self.__wait_one(0)
|
|
|
|
while self.__subs:
|
|
|
|
if not self.__wait_one(os.WNOHANG):
|
|
|
|
break
|
2015-11-12 20:58:00 +03:00
|
|
|
if self.__fail:
|
|
|
|
raise test_fail_exc('')
|
|
|
|
|
|
|
|
def wait_all(self):
|
|
|
|
self.__wait_all()
|
|
|
|
if self.__fail:
|
|
|
|
raise test_fail_exc('')
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
def finish(self):
|
2015-11-12 20:58:00 +03:00
|
|
|
self.__wait_all()
|
2015-10-05 21:55:00 +03:00
|
|
|
if self.__fail:
|
2015-10-30 17:12:53 +03:00
|
|
|
print_sep("FAIL", "#")
|
2015-10-05 21:55:00 +03:00
|
|
|
sys.exit(1)
|
|
|
|
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
def all_tests(opts):
|
2015-10-08 17:29:40 +03:00
|
|
|
desc = eval(open(opts['set'] + '.desc').read())
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
lst = subprocess.Popen(['find', desc['dir'], '-type', 'f', '-executable' ], \
|
|
|
|
stdout = subprocess.PIPE)
|
|
|
|
excl = map(lambda x: os.path.join(desc['dir'], x), desc['exclude'])
|
|
|
|
tlist = filter(lambda x: \
|
|
|
|
not x.endswith('.checkskip') and \
|
|
|
|
not x.endswith('.hook') and \
|
|
|
|
not x in excl, \
|
|
|
|
map(lambda x: x.strip(), lst.stdout.readlines()) \
|
|
|
|
)
|
|
|
|
lst.wait()
|
|
|
|
return tlist
|
|
|
|
|
|
|
|
|
|
|
|
# Descriptor for abstract test not in list
|
|
|
|
default_test={ }
|
|
|
|
|
|
|
|
|
|
|
|
def get_test_desc(tname):
|
|
|
|
d_path = tname + '.desc'
|
|
|
|
if os.access(d_path, os.F_OK):
|
|
|
|
return eval(open(d_path).read())
|
|
|
|
|
|
|
|
return default_test
|
|
|
|
|
|
|
|
|
|
|
|
def self_checkskip(tname):
|
|
|
|
chs = tname + '.checkskip'
|
|
|
|
if os.access(chs, os.X_OK):
|
|
|
|
ch = subprocess.Popen([chs])
|
|
|
|
return ch.wait() == 0 and False or True
|
|
|
|
|
|
|
|
return False
|
|
|
|
|
2015-12-09 19:04:58 +03:00
|
|
|
def print_fname(fname, typ):
|
|
|
|
print "=[%s]=> %s" % (typ, fname)
|
|
|
|
|
|
|
|
|
2015-11-10 16:01:08 +03:00
|
|
|
def print_sep(title, sep = "=", width = 80):
|
|
|
|
print (" " + title + " ").center(width, sep)
|
2015-10-13 09:51:00 +03:00
|
|
|
|
|
|
|
def grep_errors(fname):
|
2015-11-10 16:01:07 +03:00
|
|
|
first = True
|
2015-10-13 09:51:00 +03:00
|
|
|
for l in open(fname):
|
|
|
|
if "Error" in l:
|
2015-11-10 16:01:07 +03:00
|
|
|
if first:
|
2015-12-09 19:04:58 +03:00
|
|
|
print_fname(fname, 'log')
|
2015-11-10 16:01:08 +03:00
|
|
|
print_sep("grep Error", "-", 60)
|
2015-11-10 16:01:07 +03:00
|
|
|
first = False
|
2015-10-13 09:51:00 +03:00
|
|
|
print l,
|
2015-11-10 16:01:07 +03:00
|
|
|
if not first:
|
2015-11-10 16:01:08 +03:00
|
|
|
print_sep("ERROR OVER", "-", 60)
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
|
|
|
|
def run_tests(opts):
|
2015-10-05 21:55:00 +03:00
|
|
|
excl = None
|
|
|
|
features = {}
|
|
|
|
|
|
|
|
if opts['all']:
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
torun = all_tests(opts)
|
|
|
|
run_all = True
|
2015-10-15 17:25:00 +03:00
|
|
|
elif opts['tests']:
|
|
|
|
r = re.compile(opts['tests'])
|
|
|
|
torun = filter(lambda x: r.match(x), all_tests(opts))
|
|
|
|
run_all = True
|
2015-10-05 21:55:00 +03:00
|
|
|
elif opts['test']:
|
|
|
|
torun = opts['test']
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
run_all = False
|
2015-10-05 21:55:00 +03:00
|
|
|
else:
|
|
|
|
print "Specify test with -t <name> or -a"
|
|
|
|
return
|
|
|
|
|
|
|
|
if opts['exclude']:
|
|
|
|
excl = re.compile(".*(" + "|".join(opts['exclude']) + ")")
|
|
|
|
print "Compiled exclusion list"
|
|
|
|
|
2015-10-28 16:39:00 +03:00
|
|
|
if opts['report']:
|
|
|
|
init_report(opts['report'])
|
|
|
|
|
2015-10-30 18:50:00 +03:00
|
|
|
l = launcher(opts, len(torun))
|
2015-10-05 21:55:00 +03:00
|
|
|
try:
|
|
|
|
for t in torun:
|
|
|
|
global arch
|
|
|
|
|
|
|
|
if excl and excl.match(t):
|
2015-12-04 17:06:37 +03:00
|
|
|
l.skip(t, "exclude")
|
2015-10-05 21:55:00 +03:00
|
|
|
continue
|
|
|
|
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
tdesc = get_test_desc(t)
|
2015-10-05 21:55:00 +03:00
|
|
|
if tdesc.get('arch', arch) != arch:
|
2015-12-04 17:06:37 +03:00
|
|
|
l.skip(t, "arch %s" % tdesc['arch'])
|
2015-10-05 21:55:00 +03:00
|
|
|
continue
|
|
|
|
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
if run_all and test_flag(tdesc, 'noauto'):
|
2015-12-04 17:06:37 +03:00
|
|
|
l.skip(t, "manual run only")
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
continue
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
feat = tdesc.get('feature', None)
|
|
|
|
if feat:
|
|
|
|
if not features.has_key(feat):
|
|
|
|
print "Checking feature %s" % feat
|
|
|
|
features[feat] = criu_cli.check(feat)
|
|
|
|
|
|
|
|
if not features[feat]:
|
2015-12-04 17:06:37 +03:00
|
|
|
l.skip(t, "no %s feature" % feat)
|
2015-10-05 21:55:00 +03:00
|
|
|
continue
|
|
|
|
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
if self_checkskip(t):
|
2015-12-04 17:06:37 +03:00
|
|
|
l.skip(t, "self")
|
2015-10-05 21:55:00 +03:00
|
|
|
continue
|
|
|
|
|
|
|
|
test_flavs = tdesc.get('flavor', 'h ns uns').split()
|
|
|
|
opts_flavs = (opts['flavor'] or 'h,ns,uns').split(',')
|
2015-12-04 17:06:49 +03:00
|
|
|
if opts_flavs != ['best']:
|
|
|
|
run_flavs = set(test_flavs) & set(opts_flavs)
|
|
|
|
else:
|
|
|
|
run_flavs = set([test_flavs.pop()])
|
2015-12-07 08:17:00 +03:00
|
|
|
if not criu_cli.check("userns"):
|
2015-12-09 15:47:00 +03:00
|
|
|
try:
|
|
|
|
run_flavs.remove("uns")
|
2015-12-09 20:06:00 +03:00
|
|
|
except KeyError:
|
2015-12-09 15:47:00 +03:00
|
|
|
# don't worry if uns isn't in run_flavs
|
|
|
|
pass
|
2015-12-07 08:17:00 +03:00
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
if run_flavs:
|
|
|
|
l.run_test(t, tdesc, run_flavs)
|
2015-12-04 17:06:37 +03:00
|
|
|
else:
|
|
|
|
l.skip(t, "no flavors")
|
2015-10-05 21:55:00 +03:00
|
|
|
finally:
|
|
|
|
l.finish()
|
|
|
|
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
|
2015-10-08 17:27:24 +03:00
|
|
|
sti_fmt = "%-40s%-10s%s"
|
|
|
|
|
|
|
|
def show_test_info(t):
|
|
|
|
tdesc = get_test_desc(t)
|
|
|
|
flavs = tdesc.get('flavor', '')
|
|
|
|
return sti_fmt % (t, flavs, tdesc.get('flags', ''))
|
|
|
|
|
|
|
|
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
def list_tests(opts):
|
|
|
|
tlist = all_tests(opts)
|
2015-10-08 17:27:24 +03:00
|
|
|
if opts['info']:
|
|
|
|
print sti_fmt % ('Name', 'Flavors', 'Flags')
|
|
|
|
tlist = map(lambda x: show_test_info(x), tlist)
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
print '\n'.join(tlist)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-12-03 16:08:53 +03:00
|
|
|
|
|
|
|
class group:
|
|
|
|
def __init__(self, tname, tdesc):
|
|
|
|
self.__tests = [ tname ]
|
|
|
|
self.__desc = tdesc
|
|
|
|
self.__deps = set()
|
|
|
|
|
|
|
|
def __is_mergeable_desc(self, desc):
|
|
|
|
# For now make it full match
|
|
|
|
if self.__desc.get('flags') != desc.get('flags'):
|
|
|
|
return False
|
|
|
|
if self.__desc.get('flavor') != desc.get('flavor'):
|
|
|
|
return False
|
|
|
|
if self.__desc.get('arch') != desc.get('arch'):
|
|
|
|
return False
|
|
|
|
if self.__desc.get('opts') != desc.get('opts'):
|
|
|
|
return False
|
|
|
|
if self.__desc.get('feature') != desc.get('feature'):
|
|
|
|
return False
|
|
|
|
return True
|
|
|
|
|
|
|
|
def merge(self, tname, tdesc):
|
|
|
|
if not self.__is_mergeable_desc(tdesc):
|
|
|
|
return False
|
|
|
|
|
|
|
|
self.__deps |= set(tdesc.get('deps', []))
|
|
|
|
self.__tests.append(tname)
|
|
|
|
return True
|
|
|
|
|
|
|
|
def size(self):
|
|
|
|
return len(self.__tests)
|
|
|
|
|
|
|
|
def dump(self, fname):
|
|
|
|
f = open(fname, "w")
|
|
|
|
for t in self.__tests:
|
|
|
|
f.write(t + '\n')
|
|
|
|
f.close()
|
|
|
|
os.chmod(fname, 0700)
|
|
|
|
|
|
|
|
if len(self.__desc) or len(self.__deps):
|
|
|
|
f = open(fname + '.desc', "w")
|
|
|
|
if len(self.__deps):
|
|
|
|
self.__desc['deps'] = list(self.__deps)
|
|
|
|
f.write(repr(self.__desc))
|
|
|
|
f.close()
|
|
|
|
|
|
|
|
|
|
|
|
def group_tests(opts):
|
|
|
|
excl = None
|
|
|
|
groups = []
|
|
|
|
pend_groups = []
|
|
|
|
maxs = int(opts['max_size'])
|
|
|
|
|
|
|
|
if not os.access("groups", os.F_OK):
|
|
|
|
os.mkdir("groups")
|
|
|
|
|
|
|
|
tlist = all_tests(opts)
|
|
|
|
random.shuffle(tlist)
|
|
|
|
if opts['exclude']:
|
|
|
|
excl = re.compile(".*(" + "|".join(opts['exclude']) + ")")
|
|
|
|
print "Compiled exclusion list"
|
|
|
|
|
|
|
|
for t in tlist:
|
|
|
|
if excl and excl.match(t):
|
|
|
|
continue
|
|
|
|
|
|
|
|
td = get_test_desc(t)
|
|
|
|
|
|
|
|
for g in pend_groups:
|
|
|
|
if g.merge(t, td):
|
|
|
|
if g.size() == maxs:
|
|
|
|
pend_groups.remove(g)
|
|
|
|
groups.append(g)
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
g = group(t, td)
|
|
|
|
pend_groups.append(g)
|
|
|
|
|
|
|
|
groups += pend_groups
|
|
|
|
|
|
|
|
nr = 0
|
|
|
|
suf = opts['name'] or 'group'
|
|
|
|
|
|
|
|
for g in groups:
|
|
|
|
if g.size() == 1: # Not much point in group test for this
|
|
|
|
continue
|
|
|
|
|
|
|
|
fn = os.path.join("groups", "%s.%d" % (suf, nr))
|
|
|
|
g.dump(fn)
|
|
|
|
nr += 1
|
|
|
|
|
|
|
|
print "Generated %d group(s)" % nr
|
|
|
|
|
|
|
|
|
2015-12-11 19:05:00 +03:00
|
|
|
def clean_stuff(opts):
|
|
|
|
print "Cleaning %s" % opts['what']
|
|
|
|
if opts['what'] == 'nsroot':
|
|
|
|
for f in flavors:
|
|
|
|
f = flavors[f]
|
|
|
|
f.clean()
|
|
|
|
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
#
|
|
|
|
# main() starts here
|
|
|
|
#
|
|
|
|
|
2015-10-08 17:28:55 +03:00
|
|
|
if os.environ.has_key('CR_CT_TEST_INFO'):
|
2015-10-08 15:18:51 +03:00
|
|
|
# Fork here, since we're new pidns init and are supposed to
|
|
|
|
# collect this namespace's zombies
|
2015-10-20 15:40:10 +04:00
|
|
|
status = 0
|
2015-10-08 15:18:51 +03:00
|
|
|
pid = os.fork()
|
|
|
|
if pid == 0:
|
2015-10-08 17:28:55 +03:00
|
|
|
tinfo = eval(os.environ['CR_CT_TEST_INFO'])
|
2015-10-08 15:18:51 +03:00
|
|
|
do_run_test(tinfo[0], tinfo[1], tinfo[2], tinfo[3])
|
|
|
|
else:
|
|
|
|
while True:
|
|
|
|
wpid, status = os.wait()
|
|
|
|
if wpid == pid:
|
2015-10-20 15:40:10 +04:00
|
|
|
if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 0:
|
|
|
|
status = 1
|
2015-10-08 15:18:51 +03:00
|
|
|
break;
|
|
|
|
|
2015-10-20 15:40:10 +04:00
|
|
|
sys.exit(status)
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-10-08 17:28:55 +03:00
|
|
|
p = argparse.ArgumentParser("CRIU test suite")
|
2015-10-05 21:55:00 +03:00
|
|
|
p.add_argument("--debug", help = "Print what's being executed", action = 'store_true')
|
2015-10-08 17:29:40 +03:00
|
|
|
p.add_argument("--set", help = "Which set of tests to use", default = 'zdtm')
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
sp = p.add_subparsers(help = "Use --help for list of actions")
|
|
|
|
|
|
|
|
rp = sp.add_parser("run", help = "Run test(s)")
|
|
|
|
rp.set_defaults(action = run_tests)
|
|
|
|
rp.add_argument("-a", "--all", action = 'store_true')
|
|
|
|
rp.add_argument("-t", "--test", help = "Test name", action = 'append')
|
2015-10-15 17:25:00 +03:00
|
|
|
rp.add_argument("-T", "--tests", help = "Regexp")
|
2015-10-05 21:55:00 +03:00
|
|
|
rp.add_argument("-f", "--flavor", help = "Flavor to run")
|
|
|
|
rp.add_argument("-x", "--exclude", help = "Exclude tests from --all run", action = 'append')
|
|
|
|
|
2015-10-08 15:22:53 +03:00
|
|
|
rp.add_argument("--sibling", help = "Restore tests as siblings", action = 'store_true')
|
2015-10-27 16:29:00 +03:00
|
|
|
rp.add_argument("--pre", help = "Do some pre-dumps before dump (n[:pause])")
|
2015-11-12 18:11:38 +04:00
|
|
|
rp.add_argument("--snaps", help = "Instead of pre-dumps do full dumps", action = 'store_true')
|
2015-11-26 21:55:00 +03:00
|
|
|
rp.add_argument("--dedup", help = "Auto-deduplicate images on iterations", action = 'store_true')
|
2015-10-05 21:55:00 +03:00
|
|
|
rp.add_argument("--nocr", help = "Do not CR anything, just check test works", action = 'store_true')
|
|
|
|
rp.add_argument("--norst", help = "Don't restore tasks, leave them running after dump", action = 'store_true')
|
2015-10-27 16:29:00 +03:00
|
|
|
rp.add_argument("--iters", help = "Do CR cycle several times before check (n[:pause])")
|
2015-10-13 18:06:44 +03:00
|
|
|
rp.add_argument("--fault", help = "Test fault injection")
|
2015-11-09 21:27:00 +03:00
|
|
|
rp.add_argument("--sat", help = "Generate criu strace-s for sat tool (restore is fake, images are kept)", action = 'store_true')
|
2015-12-09 19:05:57 +03:00
|
|
|
rp.add_argument("--sbs", help = "Do step-by-step execution, asking user for keypress to continue", action = 'store_true')
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
rp.add_argument("--page-server", help = "Use page server dump", action = 'store_true')
|
|
|
|
rp.add_argument("-p", "--parallel", help = "Run test in parallel")
|
|
|
|
|
2015-10-15 17:27:00 +03:00
|
|
|
rp.add_argument("-k", "--keep-img", help = "Whether or not to keep images after test",
|
|
|
|
choices = [ 'always', 'never', 'failed' ], default = 'failed')
|
2015-10-28 16:39:00 +03:00
|
|
|
rp.add_argument("--report", help = "Generate summary report in directory")
|
2015-10-15 17:27:00 +03:00
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
lp = sp.add_parser("list", help = "List tests")
|
|
|
|
lp.set_defaults(action = list_tests)
|
2015-10-08 17:27:24 +03:00
|
|
|
lp.add_argument('-i', '--info', help = "Show more info about tests", action = 'store_true')
|
2015-10-05 21:55:00 +03:00
|
|
|
|
2015-12-03 16:08:53 +03:00
|
|
|
gp = sp.add_parser("group", help = "Generate groups")
|
|
|
|
gp.set_defaults(action = group_tests)
|
|
|
|
gp.add_argument("-m", "--max-size", help = "Maximum number of tests in group")
|
|
|
|
gp.add_argument("-n", "--name", help = "Common name for group tests")
|
|
|
|
gp.add_argument("-x", "--exclude", help = "Exclude tests from --all run", action = 'append')
|
|
|
|
|
2015-12-11 19:05:00 +03:00
|
|
|
cp = sp.add_parser("clean", help = "Clean something")
|
|
|
|
cp.set_defaults(action = clean_stuff)
|
|
|
|
cp.add_argument("what", choices = [ 'nsroot' ])
|
|
|
|
|
2015-10-05 21:55:00 +03:00
|
|
|
opts = vars(p.parse_args())
|
2015-12-03 15:29:00 +03:00
|
|
|
if opts.get('sat', False):
|
2015-11-09 21:27:00 +03:00
|
|
|
opts['keep_img'] = 'always'
|
2015-10-05 21:55:00 +03:00
|
|
|
|
|
|
|
if opts['debug']:
|
|
|
|
sys.settrace(traceit)
|
|
|
|
|
2015-10-29 14:15:00 +03:00
|
|
|
criu_cli.available()
|
|
|
|
for tst in test_classes.values():
|
|
|
|
tst.available()
|
|
|
|
|
zdtm.py: Rework tests list generation
Hand-made list of tests is not great. As Christopher suggested we can find
all executables in a directory and treat them as tests. This idea is good,
but requires a little bit more trickery.
First, some executables in directory are not tests, e.g. these are per-test
scripts, which should be skipped. Next, not all tests in zdtm can be right
now run in continuous manner (they fail), but we want to run them manually.
To fix that such tests are marked with 'noauto' flag in the description.
So we have the test test descriptor file, which states in which file to
look for tests (executable) and which of them to exclude. Maybe more, can
be added on demand.
And, finally, the per-test description goes in a ${test}.desc file in native
python eval/repr-able format. If the file is absent a default description
is used.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
2015-10-08 17:25:00 +03:00
|
|
|
opts['action'](opts)
|