ovs/tests/ovsdb-server.at
Mike Pattrick 0add983b38 ovsdb: Use table indexes if available for ovsdb_query().
Currently, all OVSDB database queries except UUID lookups result in
linear scans over the entire table, even if an index is present.

This patch modifies ovsdb_query() to attempt an index lookup first, when
possible. If no matching index is present, a linear scan is still
performed.

To test this, I set up an ovsdb database with a variable number of rows
and measured the average time ovsdb-client took to query a single row.
The first two tests involved a linear scan that didn't match any rows,
so there was no overhead associated with sending or encoding output. The
post-patch linear scan was a worst-case scenario where the table did
have an appropriate index but the conditions made its use impossible.
The indexed lookup test was for a matching row, so it also includes the
overhead associated with a match. The results are included in the table
below.

Rows                   | 100k | 200k | 300k | 400k | 500k
-----------------------+------+------+------+------+-----
Pre-patch linear scan  |  9ms | 24ms | 37ms | 49ms | 61ms
Post-patch linear scan |  9ms | 24ms | 38ms | 49ms | 61ms
Indexed lookup         |  3ms |  3ms |  3ms |  3ms |  3ms

I also tested the performance of ovsdb_query() by wrapping it in a loop
and measuring the time it took to perform 1000 linear scans on 1, 10,
100k, and 200k rows. This test showed that the new index-checking code
did not slow down worst-case lookups to a statistically detectable
degree.
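
As a rough illustration (a sketch only, using the test suite's "ordinals"
schema in which the "number" column is indexed; the numbers above were not
produced with this exact command), a query that can be served from an index
is an equality match on the indexed column:

    ovsdb-client transact unix:socket \
      '["ordinals",
        {"op": "select",
         "table": "ordinals",
         "where": [["number", "==", 42]]}]'

A condition such as [["number", "<", 42]] cannot be answered from the index,
so it would still be handled by a linear scan.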

Reported-at: https://issues.redhat.com/browse/FDP-590
Signed-off-by: Mike Pattrick <mkp@redhat.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
2025-07-15 18:05:32 +02:00

AT_BANNER([OVSDB -- ovsdb-server transactions (Unix sockets)])
dnl OVSDB_SERVER_SHUTDOWN_N(N, [ALLOWLIST])
dnl
dnl Similar to OVSDB_SERVER_SHUTDOWN, but stops the server started with N.pid
dnl pidfile and unixctlN socket.
m4_define([OVSDB_SERVER_SHUTDOWN_N],
[AT_CHECK([check_logs $2])
cp $1.pid savepid$1
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl$1 -e exit], [0], [ignore], [ignore])
OVS_WAIT_WHILE([kill -0 `cat savepid$1`], [kill `cat savepid$1`])])
m4_define([OVSDB_SERVER_SHUTDOWN2],
[OVSDB_SERVER_SHUTDOWN_N([2], $1)])
# OVSDB_CHECK_EXECUTION(TITLE, SCHEMA, TRANSACTIONS, OUTPUT, [KEYWORDS])
#
# Creates a database with the given SCHEMA, starts an ovsdb-server on
# that database, and runs each of the TRANSACTIONS (which should be a
# quoted list of quoted strings) against it with ovsdb-client one at a
# time.
#
# Checks that the overall output is OUTPUT, but UUIDs in the output
# are replaced by markers of the form <N> where N is a number. The
# first unique UUID is replaced by <0>, the next by <1>, and so on.
# If a given UUID appears more than once it is always replaced by the
# same marker.
#
# Additionally, checks that records written to a database file can be
# read back producing the same in-memory database content.
#
# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
m4_define([OVSDB_CHECK_EXECUTION],
[AT_SETUP([$1])
AT_KEYWORDS([ovsdb server positive unix $5])
$2 > schema
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server --detach --no-chdir --log-file --pidfile \
--remote=punix:socket db], [0], [ignore], [ignore])
m4_foreach([txn], [$3],
[AT_CHECK([ovsdb-client transact unix:socket 'txn'], [0], [stdout], [ignore])
cat stdout >> output
])
AT_CHECK([uuidfilt output], [0], [$4], [ignore])
AT_CHECK([ovsdb-client dump unix:socket], [0], [stdout], [ignore])
OVSDB_SERVER_SHUTDOWN
AT_CHECK([ovsdb-server --detach --no-chdir --log-file --pidfile \
--remote=punix:socket db], [0], [ignore], [ignore])
OVS_WAIT_UNTIL([ovsdb-client dump unix:socket > dump2; diff stdout dump2])
OVSDB_SERVER_SHUTDOWN
AT_CLEANUP])
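# As a purely illustrative sketch (not one of the real EXECUTION_EXAMPLES,
# which are defined elsewhere), an invocation looks roughly like this, where
# the schema argument is a command that prints a schema and the transactions
# argument is a quoted list of quoted strings:
#
#   OVSDB_CHECK_EXECUTION([simple insert],
#     [ordinal_schema],
#     [[['["ordinals",
#          {"op": "insert",
#           "table": "ordinals",
#           "row": {"number": 0, "name": "zero"}}]']]],
#     [[[{"uuid":["uuid","<0>"]}]
#   ]])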
EXECUTION_EXAMPLES
AT_BANNER([ovsdb-server miscellaneous features])
AT_SETUP([truncating corrupted database log])
AT_KEYWORDS([ovsdb server positive unix])
AT_SKIP_IF([test "$IS_WIN32" = "yes"])
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
dnl Do one transaction and save the output.
AT_DATA([txnfile], [[ovsdb-client transact unix:socket \
'["ordinals",
{"op": "insert",
"table": "ordinals",
"row": {"number": 0, "name": "zero"}}]'
]])
AT_CHECK([ovsdb-server --remote=punix:socket db --run="sh txnfile"], [0], [stdout], [])
cat stdout >> output
dnl Add some crap to the database log and run another transaction, which should
dnl ignore the crap and truncate it out of the log.
echo 'xxx' >> db
AT_DATA([txnfile], [[ovsdb-client transact unix:socket \
'["ordinals",
{"op": "insert",
"table": "ordinals",
"row": {"number": 1, "name": "one"}}]'
]])
AT_CHECK([ovsdb-server --remote=punix:socket db --run="sh txnfile"], [0], [stdout], [stderr])
AT_CHECK([grep 'syntax error: db: parse error.* in header line "xxx"' stderr],
[0], [ignore])
cat stdout >> output
dnl Run a final transaction to verify that both transactions succeeded.
dnl The crap that we added should have been truncated by the previous run,
dnl so ovsdb-server shouldn't log a warning this time.
AT_DATA([txnfile], [[ovsdb-client transact unix:socket \
'["ordinals",
{"op": "select",
"table": "ordinals",
"where": [],
"sort": ["number"]}]'
]])
AT_CHECK([ovsdb-server --remote=punix:socket db --run="sh txnfile"], [0], [stdout], [])
cat stdout >> output
AT_CHECK([uuidfilt output], [0],
[[[{"uuid":["uuid","<0>"]}]
[{"uuid":["uuid","<1>"]}]
[{"rows":[{"_uuid":["uuid","<0>"],"_version":["uuid","<2>"],"name":"zero","number":0},{"_uuid":["uuid","<1>"],"_version":["uuid","<3>"],"name":"one","number":1}]}]
]], [])
AT_CLEANUP
AT_SETUP([truncating database log with bad transaction])
AT_KEYWORDS([ovsdb server positive unix])
AT_SKIP_IF([test "$IS_WIN32" = "yes"])
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
dnl Do one transaction and save the output.
AT_DATA([txnfile], [[ovsdb-client transact unix:socket \
'["ordinals",
{"op": "insert",
"table": "ordinals",
"row": {"number": 0, "name": "zero"}}]'
]])
AT_CHECK([ovsdb-server --remote=punix:socket db --run="sh txnfile"], [0], [stdout], [])
cat stdout >> output
dnl Add some crap to the database log and run another transaction, which should
dnl ignore the crap and truncate it out of the log.
echo 'OVSDB JSON 15 ffbcdae4b0386265f9ea3280dd7c8f0b72a20e56
{"invalid":{}}' >> db
AT_DATA([txnfile], [[ovsdb-client transact unix:socket \
'["ordinals",
{"op": "insert",
"table": "ordinals",
"row": {"number": 1, "name": "one"}}]'
]])
AT_CHECK([ovsdb-server --remote=punix:socket db --run="sh txnfile"], [0], [stdout], [stderr])
AT_CHECK([grep 'syntax "{"invalid":{}}": unknown table: No table named invalid.' stderr],
[0], [ignore])
cat stdout >> output
dnl Run a final transaction to verify that both transactions succeeded.
dnl The crap that we added should have been truncated by the previous run,
dnl so ovsdb-server shouldn't log a warning this time.
AT_DATA([txnfile], [[ovsdb-client transact unix:socket \
'["ordinals",
{"op": "select",
"table": "ordinals",
"where": [],
"sort": ["number"]}]'
]])
AT_CHECK([ovsdb-server --remote=punix:socket db --run="sh txnfile"], [0], [stdout], [])
cat stdout >> output
AT_CHECK([uuidfilt output], [0],
[[[{"uuid":["uuid","<0>"]}]
[{"uuid":["uuid","<1>"]}]
[{"rows":[{"_uuid":["uuid","<0>"],"_version":["uuid","<2>"],"name":"zero","number":0},{"_uuid":["uuid","<1>"],"_version":["uuid","<3>"],"name":"one","number":1}]}]
]], [])
AT_CLEANUP
dnl CHECK_DBS([databases])
dnl
dnl Checks that ovsdb-server hosts the given 'databases', each of which
dnl needs to be followed by a newline.
m4_define([CHECK_DBS],
[AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-dbs],
[0], [_Server
$1])
AT_CHECK([ovsdb-client --no-headings dump _Server Database name | sort], [0], [dnl
Database table
_Server
$1])])
AT_SETUP([database multiplexing implementation])
AT_KEYWORDS([ovsdb server positive])
ordinal_schema > schema1
constraint_schema > schema2
AT_CHECK([ovsdb-tool create db1 schema1], [0], [ignore], [ignore])
AT_CHECK([ovsdb-tool create db2 schema2], [0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server --detach --no-chdir --log-file --pidfile --remote=punix:db.sock db1 db2], [0], [ignore], [ignore])
CHECK_DBS([constraints
ordinals
])
AT_CHECK(
[[ovstest test-jsonrpc request unix:db.sock get_schema [\"nonexistent\"]]], [0],
[[{"error":{"details":"get_schema request specifies unknown database nonexistent","error":"unknown database","syntax":"[\"nonexistent\"]"},"id":0,"result":null}
]], [])
OVSDB_SERVER_SHUTDOWN
AT_CLEANUP
AT_SETUP([database multiplexing implementation with config file])
AT_KEYWORDS([ovsdb server positive config-file])
ordinal_schema > schema1
constraint_schema > schema2
AT_CHECK([ovsdb-tool create db1 schema1], [0], [ignore], [ignore])
AT_CHECK([ovsdb-tool create db2 schema2], [0], [ignore], [ignore])
on_exit 'kill $(cat *.pid)'
AT_DATA([config.json], [
{"remotes" : { "punix:db.sock": {} },
"databases": { "db1": {}, "db2": { "service-model": "standalone" } } }
])
AT_CHECK([ovsdb-server --detach --no-chdir --log-file --pidfile \
--config-file=config.json], [0], [ignore], [ignore])
CHECK_DBS([constraints
ordinals
])
AT_CHECK(
[[ovstest test-jsonrpc request unix:db.sock get_schema [\"nonexistent\"]]], [0],
[[{"error":{"details":"get_schema request specifies unknown database nonexistent","error":"unknown database","syntax":"[\"nonexistent\"]"},"id":0,"result":null}
]])
OVSDB_SERVER_SHUTDOWN
AT_CLEANUP
AT_SETUP([ovsdb-server/add-db and remove-db])
AT_KEYWORDS([ovsdb server positive])
on_exit 'kill `cat *.pid`'
ordinal_schema > schema1
constraint_schema > schema2
AT_CHECK([ovsdb-tool create db1 schema1], [0], [ignore], [ignore])
AT_CHECK([ovsdb-tool create db2 schema2], [0], [ignore], [ignore])
# Start ovsdb-server with just a single database - db1.
AT_CHECK([ovsdb-server -vfile -vvlog:off --log-file --detach --no-chdir --pidfile --remote=punix:db.sock db1], [0], [ignore], [ignore])
CHECK_DBS([ordinals
])
# Remove the database.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/remove-db ordinals], [0])
CHECK_DBS([])
# Start monitoring processes.
AT_CHECK([ovsdb-client --detach --no-chdir --pidfile=ovsdb-client-1.pid --no-db-change-aware --no-headings monitor _Server Database name > db-change-unaware.stdout 2> db-change-unaware.stderr])
AT_CHECK([ovsdb-client --detach --no-chdir --pidfile=ovsdb-client-2.pid --db-change-aware --no-headings monitor _Server Database name > db-change-aware.stdout 2> db-change-aware.stderr])
AT_CAPTURE_FILE([db-change-unaware.stdout])
AT_CAPTURE_FILE([db-change-unaware.stderr])
AT_CAPTURE_FILE([db-change-aware.stdout])
AT_CAPTURE_FILE([db-change-aware.stderr])
# Add the first database back.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-db db1], [0])
CHECK_DBS([ordinals
])
# Add the second database.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-db db2], [0])
CHECK_DBS([constraints
ordinals
])
# The databases are responsive.
AT_CHECK([ovsdb-client list-tables unix:db.sock constraints], [0], [ignore], [ignore])
AT_CHECK([ovsdb-client list-tables unix:db.sock ordinals], [0], [ignore], [ignore])
# Add an already added database.
if test $IS_WIN32 = "yes"; then
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-db db2], 2, [],
[I/O error: db2: failed to lock lockfile (Resource deadlock avoided)
ovs-appctl: ovsdb-server: server returned an error
])
else
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-db db2], 2, [],
[ovsdb error: db2: already open
ovs-appctl: ovsdb-server: server returned an error
])
fi
# Add a non-existing database.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-db db3], 2, [], [stderr])
AT_CHECK([sed 's/(.*)/(...)/' stderr], [0],
[I/O error: db3: open failed (...)
ovs-appctl: ovsdb-server: server returned an error
])
# Add a remote through a db path in db1.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-remote db:ordinals,ordinals,name], [0])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [db:ordinals,ordinals,name
punix:db.sock
])
# Removing db1 has no effect on its remote.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/remove-db ordinals], [0])
CHECK_DBS([constraints
])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [db:ordinals,ordinals,name
punix:db.sock
])
AT_CHECK([ovsdb-client list-tables unix:db.sock ordinals], [1], [ignore], [ignore])
# Remove db2.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/remove-db constraints], [0])
CHECK_DBS()
AT_CHECK([ovsdb-client list-tables unix:db.sock constraints], [1], [ignore], [ignore])
# Remove a non-existent database.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/remove-db ordinals], [2],
[], [Failed to find the database.
ovs-appctl: ovsdb-server: server returned an error
])
# Add a removed database.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-db db2], [0])
CHECK_DBS([constraints
])
AT_CHECK([ovsdb-client list-tables unix:db.sock constraints], [0], [ignore], [ignore])
# Check the monitoring results.
AT_CHECK([uuidfilt db-change-aware.stdout], [0], [dnl
<0> initial _Server
<1> insert ordinals
<2> insert constraints
<1> delete ordinals
<2> delete constraints
<3> insert constraints
])
AT_CHECK([uuidfilt db-change-unaware.stdout], [0], [dnl
<0> initial _Server
])
OVSDB_SERVER_SHUTDOWN(["/no database named ordinals/d"])
AT_CLEANUP
AT_SETUP([ovsdb-server/add-db and remove-db with a config file])
AT_KEYWORDS([ovsdb server positive config-file])
on_exit 'kill $(cat *.pid)'
ordinal_schema > schema1
constraint_schema > schema2
AT_CHECK([ovsdb-tool create db1 schema1], [0], [ignore], [ignore])
AT_CHECK([ovsdb-tool create db2 schema2], [0], [ignore], [ignore])
dnl Start ovsdb-server with just a single database - db1.
AT_DATA([config.json], [
{
"remotes": {
"punix:db.sock": {}
},
"databases": {
"db1": {}
}
}
])
AT_CAPTURE_FILE([config.json])
AT_CHECK([ovsdb-server -vfile -vvlog:off --log-file --detach --no-chdir \
--pidfile --config-file=config.json], [0], [ignore], [ignore])
CHECK_DBS([ordinals
])
dnl Remove the database.
AT_CHECK([sed -i'back' '/db1/d' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
CHECK_DBS([])
dnl Start monitoring processes.
AT_CHECK([ovsdb-client --detach --no-chdir --pidfile=ovsdb-client-1.pid \
--no-db-change-aware --no-headings monitor _Server Database name \
> db-change-unaware.stdout 2> db-change-unaware.stderr])
AT_CHECK([ovsdb-client --detach --no-chdir --pidfile=ovsdb-client-2.pid \
--db-change-aware --no-headings monitor _Server Database name \
> db-change-aware.stdout 2> db-change-aware.stderr])
AT_CAPTURE_FILE([db-change-unaware.stdout])
AT_CAPTURE_FILE([db-change-unaware.stderr])
AT_CAPTURE_FILE([db-change-aware.stdout])
AT_CAPTURE_FILE([db-change-aware.stderr])
dnl Add the first database back.
AT_CHECK([sed -i'back' '/"databases"/a\
"db1": {}
' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
CHECK_DBS([ordinals
])
dnl Add the second database.
AT_CHECK([sed -i'back' '/"databases"/a\
"db2": {},
' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
CHECK_DBS([constraints
ordinals
])
dnl The databases are responsive.
AT_CHECK([ovsdb-client list-tables unix:db.sock constraints], [0], [ignore], [ignore])
AT_CHECK([ovsdb-client list-tables unix:db.sock ordinals], [0], [ignore], [ignore])
dnl Add an already added database.
AT_CHECK([sed -i'back' '/"databases"/a\
"db2": {},
' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
dnl Fix the config back.
AT_CHECK([sed -i'back' '/db2/d' config.json])
AT_CHECK([sed -i'back' '/"databases"/a\
"db2": {},
' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
dnl Add a non-existing database.
AT_CHECK([sed -i'back' '/"databases"/a\
"db3": {},
' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload], [2], [ignore], [ignore])
OVS_WAIT_UNTIL([grep -q 'failed to configure databases' ovsdb-server.log])
AT_CHECK([sed -i'back' '/db3/d' config.json])
dnl Add a remote through a db path in db1.
AT_CHECK([sed -i'back' '/"remotes"/a\
"db:ordinals,ordinals,name": {},
' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [db:ordinals,ordinals,name
punix:db.sock
])
dnl Removing db1 has no effect on its remote.
AT_CHECK([sed -i'back' '/db1/d' config.json])
AT_CHECK([sed -i'back' 's/"db2": {},/"db2": {}/' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload], [2], [ignore], [ignore])
CHECK_DBS([constraints
])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [db:ordinals,ordinals,name
punix:db.sock
])
AT_CHECK([ovsdb-client list-tables unix:db.sock ordinals], [1], [ignore], [ignore])
dnl Remove now missing remote.
AT_CHECK([sed -i'back' '/db:ordinals,ordinals,name/d' config.json])
dnl Remove db2.
AT_CHECK([sed -i'back' '/db2/d' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
CHECK_DBS()
AT_CHECK([ovsdb-client list-tables unix:db.sock constraints], [1], [ignore], [ignore])
dnl Add a removed database.
AT_CHECK([sed -i'back' '/"databases"/a\
"db2": {}
' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
CHECK_DBS([constraints
])
AT_CHECK([ovsdb-client list-tables unix:db.sock constraints], [0], [ignore], [ignore])
# Check the monitoring results.
AT_CHECK([uuidfilt db-change-aware.stdout], [0], [dnl
<0> initial _Server
<1> insert ordinals
<2> insert constraints
<1> delete ordinals
<2> delete constraints
<3> insert constraints
])
AT_CHECK([uuidfilt db-change-unaware.stdout], [0], [dnl
<0> initial _Server
])
OVSDB_SERVER_SHUTDOWN(["
/no database named ordinals/d
/failed to open database 'db3'/d
/failed to configure databases/d
"])
AT_CLEANUP
AT_SETUP([ovsdb-server/add-db with --monitor])
AT_KEYWORDS([ovsdb server positive])
AT_SKIP_IF([test "$IS_WIN32" = "yes"])
# This test intentionally causes SIGSEGV, so make sanitizers ignore it.
ASAN_OPTIONS=$ASAN_OPTIONS:handle_segv=0; export ASAN_OPTIONS
UBSAN_OPTIONS=$UBSAN_OPTIONS:handle_segv=0; export UBSAN_OPTIONS
# Start ovsdb-server, initially with one db.
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db1 schema], [0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server -vfile -vvlog:off --monitor --detach --no-chdir --pidfile --log-file --remote=punix:db.sock db1], [0], [ignore], [ignore])
# Add the second database.
constraint_schema > schema2
AT_CHECK([ovsdb-tool create db2 schema2], [0], [ignore], [ignore])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-db db2], [0])
CHECK_DBS([constraints
ordinals
])
# Kill the daemon process, making it look like a segfault,
# and wait for a new daemon process to get spawned.
cp ovsdb-server.pid old.pid
AT_CHECK([kill -SEGV `cat ovsdb-server.pid`])
OVS_WAIT_WHILE([kill -0 `cat old.pid`])
OVS_WAIT_UNTIL(
[test -s ovsdb-server.pid && test `cat ovsdb-server.pid` != `cat old.pid`])
OVS_WAIT_UNTIL([ovs-appctl -t ovsdb-server version])
CHECK_DBS([constraints
ordinals
])
OVSDB_SERVER_SHUTDOWN(["
/backtrace/d
/killed/d
"])
AT_CLEANUP
AT_SETUP([ovsdb-server/add-db and remove-db with --monitor])
AT_KEYWORDS([ovsdb server positive])
AT_SKIP_IF([test "$IS_WIN32" = "yes"])
# This test intentionally causes SIGSEGV, so make sanitizers ignore it.
ASAN_OPTIONS=$ASAN_OPTIONS:handle_segv=0; export ASAN_OPTIONS
UBSAN_OPTIONS=$UBSAN_OPTIONS:handle_segv=0; export UBSAN_OPTIONS
# Start ovsdb-server, initially with one db.
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db1 schema], [0], [ignore], [ignore])
constraint_schema > schema2
AT_CHECK([ovsdb-tool create db2 schema2], [0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server -vfile -vvlog:off --monitor --detach --no-chdir --pidfile --log-file --remote=punix:db.sock db1 db2], [0], [ignore], [ignore])
# Remove the second database.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/remove-db constraints])
CHECK_DBS([ordinals
])
# Kill the daemon process, making it look like a segfault,
# and wait for a new daemon process to get spawned.
cp ovsdb-server.pid old.pid
AT_CHECK([kill -SEGV `cat ovsdb-server.pid`])
OVS_WAIT_WHILE([kill -0 `cat old.pid`])
OVS_WAIT_UNTIL(
[test -s ovsdb-server.pid && test `cat ovsdb-server.pid` != `cat old.pid`])
OVS_WAIT_UNTIL([ovs-appctl -t ovsdb-server version])
CHECK_DBS([ordinals
])
OVSDB_SERVER_SHUTDOWN(["
/backtrace/d
/killed/d
"])
AT_CLEANUP
AT_SETUP([--remote=db: implementation])
AT_KEYWORDS([ovsdb server positive])
AT_DATA([schema],
[[{"name": "mydb",
"tables": {
"Root": {
"columns": {
"managers": {
"type": {
"key": "string",
"min": 0,
"max": "unlimited"}},
"manager_options": {
"type": {
"key": {"type": "uuid", "refTable": "Manager"},
"min": 0,
"max": "unlimited"}}}},
"Manager": {
"columns": {
"target": {
"type": "string"},
"is_connected": {
"type": {
"key": "boolean",
"min": 0,
"max": 1}}}}}}
]])
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
AT_CHECK(
[[ovsdb-tool transact db \
'["mydb",
{"op": "insert",
"table": "Root",
"row": {
"managers": "punix:socket1",
"manager_options": ["set", [["named-uuid", "x"]]]}},
{"op": "insert",
"table": "Manager",
"uuid-name": "x",
"row": {"target": "punix:socket2"}}]']], [0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server --detach --no-chdir --log-file --pidfile --remote=db:mydb,Root,managers --remote=db:mydb,Root,manager_options db], [0], [ignore], [ignore])
ovs-appctl -t ovsdb-server time/warp 6000 1000
AT_CHECK(
[[ovsdb-client transact unix:socket1 \
'["mydb",
{"op": "select",
"table": "Root",
"where": [],
"columns": ["managers"]},
{"op": "select",
"table": "Manager",
"where": [],
"columns": ["target", "is_connected"]}]']],
[0], [stdout], [ignore])
AT_CHECK(
[uuidfilt stdout],
[0],
[[[{"rows":[{"managers":"punix:socket1"}]},{"rows":[{"is_connected":false,"target":"punix:socket2"}]}]
]],
[ignore])
OVSDB_SERVER_SHUTDOWN(["
/No status column present in the Manager table/d
"])
AT_CLEANUP
AT_SETUP([ovsdb-server/add-remote and remove-remote])
AT_KEYWORDS([ovsdb server positive])
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server --detach --no-chdir --log-file --pidfile db], [0], [ignore], [ignore])
AT_CHECK([test ! -e socket1])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-remote punix:socket1])
if test "$IS_WIN32" = "yes"; then
OVS_WAIT_UNTIL([test -e socket1])
else
OVS_WAIT_UNTIL([test -S socket1])
fi
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [punix:socket1
])
AT_CHECK([test ! -e socket2])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-remote punix:socket2])
if test "$IS_WIN32" = "yes"; then
OVS_WAIT_UNTIL([test -e socket2])
else
OVS_WAIT_UNTIL([test -S socket2])
fi
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [punix:socket1
punix:socket2
])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-remote db:x,y,z], [2],
[], ["db:x,y,z": no database named x
ovs-appctl: ovsdb-server: server returned an error
])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/remove-remote punix:socket1])
OVS_WAIT_UNTIL([test ! -e socket1])
if test "$IS_WIN32" = "yes"; then
AT_CHECK([test -e socket2])
else
AT_CHECK([test -S socket2])
fi
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [punix:socket2
])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/remove-remote punix:socket2])
OVS_WAIT_UNTIL([test ! -e socket2])
AT_CHECK([test ! -e socket1])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes])
OVSDB_SERVER_SHUTDOWN
AT_CLEANUP
AT_SETUP([ovsdb-server/add-remote and remove-remote with config file])
AT_KEYWORDS([ovsdb server positive config-file])
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
on_exit 'kill $(cat *.pid)'
AT_DATA([config.json], [
{
"remotes": {
},
"databases": { "db": {} }
}
])
AT_CAPTURE_FILE([config.json])
AT_CHECK([ovsdb-server -vfile --detach --no-chdir --log-file --pidfile \
--config-file=config.json], [0], [ignore], [ignore])
AT_CHECK([test ! -e socket1])
AT_CHECK([sed -i'back' '/"remotes"/a\
"punix:socket1": {}
' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
if test "$IS_WIN32" = "yes"; then
OVS_WAIT_UNTIL([test -e socket1])
else
OVS_WAIT_UNTIL([test -S socket1])
fi
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [punix:socket1
])
AT_CHECK([test ! -e socket2])
AT_CHECK([sed -i'back' '/"remotes"/a\
"punix:socket2": {},
' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
if test "$IS_WIN32" = "yes"; then
OVS_WAIT_UNTIL([test -e socket2])
else
OVS_WAIT_UNTIL([test -S socket2])
fi
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [punix:socket1
punix:socket2
])
AT_CHECK([sed -i'back' '/"remotes"/a\
"db:x,y,z": {},
' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload], [2], [ignore], [ignore])
OVS_WAIT_UNTIL([grep -q '"db:x,y,z": no database named x' ovsdb-server.log])
AT_CHECK([sed -i'back' '/db:x,y,z/d' config.json])
AT_CHECK([sed -i'back' '/punix:socket1/d' config.json])
AT_CHECK([sed -i'back' 's/"punix:socket2": {},/"punix:socket2": {}/' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
OVS_WAIT_UNTIL([test ! -e socket1])
if test "$IS_WIN32" = "yes"; then
AT_CHECK([test -e socket2])
else
AT_CHECK([test -S socket2])
fi
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [punix:socket2
])
AT_CHECK([sed -i'back' '/punix:socket2/d' config.json])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/reload])
OVS_WAIT_UNTIL([test ! -e socket2])
AT_CHECK([test ! -e socket1])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes])
OVSDB_SERVER_SHUTDOWN(['/"db:x,y,z": no database named x/d'])
AT_CLEANUP
AT_SETUP([ovsdb-server/add-remote with --monitor])
AT_KEYWORDS([ovsdb server positive])
AT_SKIP_IF([test "$IS_WIN32" = "yes"])
# This test intentionally causes SIGSEGV, so make sanitizers ignore it.
ASAN_OPTIONS=$ASAN_OPTIONS:handle_segv=0; export ASAN_OPTIONS
UBSAN_OPTIONS=$UBSAN_OPTIONS:handle_segv=0; export UBSAN_OPTIONS
# Start ovsdb-server, initially with no remotes.
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server -vfile -vvlog:off --monitor --detach --no-chdir --pidfile --log-file db], [0], [ignore], [ignore])
# Add a remote.
AT_CHECK([test ! -e socket1])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-remote punix:socket1])
OVS_WAIT_UNTIL([test -S socket1])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [punix:socket1
])
# Kill the daemon process, making it look like a segfault,
# and wait for a new daemon process to get spawned and for it to
# start listening on 'socket1'.
cp ovsdb-server.pid old.pid
rm socket1
AT_CHECK([kill -SEGV `cat ovsdb-server.pid`])
OVS_WAIT_WHILE([kill -0 `cat old.pid`])
OVS_WAIT_UNTIL(
[test -s ovsdb-server.pid && test `cat ovsdb-server.pid` != `cat old.pid`])
OVS_WAIT_UNTIL([ovs-appctl -t ovsdb-server version])
OVS_WAIT_UNTIL([test -S socket1])
OVSDB_SERVER_SHUTDOWN(["
/backtrace/d
/killed/d
"])
AT_CLEANUP
AT_SETUP([ovsdb-server/add-remote and remove-remote with --monitor])
AT_KEYWORDS([ovsdb server positive])
AT_SKIP_IF([test "$IS_WIN32" = "yes"])
# This test intentionally causes SIGSEGV, so make sanitizers ignore it.
ASAN_OPTIONS=$ASAN_OPTIONS:handle_segv=0; export ASAN_OPTIONS
UBSAN_OPTIONS=$UBSAN_OPTIONS:handle_segv=0; export UBSAN_OPTIONS
# Start ovsdb-server, initially with no remotes.
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server -vfile -vvlog:off --monitor --detach --no-chdir --pidfile --log-file db], [0], [ignore], [ignore])
# Add a remote.
AT_CHECK([test ! -e socket1])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/add-remote punix:socket1])
OVS_WAIT_UNTIL([test -S socket1])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes],
[0], [punix:socket1
])
# Remove the remote.
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/remove-remote punix:socket1])
OVS_WAIT_UNTIL([test ! -e socket1])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/list-remotes])
# Kill the daemon process, making it look like a segfault,
# and wait for a new daemon process to get spawned and make sure that it
# does not listen on 'socket1'.
cp ovsdb-server.pid old.pid
AT_CHECK([kill -SEGV `cat ovsdb-server.pid`])
OVS_WAIT_WHILE([kill -0 `cat old.pid`])
OVS_WAIT_UNTIL(
[test -s ovsdb-server.pid && test `cat ovsdb-server.pid` != `cat old.pid`])
OVS_WAIT_UNTIL([ovs-appctl -t ovsdb-server version])
AT_CHECK([test ! -e socket1])
OVSDB_SERVER_SHUTDOWN(["
/backtrace/d
/killed/d
"])
AT_CLEANUP
AT_SETUP([SSL/TLS db: implementation])
AT_KEYWORDS([ovsdb server positive ssl tls $5])
AT_SKIP_IF([test "$HAVE_OPENSSL" = no])
# For this test, we pass PKIDIR through an ovsdb-tool transact, and
# msys on Windows does not convert the path style automatically.
# So, do that forcefully with a 'pwd -W' (called through the pwd() function).
PKIDIR="$(cd $abs_top_builddir/tests && pwd)"
AT_SKIP_IF([expr "$PKIDIR" : ".*[[ '\"
\\]]"])
AT_DATA([schema],
[[{"name": "mydb",
"tables": {
"SSL": {
"columns": {
"private_key": {"type": "string"},
"certificate": {"type": "string"},
"ca_cert": {"type": "string"},
"ssl_protocols" : {"type": "string"},
"ssl_ciphers" : {"type" : "string"},
"ssl_ciphersuites" : {"type": "string"}
}}}}
]])
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
# The !ECDHE-ECDSA-AES256-GCM-SHA384 in the ssl_ciphers is so that
# a cipher negotiation failure can be tested for later. Same for
# the TLS_CHACHA20_POLY1305_SHA256 ciphersuite.
AT_CHECK(
[[ovsdb-tool transact db \
'["mydb",
{"op": "insert",
"table": "SSL",
"row": {"private_key": "'"$PKIDIR/testpki-privkey2.pem"'",
"certificate": "'"$PKIDIR/testpki-cert2.pem"'",
"ca_cert": "'"$PKIDIR/testpki-cacert.pem"'",
"ssl_protocols": "'"TLSv1.3,TLSv1.2"'",
"ssl_ciphers": "'"DEFAULT:@SECLEVEL=2:!ECDHE-ECDSA-AES256-GCM-SHA384"'",
"ssl_ciphersuites": "TLS_CHACHA20_POLY1305_SHA256"
}}]']],
[0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK(
[ovsdb-server --log-file --detach --no-chdir --pidfile \
--private-key=db:mydb,SSL,private_key \
--certificate=db:mydb,SSL,certificate \
--ca-cert=db:mydb,SSL,ca_cert \
--ssl-protocols=db:mydb,SSL,ssl_protocols \
--ssl-ciphers=db:mydb,SSL,ssl_ciphers \
--ssl-ciphersuites=db:mydb,SSL,ssl_ciphersuites \
--remote=pssl:0:127.0.0.1 db],
[0], [ignore], [ignore])
PARSE_LISTENING_PORT([ovsdb-server.log], [SSL_PORT])
# SSL_OVSDB_CLIENT(PROTOCOL, [CIPHERS], [CIPHERSUITES])
m4_define([SSL_OVSDB_CLIENT], [dnl
ovsdb-client -vconsole:stream_ssl:dbg \
--private-key=$PKIDIR/testpki-privkey.pem \
--certificate=$PKIDIR/testpki-cert.pem \
--ca-cert=$PKIDIR/testpki-cacert.pem \
--ssl-protocols=[$1] \
m4_if([$2], [], [], [--ssl-ciphers=$2]) \
m4_if([$3], [], [], [--ssl-ciphersuites=$3]) \
transact ssl:127.0.0.1:$SSL_PORT \
'[[["mydb",
{"op": "select",
"table": "SSL",
"where": [],
"columns": ["private_key"]}]]]'])
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.3,TLSv1.2]), [0], [stdout], [ignore])
AT_CHECK_UNQUOTED(
[cat stdout], [0],
[[@<:@{"rows":@<:@{"private_key":"$PKIDIR/testpki-privkey2.pem"}@:>@}@:>@
]], [ignore])
# Check that connection over TLSv1.2 works.
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.2]), [0], [stdout], [ignore])
AT_CHECK_UNQUOTED(
[cat stdout], [0],
[[@<:@{"rows":@<:@{"private_key":"$PKIDIR/testpki-privkey2.pem"}@:>@}@:>@
]], [ignore])
# Check that connection over TLSv1.3 works.
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.3]), [0], [stdout], [ignore])
AT_CHECK_UNQUOTED(
[cat stdout], [0],
[[@<:@{"rows":@<:@{"private_key":"$PKIDIR/testpki-privkey2.pem"}@:>@}@:>@
]], [ignore])
# Check that when ciphers are not compatible, a negotiation
# failure occurs.
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.2], [ECDHE-ECDSA-AES256-GCM-SHA384]),
[1], [stdout], [stderr])
cat stderr > output
AT_CHECK_UNQUOTED(
[sed -n "/failed to connect/s/ (.*)//p" output], [0],
[ovsdb-client: failed to connect to "ssl:127.0.0.1:$SSL_PORT"
],
[ignore])
# The error message for being unable to negotiate a shared ciphersuite
# is 'sslv3 alert handshake failure'. This is not the clearest message.
# In openssl 3.2.0 all the error messages were updated to replace 'sslv3'
# with 'ssl/tls'.
AT_CHECK_UNQUOTED(
[grep -E "(sslv3|ssl/tls) alert handshake failure" output], [0],
[stdout],
[ignore])
# Check that when ciphersuites are not compatible, a negotiation
# failure occurs.
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.3], [DEFAULT], [TLS_AES_256_GCM_SHA384]),
[1], [stdout], [stderr])
cat stderr > output
AT_CHECK_UNQUOTED(
[sed -n "/failed to connect/s/ (.*)//p" output], [0],
[ovsdb-client: failed to connect to "ssl:127.0.0.1:$SSL_PORT"
], [ignore])
AT_CHECK([grep -q -E "(sslv3|ssl/tls) alert handshake failure" output])
# Checking parsing of different protocol ranges.
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.2,TLSv1.3]), [0], [stdout], [stderr])
AT_CHECK([grep -q 'Enabled protocol range: TLSv1.2 - TLSv1.3' stderr])
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.2-TLSv1.3]), [0], [stdout], [stderr])
AT_CHECK([grep -q 'Enabled protocol range: TLSv1.2 - TLSv1.3' stderr])
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.3-TLSv1.2]), [0], [stdout], [stderr])
AT_CHECK([grep -q 'Enabled protocol range: TLSv1.2 - TLSv1.3' stderr])
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.3+]), [0], [stdout], [stderr])
AT_CHECK([grep -q 'Enabled protocol range: TLSv1.3 or later' stderr])
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.2-TLSv1.3,TLSv1.3+]), [0], [stdout], [stderr])
AT_CHECK([grep -q 'SSL/TLS protocol not recognized' stderr])
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.2+TLSv1.3]), [0], [stdout], [stderr])
AT_CHECK([grep -q 'SSL/TLS protocol not recognized' stderr])
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1+]), [0], [stdout], [stderr])
AT_CHECK([grep -q 'SSL/TLS protocol not recognized' stderr])
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.1]), [0], [stdout], [stderr])
AT_CHECK([grep -q 'SSL/TLS protocol not recognized' stderr])
OVSDB_SERVER_SHUTDOWN(["
/stream_ssl|WARN/d
/Protocol error/d
"])
AT_CLEANUP
AT_SETUP([SSL/TLS db: implementation (TLSv1.3 only)])
AT_KEYWORDS([ovsdb server positive ssl tls $5])
AT_SKIP_IF([test "$HAVE_OPENSSL" = no])
# For this test, we pass PKIDIR through an ovsdb-tool transact, and
# msys on Windows does not convert the path style automatically.
# So, do that forcefully with a 'pwd -W' (called through the pwd() function).
PKIDIR="$(cd $abs_top_builddir/tests && pwd)"
AT_SKIP_IF([expr "$PKIDIR" : ".*[[ '\"
\\]]"])
AT_DATA([schema],
[[{"name": "mydb",
"tables": {
"SSL": {
"columns": {
"private_key": {"type": "string"},
"certificate": {"type": "string"},
"ca_cert": {"type": "string"},
"ssl_protocols" : {"type": "string"},
"ssl_ciphers" : {"type" : "string"},
"ssl_ciphersuites" : {"type": "string"}
}}}}
]])
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
AT_CHECK(
[[ovsdb-tool transact db \
'["mydb",
{"op": "insert",
"table": "SSL",
"row": {"private_key": "'"$PKIDIR/testpki-privkey2.pem"'",
"certificate": "'"$PKIDIR/testpki-cert2.pem"'",
"ca_cert": "'"$PKIDIR/testpki-cacert.pem"'",
"ssl_protocols": "'"TLSv1.3"'"
}}]']],
[0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK(
[ovsdb-server --log-file --detach --no-chdir --pidfile \
--private-key=db:mydb,SSL,private_key \
--certificate=db:mydb,SSL,certificate \
--ca-cert=db:mydb,SSL,ca_cert \
--ssl-protocols=db:mydb,SSL,ssl_protocols \
--ssl-ciphers=db:mydb,SSL,ssl_ciphers \
--ssl-ciphersuites=db:mydb,SSL,ssl_ciphersuites \
--remote=pssl:0:127.0.0.1 db],
[0], [ignore], [ignore])
PARSE_LISTENING_PORT([ovsdb-server.log], [SSL_PORT])
# SSL_OVSDB_CLIENT(PROTOCOL)
m4_define([SSL_OVSDB_CLIENT], [dnl
ovsdb-client -vconsole:stream_ssl:dbg \
--private-key=$PKIDIR/testpki-privkey.pem \
--certificate=$PKIDIR/testpki-cert.pem \
--ca-cert=$PKIDIR/testpki-cacert.pem \
--ssl-protocols=[$1] \
transact ssl:127.0.0.1:$SSL_PORT \
'[[["mydb",
{"op": "select",
"table": "SSL",
"where": [],
"columns": ["private_key"]}]]]'])
# Check that, when the server has TLSv1.3 and the client has
# TLSv1.2, connection fails.
AT_CHECK(SSL_OVSDB_CLIENT([TLSv1.2]), [1], [stdout], [stderr])
cat stderr > output
AT_CHECK_UNQUOTED(
[sed -n "/failed to connect/s/ (.*)//p" output], [0],
[ovsdb-client: failed to connect to "ssl:127.0.0.1:$SSL_PORT"
],
[ignore])
AT_CHECK([grep -q 'Enabled protocol range: TLSv1.2 - TLSv1.2' stderr])
OVSDB_SERVER_SHUTDOWN(["
/stream_ssl|WARN/d
/Protocol error/d
"])
AT_CLEANUP
OVS_START_SHELL_HELPERS
# ovsdb_check_online_compaction MODEL
#
# where MODEL is "standalone" or "cluster"
ovsdb_check_online_compaction() {
local model=$1
ordinal_schema > schema
dnl Make sure that "ovsdb-tool create" works with a dangling symlink for
dnl the database and the lockfile, creating the target of each symlink rather
dnl than replacing the symlinks with regular files.
mkdir dir
if test "$IS_WIN32" = "no"; then
ln -s dir/db db
ln -s dir/.db.~lock~ .db.~lock~
AT_SKIP_IF([test ! -h db || test ! -h .db.~lock~])
fi
AT_CHECK([if test $model = standalone; then
ovsdb-tool create db schema
else
ovsdb-tool create-cluster db schema unix:s1.raft
fi])
dnl Start ovsdb-server.
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server -vvlog:off -vconsole:off --detach --no-chdir --pidfile --remote=punix:socket --log-file db], [0], [ignore], [ignore])
AT_CHECK([ovsdb_client_wait unix:socket ordinals connected])
AT_CAPTURE_FILE([ovsdb-server.log])
dnl Do a bunch of random transactions that put crap in the database log.
AT_CHECK(
[[for pair in 'zero 0' 'one 1' 'two 2' 'three 3' 'four 4' 'five 5'; do
set -- $pair
ovsdb-client transact unix:socket '
["ordinals",
{"op": "insert",
"table": "ordinals",
"row": {"name": "'$1'", "number": '$2'}},
{"op": "comment",
"comment": "add row for '"$pair"'"}]'
ovsdb-client transact unix:socket '
["ordinals",
{"op": "delete",
"table": "ordinals",
"where": [["number", "==", '$2']]},
{"op": "comment",
"comment": "delete row for '"$2"'"}]'
ovsdb-client transact unix:socket '
["ordinals",
{"op": "insert",
"table": "ordinals",
"row": {"name": "'$1'", "number": '$2'}},
{"op": "comment",
"comment": "add back row for '"$pair"'"}]'
done]],
[0], [stdout])
if test $model = standalone; then
dnl Check that all the crap is in fact in the database log.
AT_CHECK([[uuidfilt db | grep -v ^OVSDB | \
sed 's/"_date":[0-9]*/"_date":0/' | sed 's/"_is_diff":true,//' | \
ovstest test-json --multiple -]], [0],
[[{"cksum":"12345678 9","name":"ordinals","tables":{"ordinals":{"columns":{"name":{"type":"string"},"number":{"type":"integer"}},"indexes":[["number"],["number","name"]]}},"version":"5.1.3"}
{"_comment":"add row for zero 0","_date":0,"ordinals":{"<0>":{"name":"zero"}}}
{"_comment":"delete row for 0","_date":0,"ordinals":{"<0>":null}}
{"_comment":"add back row for zero 0","_date":0,"ordinals":{"<1>":{"name":"zero"}}}
{"_comment":"add row for one 1","_date":0,"ordinals":{"<2>":{"name":"one","number":1}}}
{"_comment":"delete row for 1","_date":0,"ordinals":{"<2>":null}}
{"_comment":"add back row for one 1","_date":0,"ordinals":{"<3>":{"name":"one","number":1}}}
{"_comment":"add row for two 2","_date":0,"ordinals":{"<4>":{"name":"two","number":2}}}
{"_comment":"delete row for 2","_date":0,"ordinals":{"<4>":null}}
{"_comment":"add back row for two 2","_date":0,"ordinals":{"<5>":{"name":"two","number":2}}}
{"_comment":"add row for three 3","_date":0,"ordinals":{"<6>":{"name":"three","number":3}}}
{"_comment":"delete row for 3","_date":0,"ordinals":{"<6>":null}}
{"_comment":"add back row for three 3","_date":0,"ordinals":{"<7>":{"name":"three","number":3}}}
{"_comment":"add row for four 4","_date":0,"ordinals":{"<8>":{"name":"four","number":4}}}
{"_comment":"delete row for 4","_date":0,"ordinals":{"<8>":null}}
{"_comment":"add back row for four 4","_date":0,"ordinals":{"<9>":{"name":"four","number":4}}}
{"_comment":"add row for five 5","_date":0,"ordinals":{"<10>":{"name":"five","number":5}}}
{"_comment":"delete row for 5","_date":0,"ordinals":{"<10>":null}}
{"_comment":"add back row for five 5","_date":0,"ordinals":{"<11>":{"name":"five","number":5}}}
]])
else
dnl At least check that there are a lot of transactions.
AT_CHECK([test `wc -l < db` -gt 50])
fi
dnl Dump out and check the actual database contents.
AT_CHECK([ovsdb-client dump unix:socket ordinals], [0], [stdout])
AT_CHECK([uuidfilt stdout], [0], [dnl
ordinals table
_uuid name number
------------------------------------ ----- ------
<0> five 5
<1> four 4
<2> one 1
<3> three 3
<4> two 2
<5> zero 0
])
cp db db.pre-compaction
dnl Now compact the database in-place.
AT_CHECK([[ovs-appctl -t ovsdb-server ovsdb-server/compact]],
[0], [], [ignore])
dnl Negative test.
AT_CHECK([[ovs-appctl -t ovsdb-server ovsdb-server/compact _Server]],
[2], [], [cannot compact built-in databases
ovs-appctl: ovsdb-server: server returned an error
])
dnl Make sure that "db" is still a symlink to dir/db instead of getting
dnl replaced by a regular file, ditto for .db.~lock~.
if test "$IS_WIN32" = "no"; then
AT_CHECK([test -h db])
AT_CHECK([test -h .db.~lock~])
AT_CHECK([test -f dir/db])
AT_CHECK([test -f dir/.db.~lock~])
fi
# We can't fully re-check the contents of the database log, because the
# order of the records is not predictable, but there should only be 4 lines
# in it now in the standalone case.
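# (Each record in the standalone on-disk format takes two lines, an "OVSDB
# JSON" header plus the JSON body, so a freshly compacted standalone database
# should contain just the schema record and a single data record.)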
AT_CAPTURE_FILE([db])
compacted_lines=`wc -l < db`
echo compacted_lines=$compacted_lines
if test $model = standalone; then
AT_CHECK([test $compacted_lines -eq 4])
fi
dnl And check that the dumped data is the same too:
AT_CHECK([ovsdb-client dump unix:socket ordinals], [0], [stdout])
AT_CHECK([uuidfilt stdout], [0], [dnl
ordinals table
_uuid name number
------------------------------------ ----- ------
<0> five 5
<1> four 4
<2> one 1
<3> three 3
<4> two 2
<5> zero 0
])
dnl Now do some more transactions.
AT_CHECK(
[[ovsdb-client transact unix:socket '
["ordinals",
{"op": "delete",
"table": "ordinals",
"where": [["number", "<", 3]]}]']],
[0], [[[{"count":3}]
]], [ignore])
dnl There should be 6 lines in the log now, for the standalone case,
dnl and for the clustered case the file should at least have grown.
updated_lines=`wc -l < db`
echo compacted_lines=$compacted_lines updated_lines=$updated_lines
if test $model = standalone; then
AT_CHECK([test $updated_lines -eq 6])
else
AT_CHECK([test $updated_lines -gt $compacted_lines])
fi
dnl Then check that the dumped data is correct. This time first kill
dnl and restart the database server to ensure that the data is correct on
dnl disk as well as in memory.
OVSDB_SERVER_SHUTDOWN
AT_CHECK([ovsdb-server -vvlog:off -vconsole:off --detach --no-chdir --pidfile --remote=punix:socket --log-file db], [0], [ignore], [ignore])
AT_CHECK([ovsdb-client dump unix:socket ordinals], [0], [stdout])
AT_CHECK([uuidfilt stdout], [0], [dnl
ordinals table
_uuid name number
------------------------------------ ----- ------
<0> five 5
<1> four 4
<2> three 3
], [])
OVSDB_SERVER_SHUTDOWN
}
OVS_END_SHELL_HELPERS
AT_SETUP([compacting online - standalone])
AT_KEYWORDS([ovsdb server compact])
ovsdb_check_online_compaction standalone
AT_CLEANUP
AT_SETUP([compacting online - cluster])
AT_KEYWORDS([ovsdb server compact])
ovsdb_check_online_compaction cluster
AT_CLEANUP
OVS_START_SHELL_HELPERS
# ovsdb_check_online_conversion MODEL
#
# where MODEL is "standalone" or "cluster"
ovsdb_check_online_conversion() {
local model=$1
on_exit 'kill `cat *.pid`'
ordinal_schema > schema
AT_DATA([new-schema],
[[{"name": "ordinals",
"tables": {
"ordinals": {
"columns": {
"number": {"type": "integer"}}}}}
]])
dnl Make sure that "ovsdb-tool create" works with a dangling symlink for
dnl the database and the lockfile, creating the target of each symlink
dnl rather than replacing the symlinks with regular files.
mkdir dir
if test "$IS_WIN32" = "no"; then
ln -s dir/db db
ln -s dir/.db.~lock~ .db.~lock~
AT_SKIP_IF([test ! -h db || test ! -h .db.~lock~])
fi
AT_CHECK([if test $model = standalone; then
ovsdb-tool create db schema
else
ovsdb-tool create-cluster db schema unix:s1.raft
fi])
dnl Start the database server.
AT_CHECK([ovsdb-server -vfile -vvlog:off -vconsole:off --detach --no-chdir --pidfile --log-file --remote=punix:db.sock db], [0], [ignore], [ignore])
AT_CAPTURE_FILE([ovsdb-server.log])
dnl Put some data in the database.
AT_CHECK(
[[for pair in 'zero 0' 'one 1' 'two 2' 'three 3' 'four 4' 'five 5'; do
set -- $pair
ovsdb-client transact '
["ordinals",
{"op": "insert",
"table": "ordinals",
"row": {"name": "'$1'", "number": '$2'}},
{"op": "comment",
"comment": "add row for '"$pair"'"}]'
done | uuidfilt]], [0],
[[[{"uuid":["uuid","<0>"]},{}]
[{"uuid":["uuid","<1>"]},{}]
[{"uuid":["uuid","<2>"]},{}]
[{"uuid":["uuid","<3>"]},{}]
[{"uuid":["uuid","<4>"]},{}]
[{"uuid":["uuid","<5>"]},{}]
]], [ignore])
dnl Try "needs-conversion".
AT_CHECK([ovsdb-client needs-conversion schema], [0], [no
])
AT_CHECK([ovsdb-client needs-conversion new-schema], [0], [yes
])
dnl Start two monitors on the 'ordinals' db, one that is database
dnl change aware and one that is not.
AT_CHECK([ovsdb-client -vfile -vvlog:off --detach --no-chdir --pidfile=monitor-ordinals-aware.pid --log-file=monitor-ordinals-aware.log --db-change-aware --no-headings monitor ordinals ordinals number name > monitor-ordinals-aware.stdout 2> monitor-ordinals-aware.stderr])
AT_CAPTURE_FILE([monitor-ordinals-aware.stdout])
AT_CAPTURE_FILE([monitor-ordinals-aware.log])
AT_CAPTURE_FILE([monitor-ordinals-aware.stderr])
AT_CHECK([ovsdb-client -vfile -vvlog:off --detach --no-chdir --pidfile=monitor-ordinals-unaware.pid --log-file=monitor-ordinals-unaware.log --no-db-change-aware --no-headings monitor ordinals ordinals number name > monitor-ordinals-unaware.stdout 2> monitor-ordinals-unaware.stderr])
AT_CAPTURE_FILE([monitor-ordinals-unaware.stdout])
AT_CAPTURE_FILE([monitor-ordinals-unaware.log])
AT_CAPTURE_FILE([monitor-ordinals-unaware.stderr])
dnl Start two monitors on the '_Server' db, one that is database
dnl change aware and one that is not.
AT_CHECK([ovsdb-client -vfile -vvlog:off --detach --no-chdir --pidfile=monitor-server-aware.pid --log-file=monitor-server-aware.log --db-change-aware --no-headings monitor _Server Database name > monitor-server-aware.stdout 2> monitor-server-aware.stderr])
AT_CAPTURE_FILE([monitor-server-aware.stdout])
AT_CAPTURE_FILE([monitor-server-aware.log])
AT_CAPTURE_FILE([monitor-server-aware.stderr])
AT_CHECK([ovsdb-client -vfile -vvlog:off --detach --no-chdir --pidfile=monitor-server-unaware.pid --log-file=monitor-server-unaware.log --no-db-change-aware --no-headings monitor _Server Database name > monitor-server-unaware.stdout 2> monitor-server-unaware.stderr])
AT_CAPTURE_FILE([monitor-server-unaware.stdout])
AT_CAPTURE_FILE([monitor-server-unaware.log])
AT_CAPTURE_FILE([monitor-server-unaware.stderr])
dnl Start two long-running transactions (triggers) on the 'ordinals' db,
dnl one that is database change aware and one that is not.
ordinals_txn='[["ordinals",
{"op": "wait",
"table": "ordinals",
"where": [["name", "==", "seven"]],
"columns": ["name", "number"],
"rows": [],
"until": "!="}]]'
AT_CHECK([ovsdb-client -vfile -vvlog:off --detach --no-chdir --pidfile=trigger-ordinals-aware.pid --log-file=trigger-ordinals-aware.log --db-change-aware transact "$ordinals_txn" > trigger-ordinals-aware.stdout 2> trigger-ordinals-aware.stderr])
AT_CAPTURE_FILE([trigger-ordinals-aware.stdout])
AT_CAPTURE_FILE([trigger-ordinals-aware.log])
AT_CAPTURE_FILE([trigger-ordinals-aware.stderr])
AT_CHECK([ovsdb-client -vfile -vvlog:off --detach --no-chdir --pidfile=trigger-ordinals-unaware.pid --log-file=trigger-ordinals-unaware.log --no-db-change-aware transact "$ordinals_txn" > trigger-ordinals-unaware.stdout 2> trigger-ordinals-unaware.stderr])
AT_CAPTURE_FILE([trigger-ordinals-unaware.stdout])
AT_CAPTURE_FILE([trigger-ordinals-unaware.log])
AT_CAPTURE_FILE([trigger-ordinals-unaware.stderr])
dnl Start two long-running transactions (triggers) on the _Server db,
dnl one that is database change aware and one that is not.
server_txn='[["_Server",
{"op": "wait",
"table": "Database",
"where": [["name", "==", "xyzzy"]],
"columns": ["name"],
"rows": [],
"until": "!="}]]'
AT_CHECK([ovsdb-client -vfile -vvlog:off --detach --no-chdir --pidfile=trigger-server-aware.pid --log-file=trigger-server-aware.log --db-change-aware transact "$server_txn" > trigger-server-aware.stdout 2> trigger-server-aware.stderr])
AT_CAPTURE_FILE([trigger-server-aware.stdout])
AT_CAPTURE_FILE([trigger-server-aware.log])
AT_CAPTURE_FILE([trigger-server-aware.stderr])
AT_CHECK([ovsdb-client -vfile -vvlog:off --detach --no-chdir --pidfile=trigger-server-unaware.pid --log-file=trigger-server-unaware.log --no-db-change-aware transact "$server_txn" > trigger-server-unaware.stdout 2> trigger-server-unaware.stderr])
AT_CAPTURE_FILE([trigger-server-unaware.stdout])
AT_CAPTURE_FILE([trigger-server-unaware.log])
AT_CAPTURE_FILE([trigger-server-unaware.stderr])
dnl Dump out and check the actual database contents.
AT_CHECK([ovsdb-client dump unix:db.sock ordinals], [0], [stdout])
AT_CHECK([uuidfilt stdout], [0], [dnl
ordinals table
_uuid name number
------------------------------------ ----- ------
<0> five 5
<1> four 4
<2> one 1
<3> three 3
<4> two 2
<5> zero 0
])
dnl Convert the database.
AT_CHECK([ovsdb-client convert new-schema])
dnl Try "needs-conversion".
AT_CHECK([ovsdb-client needs-conversion schema], [0], [yes
])
AT_CHECK([ovsdb-client needs-conversion new-schema], [0], [no
])
dnl Verify that the "ordinals" monitors behaved as they should have.
dnl Both should have exited, for different reasons.
for x in aware unaware; do
echo $x
OVS_WAIT_WHILE([test -e monitor-ordinals-$x.pid])
AT_CHECK([sort -k 3 monitor-ordinals-$x.stdout | uuidfilt], [0],
[<0> initial 0 zero
<1> initial 1 one
<2> initial 2 two
<3> initial 3 three
<4> initial 4 four
<5> initial 5 five
])
done
AT_CHECK([sed 's/.*: //' monitor-ordinals-unaware.stderr], [0], [receive failed (End of file)
])
AT_CHECK([sed 's/.*: //' monitor-ordinals-aware.stderr], [0], [ordinals database was removed
])
dnl Verify that the _Server monitors behaved as they should have.
dnl The db-aware monitor should still be running, but not the unaware one.
for x in aware unaware; do
AT_CHECK([sort -k 3 monitor-server-$x.stdout | uuidfilt], [0],
[<0> initial _Server
<1> initial ordinals
])
done
OVS_WAIT_WHILE([test -e monitor-server-unaware.pid])
AT_CHECK([sed 's/.*: //' monitor-server-unaware.stderr], [0], [receive failed (End of file)
])
AT_CHECK([test -e monitor-server-aware.pid])
dnl Verify that the "ordinals" triggers behaved as they should have:
dnl Both should have exited, for different reasons.
for x in unaware aware; do
OVS_WAIT_WHILE([test -e trigger-ordinals-$x.pid])
AT_CHECK([cat trigger-ordinals-$x.stdout])
done
AT_CHECK([cat trigger-ordinals-unaware.stderr], [0], [ovsdb-client: transaction failed (End of file)
])
AT_CHECK([cat trigger-ordinals-aware.stderr], [0], [ovsdb-client: transaction returned error: "canceled"
])
dnl Verify that the _Server triggers behaved as they should have:
dnl The db-aware trigger should still be waiting, but not the unaware one.
for x in aware unaware; do
AT_CHECK([cat trigger-server-$x.stdout])
done
OVS_WAIT_WHILE([test -e trigger-server-unaware.pid])
AT_CHECK([sed 's/.*: //' trigger-server-unaware.stderr], [0], [transaction failed (End of file)
])
AT_CHECK([test -e trigger-server-aware.pid])
AT_CAPTURE_FILE([db])
if test $model = standalone; then
dnl We can't fully re-check the contents of the database log, because the
dnl order of the records is not predictable, but there should only be 4 lines
dnl in it now.
AT_CHECK([test `wc -l < db` -eq 4])
fi
dnl Check that the dumped data is the same except for the removed column:
AT_CHECK([ovsdb-client dump unix:db.sock ordinals | uuidfilt], [0], [dnl
ordinals table
_uuid number
------------------------------------ ------
<0> 0
<1> 1
<2> 2
<3> 3
<4> 4
<5> 5
])
dnl Now check that the converted database is still online and can be modified,
dnl then check that the database log has one more record and that the data
dnl is as expected.
AT_CHECK(
[[ovsdb-client transact '
["ordinals",
{"op": "insert",
"table": "ordinals",
"row": {"number": 6}},
{"op": "comment",
"comment": "add row for 6"}]' | uuidfilt]], [0],
[[[{"uuid":["uuid","<0>"]},{}]
]])
if test $model = standalone; then
AT_CHECK([test `wc -l < db` -eq 6])
fi
AT_CHECK([ovsdb-client dump unix:db.sock ordinals | uuidfilt], [0], [dnl
ordinals table
_uuid number
------------------------------------ ------
<0> 0
<1> 1
<2> 2
<3> 3
<4> 4
<5> 5
<6> 6
])
dnl Now kill and restart the database server to ensure that the data is
dnl correct on disk as well as in memory.
OVSDB_SERVER_SHUTDOWN
AT_CHECK([[ovsdb-server -vfile -vvlog:off -vconsole:off --detach --no-chdir --pidfile --log-file --remote=punix:db.sock db]],
[0], [ignore], [ignore])
AT_CHECK([ovsdb-client dump unix:db.sock ordinals | uuidfilt], [0], [dnl
ordinals table
_uuid number
------------------------------------ ------
<0> 0
<1> 1
<2> 2
<3> 3
<4> 4
<5> 5
<6> 6
])
dnl Make sure that "db" is still a symlink to dir/db instead of getting
dnl replaced by a regular file, ditto for .db.~lock~.
if test "$IS_WIN32" = "no"; then
AT_CHECK([test -h db])
AT_CHECK([test -h .db.~lock~])
AT_CHECK([test -f dir/db])
AT_CHECK([test -f dir/.db.~lock~])
fi
OVSDB_SERVER_SHUTDOWN
}
OVS_END_SHELL_HELPERS
AT_SETUP([schema conversion online - standalone])
AT_KEYWORDS([ovsdb server convert needs-conversion standalone])
ovsdb_check_online_conversion standalone
AT_CLEANUP
AT_SETUP([schema conversion online - clustered])
AT_KEYWORDS([ovsdb server convert needs-conversion cluster])
ovsdb_check_online_conversion cluster
AT_CLEANUP
AT_SETUP([ovsdb-server combines updates on backlogged connections])
AT_KEYWORDS([ovsdb server])
on_exit 'kill `cat *.pid`'
# The maximum socket receive buffer size is important for this test, which
# tests behavior when the receive buffer overflows.
if test -e /proc/sys/net/core/rmem_max; then
# Linux
rmem_max=`cat /proc/sys/net/core/rmem_max`
elif rmem_max=`sysctl -n net.inet.tcp.recvbuf_max 2>/dev/null`; then
: # FreeBSD, NetBSD
else
# Don't know how to get maximum socket receive buffer on this OS
AT_SKIP_IF([:])
fi
# Calculate the number of iterations we need to queue. Each iteration,
# by itself, yields a monitor update of about 25 kB, so run enough
# iterations to fill up that much space plus a few for luck.
n_iterations=`expr $rmem_max / 25000 + 5`
echo rmem_max=$rmem_max n_iterations=$n_iterations
# If there's too much queuing skip the test to avoid timing out.
AT_SKIP_IF([test $rmem_max -gt 1048576])
# Calculate the exact number of monitor updates expected for $n_iterations,
# assuming no updates are combined. The "extra" update is for the initial
# contents of the database.
n_updates=`expr $n_iterations \* 3 + 1`
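# For example, with a common Linux default of rmem_max=212992 bytes, this
# works out to 212992 / 25000 + 5 = 13 iterations and 13 * 3 + 1 = 40
# expected updates. (The 212992 figure is only illustrative; the test uses
# whatever value the running kernel actually reports.)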
# Start an ovsdb-server with the vswitchd schema.
OVSDB_INIT([db])
AT_CHECK([ovsdb-server --detach --no-chdir --pidfile --log-file --remote=punix:db.sock db],
[0], [ignore], [ignore])
# Executes a set of transactions that add a bridge with 100 ports, and
# then deletes that bridge. This yields three monitor updates that
# add up to about 25 kB in size.
#
# The update also increments a counter held in the database so that we can
# verify that the transactions actually took effect (e.g. that monitor
# updates at the end weren't just dropped). We add an arbitrary
# string to the counter to make grepping for it more reliable.
counter=0
trigger_big_update () {
counter=`expr $counter + 1`
ovs-vsctl --no-wait -- set open_vswitch . system_version=xyzzy$counter
ovs-vsctl --no-wait -- add-br br0 $add
ovs-vsctl --no-wait -- del-br br0
}
add_ports () {
for j in `seq 1 100`; do
printf " -- add-port br0 p%d" $j
done
}
add=`add_ports`
AT_CAPTURE_FILE([ovsdb-client.err])
AT_CAPTURE_FILE([ovsdb-client-nonblock.err])
# Start an ovsdb-client monitoring all changes to the database.
# By default, it is non-blocking and will get an update message
# for each ovsdb-server transaction.
AT_CHECK([ovsdb-client --detach --no-chdir --pidfile=nonblock.pid monitor ALL >ovsdb-client-nonblock.out 2>ovsdb-client-nonblock.err])
# Start an ovsdb-client monitoring all changes to the database,
# make it block to force the buffers to fill up, and then execute
# enough iterations that ovsdb-server starts combining updates.
AT_CHECK([ovsdb-client --detach --no-chdir --pidfile monitor ALL >ovsdb-client.out 2>ovsdb-client.err])
AT_CHECK([ovs-appctl -t ovsdb-client ovsdb-client/block])
for i in `seq 1 $n_iterations`; do
echo "blocked update ($i of $n_iterations)"
trigger_big_update $i
done
AT_CHECK([ovs-appctl -t ovsdb-client ovsdb-client/unblock])
OVS_WAIT_UNTIL([grep "xyzzy$counter" ovsdb-client.out])
OVS_WAIT_UNTIL([grep "xyzzy$counter" ovsdb-client-nonblock.out])
OVS_APP_EXIT_AND_WAIT([ovsdb-client])
AT_CHECK([kill `cat nonblock.pid`])
# Count the number of updates in the ovsdb-client output by counting
# the number of changes to the Open_vSwitch table. (All of our
# transactions modify the Open_vSwitch table.) The blocked client should
# have logged fewer updates than the non-blocking one, which in turn
# should have logged at most $n_updates updates.
#
# Check that the counter is what we expect.
logged_updates=`grep -c '^Open_vSwitch' ovsdb-client.out`
logged_nonblock_updates=`grep -c '^Open_vSwitch' ovsdb-client-nonblock.out`
echo "logged_nonblock_updates=$logged_nonblock_updates (expected less or equal to $n_updates)"
echo "logged_updates=$logged_updates (expected less than $logged_nonblock_updates)"
AT_CHECK([test $logged_nonblock_updates -le $n_updates])
AT_CHECK([test $logged_updates -lt $logged_nonblock_updates])
AT_CHECK_UNQUOTED([ovs-vsctl get open_vswitch . system_version], [0],
[xyzzy$counter
])
OVSDB_SERVER_SHUTDOWN
AT_CLEANUP
AT_SETUP([ovsdb-server transaction history size])
AT_KEYWORDS([ovsdb server transaction])
on_exit 'kill `cat *.pid`'
dnl Start an ovsdb-server with the clustered vswitchd schema.
AT_CHECK([ovsdb-tool create-cluster db dnl
$abs_top_srcdir/vswitchd/vswitch.ovsschema unix:s1.raft],
[0], [ignore], [ignore])
AT_CHECK([ovsdb-server --detach --no-chdir --pidfile dnl
--log-file --remote=punix:db.sock db],
[0], [ignore], [ignore])
AT_CHECK([ovs-vsctl --no-wait init])
dnl Create a bridge with N ports per transaction, increasing N every 4
dnl iterations, and then remove the bridges. Increasing the size of the
dnl transactions ensures that they take up a significant percentage of
dnl the total database size, so the transaction history will not be able
dnl to hold all of them.
dnl
dnl The test verifies that the number of atoms in the transaction history
dnl is always less than the number of atoms in the database, except for
dnl the case where there is only one transaction in the history.
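dnl
dnl get_memory_value extracts a single counter from the space-separated
dnl NAME:VALUE pairs printed by 'ovs-appctl -t ovsdb-server memory/show',
dnl defaulting to 0 when the counter is not reported.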
get_memory_value () {
n=$(ovs-appctl -t ovsdb-server memory/show dnl
| tr ' ' '\n' | grep "^$1:" | cut -d ':' -f 2)
if test X"$n" = "X"; then
n=0
fi
echo $n
}
check_atoms () {
if test $(get_memory_value txn-history) -eq 1; then return; fi
n_db_atoms=$(get_memory_value atoms)
n_txn_history_atoms=$(get_memory_value txn-history-atoms)
echo "n_db_atoms: $n_db_atoms"
echo "n_txn_history_atoms: $n_txn_history_atoms"
AT_CHECK([test $n_txn_history_atoms -le $n_db_atoms])
}
add_ports () {
for j in $(seq 1 $2); do
printf " -- add-port br$1 p$1-%d" $j
done
}
initial_db_atoms=$(get_memory_value atoms)
for i in $(seq 1 100); do
cmd=$(add_ports $i $(($i / 4 + 1)))
AT_CHECK([ovs-vsctl --no-wait add-br br$i $cmd])
check_atoms
done
for i in $(seq 1 100); do
AT_CHECK([ovs-vsctl --no-wait del-br br$i])
check_atoms
done
dnl After removing all the bridges, the number of atoms in the database
dnl should return to its initial value.
AT_CHECK([test $(get_memory_value atoms) -eq $initial_db_atoms])
dnl Add a few more resources.
for i in $(seq 1 10); do
cmd=$(add_ports $i $(($i / 4 + 1)))
AT_CHECK([ovs-vsctl --no-wait add-br br$i $cmd])
done
check_atoms
db_atoms_before_conversion=$(get_memory_value atoms)
dnl Trigger online conversion.
AT_CHECK([ovsdb-client convert $abs_top_srcdir/vswitchd/vswitch.ovsschema],
[0], [ignore], [ignore])
dnl Check that conversion didn't change the number of atoms and the history
dnl still has a reasonable size.
check_atoms
AT_CHECK([test $(get_memory_value atoms) -eq $db_atoms_before_conversion])
OVSDB_SERVER_SHUTDOWN
AT_CLEANUP
AT_BANNER([OVSDB -- ovsdb-server transactions (SSL/TLS IPv4 sockets)])
# OVSDB_CHECK_EXECUTION(TITLE, SCHEMA, TRANSACTIONS, OUTPUT, [KEYWORDS])
#
# Creates a database with the given SCHEMA, starts an ovsdb-server on
# that database, and runs each of the TRANSACTIONS (which should be a
# quoted list of quoted strings) against it with ovsdb-client one at a
# time.
#
# Checks that the overall output is OUTPUT, but UUIDs in the output
# are replaced by markers of the form <N> where N is a number. The
# first unique UUID is replaced by <0>, the next by <1>, and so on.
# If a given UUID appears more than once it is always replaced by the
# same marker.
#
# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
m4_define([OVSDB_CHECK_EXECUTION],
[AT_SETUP([$1])
AT_KEYWORDS([ovsdb server positive ssl tls $5])
AT_SKIP_IF([test "$HAVE_OPENSSL" = no])
$2 > schema
PKIDIR=$abs_top_builddir/tests
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server --log-file --detach --no-chdir --pidfile --private-key=$PKIDIR/testpki-privkey2.pem --certificate=$PKIDIR/testpki-cert2.pem --ca-cert=$PKIDIR/testpki-cacert.pem --remote=pssl:0:127.0.0.1 db], [0], [ignore], [ignore])
PARSE_LISTENING_PORT([ovsdb-server.log], [SSL_PORT])
m4_foreach([txn], [$3],
[AT_CHECK([ovsdb-client --private-key=$PKIDIR/testpki-privkey.pem --certificate=$PKIDIR/testpki-cert.pem --ca-cert=$PKIDIR/testpki-cacert.pem transact ssl:127.0.0.1:$SSL_PORT 'txn'], [0], [stdout], [ignore])
cat stdout >> output
])
AT_CHECK([uuidfilt output], [0], [$4], [ignore])
OVSDB_SERVER_SHUTDOWN
AT_CLEANUP])
EXECUTION_EXAMPLES
AT_BANNER([OVSDB -- ovsdb-server transactions (SSL/TLS IPv6 sockets)])
# OVSDB_CHECK_EXECUTION(TITLE, SCHEMA, TRANSACTIONS, OUTPUT, [KEYWORDS])
#
# Creates a database with the given SCHEMA, starts an ovsdb-server on
# that database, and runs each of the TRANSACTIONS (which should be a
# quoted list of quoted strings) against it with ovsdb-client one at a
# time.
#
# Checks that the overall output is OUTPUT, but UUIDs in the output
# are replaced by markers of the form <N> where N is a number. The
# first unique UUID is replaced by <0>, the next by <1>, and so on.
# If a given UUID appears more than once it is always replaced by the
# same marker.
#
# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
m4_define([OVSDB_CHECK_EXECUTION],
[AT_SETUP([$1])
AT_KEYWORDS([ovsdb server positive ssl6 ssl tls $5])
AT_SKIP_IF([test "$HAVE_OPENSSL" = no])
AT_SKIP_IF([test $HAVE_IPV6 = no])
$2 > schema
PKIDIR=$abs_top_builddir/tests
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-server --log-file --detach --no-chdir --pidfile --private-key=$PKIDIR/testpki-privkey2.pem --certificate=$PKIDIR/testpki-cert2.pem --ca-cert=$PKIDIR/testpki-cacert.pem --remote=pssl:0:[[::1]] db], [0], [ignore], [ignore])
PARSE_LISTENING_PORT([ovsdb-server.log], [SSL_PORT])
m4_foreach([txn], [$3],
[AT_CHECK([ovsdb-client --private-key=$PKIDIR/testpki-privkey.pem --certificate=$PKIDIR/testpki-cert.pem --ca-cert=$PKIDIR/testpki-cacert.pem transact ssl:[[::1]]:$SSL_PORT 'txn'], [0], [stdout], [ignore])
cat stdout >> output
])
AT_CHECK([uuidfilt output], [0], [$4], [ignore])
OVSDB_SERVER_SHUTDOWN
AT_CLEANUP])
ONE_EXECUTION_EXAMPLE
AT_BANNER([OVSDB -- ovsdb-server transactions (TCP IPv4 sockets)])
# OVSDB_CHECK_EXECUTION(TITLE, SCHEMA, TRANSACTIONS, OUTPUT, [KEYWORDS])
#
# Creates a database with the given SCHEMA, starts an ovsdb-server on
# that database, and runs each of the TRANSACTIONS (which should be a
# quoted list of quoted strings) against it with ovsdb-client one at a
# time.
#
# Checks that the overall output is OUTPUT, but UUIDs in the output
# are replaced by markers of the form <N> where N is a number. The
# first unique UUID is replaced by <0>, the next by <1>, and so on.
# If a given UUID appears more than once it is always replaced by the
# same marker.
#
# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
m4_define([OVSDB_CHECK_EXECUTION],
[AT_SETUP([$1])
AT_KEYWORDS([ovsdb server positive tcp $5])
$2 > schema
PKIDIR=$abs_top_builddir/tests
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-server --log-file --detach --no-chdir --pidfile --remote=ptcp:0:127.0.0.1 db], [0], [ignore], [ignore])
PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT])
m4_foreach([txn], [$3],
[AT_CHECK([ovsdb-client transact tcp:127.0.0.1:$TCP_PORT 'txn'], [0], [stdout], [ignore])
cat stdout >> output
])
AT_CHECK([uuidfilt output], [0], [$4], [ignore])
OVSDB_SERVER_SHUTDOWN
AT_CLEANUP])
EXECUTION_EXAMPLES
AT_BANNER([OVSDB -- ovsdb-server transactions (TCP IPv6 sockets)])
# OVSDB_CHECK_EXECUTION(TITLE, SCHEMA, TRANSACTIONS, OUTPUT, [KEYWORDS])
#
# Creates a database with the given SCHEMA, starts an ovsdb-server on
# that database, and runs each of the TRANSACTIONS (which should be a
# quoted list of quoted strings) against it with ovsdb-client one at a
# time.
#
# Checks that the overall output is OUTPUT, but UUIDs in the output
# are replaced by markers of the form <N> where N is a number. The
# first unique UUID is replaced by <0>, the next by <1>, and so on.
# If a given UUID appears more than once it is always replaced by the
# same marker.
#
# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
m4_define([OVSDB_CHECK_EXECUTION],
[AT_SETUP([$1])
AT_KEYWORDS([ovsdb server positive tcp6 $5])
AT_SKIP_IF([test $HAVE_IPV6 = no])
$2 > schema
PKIDIR=$abs_top_builddir/tests
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-server --log-file --detach --no-chdir --pidfile --remote=ptcp:0:[[::1]] db], [0], [ignore], [ignore])
PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT])
m4_foreach([txn], [$3],
[AT_CHECK([ovsdb-client transact tcp:[[::1]]:$TCP_PORT 'txn'], [0], [stdout], [ignore])
cat stdout >> output
])
AT_CHECK([uuidfilt output], [0], [$4], [ignore])
OVSDB_SERVER_SHUTDOWN
AT_CLEANUP])
ONE_EXECUTION_EXAMPLE
AT_BANNER([OVSDB -- transactions on transient ovsdb-server])
# OVSDB_CHECK_EXECUTION(TITLE, SCHEMA, TRANSACTIONS, OUTPUT, [KEYWORDS])
#
# Creates a database with the given SCHEMA and runs each of the
# TRANSACTIONS (which should be a quoted list of quoted strings)
# against it with ovsdb-client one at a time. Each ovsdb-client
# is run against a separately started ovsdb-server that executes
# only that single transaction. (The idea is that this should
# help to ferret out any differences between what ovsdb-server has
# in memory and what actually gets committed to disk.)
#
# Checks that the overall output is OUTPUT, but UUIDs in the output
# are replaced by markers of the form <N> where N is a number. The
# first unique UUID is replaced by <0>, the next by <1>, and so on.
# If a given UUID appears more than once it is always replaced by the
# same marker.
#
# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
m4_define([OVSDB_CHECK_EXECUTION],
[AT_SETUP([$1])
AT_SKIP_IF([test "$IS_WIN32" = "yes"])
AT_KEYWORDS([ovsdb server positive transient $5])
$2 > schema
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
m4_foreach([txn], [$3],
[AT_DATA([txnfile], [ovsdb-client transact unix:socket 'txn'
])
AT_CHECK([ovsdb-server --remote=punix:socket db --run="sh txnfile"], [0], [stdout], [ignore])
cat stdout >> output
])
AT_CHECK([uuidfilt output], [0], [$4], [ignore])
AT_CLEANUP])
EXECUTION_EXAMPLES
AT_BANNER([OVSDB -- ovsdb-server relay])
# OVSDB_CHECK_EXECUTION_RELAY(MODEL, TITLE, SCHEMA, TRANSACTIONS,
# OUTPUT, [KEYWORDS])
#
# Creates a clustered or standalone (MODEL) database with the given SCHEMA
# and starts an ovsdb-server on it. Also starts a daisy chain of
# ovsdb-servers in relay mode where the first relay server is connected to
# the main non-relay ovsdb-server.
#
# Runs each of the TRANSACTIONS (which should be a quoted list of
# quoted strings) against one of the relay servers in the middle with
# ovsdb-client one at a time. That server executes read-only transactions
# itself and forwards the rest to the previous ovsdb-server in the chain.
# The main ovsdb-server executes 'write' transactions. Transaction
# replies with data updates propagate back through the chain to all
# the servers and the client.
#
# main relay relay relay relay relay
# server1 <-- server2 <-- server3 <-- server4 <-- server5 <-- server6
# ^
# |
# ovsdb-client
#
# Checks that the overall output is OUTPUT, but UUIDs in the output
# are replaced by markers of the form <N> where N is a number. The
# first unique UUID is replaced by <0>, the next by <1>, and so on.
# If a given UUID appears more than once it is always replaced by the
# same marker.
#
# Checks that the dumps of all databases and the transaction ids are the same.
#
# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
m4_define([OVSDB_CHECK_EXECUTION_RELAY],
[AT_SETUP([$2 - relay - $1])
AT_KEYWORDS([ovsdb server tcp relay $6 $1])
n_servers=6
target=4
$3 > schema
schema_name=`ovsdb-tool schema-name schema`
on_exit 'kill `cat *.pid`'
AT_CHECK([if test $1 = standalone; then
ovsdb-tool create db1 schema
else
ovsdb-tool create-cluster db1 schema unix:s1.raft
fi], [0], [stdout], [ignore])
AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server1.log dnl
--pidfile --remote=punix:db1.sock db1
], [0], [ignore], [ignore])
for i in $(seq 2 ${n_servers}); do
dnl Run every second relay with a config file.
if test $(expr $i % 2) -eq 0; then
echo "{
\"remotes\": { \"punix:db${i}.sock\": {} },
\"databases\": {
\"${schema_name}\": {
\"service-model\": \"relay\",
\"source\": { \"unix:db$((i-1)).sock\": {} }
}
}
}" > config${i}.json
AT_CHECK([ovsdb-server --detach --no-chdir --pidfile=${i}.pid \
--log-file=ovsdb-server$i.log \
--unixctl=unixctl${i} -vjsonrpc:file:dbg \
--config-file=config${i}.json
], [0], [ignore], [ignore])
else
AT_CHECK([ovsdb-server --detach --no-chdir \
--log-file=ovsdb-server$i.log \
--pidfile=${i}.pid --remote=punix:db${i}.sock \
--unixctl=unixctl${i} -vjsonrpc:file:dbg \
relay:${schema_name}:unix:db$((i-1)).sock
], [0], [ignore], [ignore])
fi
done
m4_foreach([txn], [$4],
[AT_CHECK([ovsdb-client transact unix:db${target}.sock 'txn'], [0],
[stdout], [ignore])
cat stdout >> output
])
AT_CHECK([uuidfilt output], [0], [$5], [ignore])
AT_CHECK([ovsdb-client dump unix:db1.sock], [0], [stdout], [ignore])
for i in $(seq 2 ${n_servers}); do
OVS_WAIT_UNTIL([ovsdb-client dump unix:db${i}.sock > dump${i}; dnl
diff stdout dump${i}])
done
dnl Check that transaction ids in notifications are the same on all relays.
last_id_pattern='s/\(.*"monid","[[a-z]]*".,"\)\(.*\)\(",{".*\)/\2/'
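dnl The sed pattern assumes the usual "update3" notification layout, where
dnl the ("monid","<db>") monitor id is followed by the last transaction id
dnl and the table updates; only the transaction id in the middle is kept.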
AT_CHECK([grep 'received notification, method="update3"' ovsdb-server2.log dnl
| sed $last_id_pattern > txn_ids2])
if test $1 = clustered; then
dnl Check that transaction ids are not all zeroes in clustered mode.
AT_CHECK([! grep -q "00000000-0000-0000-0000-000000000000" txn_ids2])
fi
for i in $(seq 3 ${n_servers}); do
AT_CHECK([grep 'received notification, method="update3"' ovsdb-server$i.log dnl
| sed $last_id_pattern > txn_ids$i])
AT_CHECK([diff txn_ids2 txn_ids$i])
done
OVSDB_SERVER_SHUTDOWN
for i in $(seq 2 ${n_servers}); do
OVSDB_SERVER_SHUTDOWN_N([$i])
done
AT_CLEANUP])
m4_define([OVSDB_CHECK_EXECUTION],
[OVSDB_CHECK_EXECUTION_RELAY(standalone, $@)
OVSDB_CHECK_EXECUTION_RELAY(clustered, $@)])
EXECUTION_EXAMPLES
AT_BANNER([OVSDB -- ovsdb-server replication])
# OVSDB_CHECK_EXECUTION(TITLE, SCHEMA, TRANSACTIONS, OUTPUT, [KEYWORDS])
#
# Creates three databases with the given SCHEMA, and starts an ovsdb-server on
# each database.
# Runs each of the TRANSACTIONS (which should be a quoted list of
# quoted strings) against one of the servers with ovsdb-client one at a
# time. The server replicates its database to the other two ovsdb-servers,
# one of which is configured via command line and the other via --config-file.
#
# Checks that the dumps of all databases are the same.
#
# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
m4_define([OVSDB_CHECK_EXECUTION],
[AT_SETUP([$1])
AT_KEYWORDS([ovsdb server tcp replication $5])
$2 > schema
AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-tool create db3 schema], [0], [stdout], [ignore])
on_exit 'kill $(cat *.pid)'
AT_CHECK([ovsdb-server -vfile --detach --no-chdir --log-file=ovsdb-server1.log \
--pidfile --remote=punix:db.sock db1], [0], [ignore], [ignore])
AT_CHECK([ovsdb-server -vfile --detach --no-chdir --log-file=ovsdb-server2.log \
--pidfile=2.pid --remote=punix:db2.sock --unixctl=unixctl2 \
--sync-from=unix:db.sock db2], [0], [ignore], [ignore])
AT_DATA([config3.json], [
{
"remotes": { "punix:db3.sock": {} },
"databases": {
"db3": {
"service-model": "active-backup",
"backup": true,
"source": { "unix:db.sock": {} }
}
}
}
])
AT_CHECK([ovsdb-server -vfile --detach --no-chdir --log-file=ovsdb-server3.log \
--pidfile=3.pid --unixctl=unixctl3 --config-file=config3.json],
[0], [ignore], [ignore])
m4_foreach([txn], [$3],
[AT_CHECK([ovsdb-client transact 'txn'], [0], [stdout], [ignore])
])
AT_CHECK([ovsdb-client dump], [0], [stdout], [ignore])
OVS_WAIT_UNTIL([ ovsdb-client dump unix:db2.sock > dump2; diff -u stdout dump2])
OVS_WAIT_UNTIL([ ovsdb-client dump unix:db3.sock > dump3; diff -u stdout dump3])
OVSDB_SERVER_SHUTDOWN
OVSDB_SERVER_SHUTDOWN2
OVSDB_SERVER_SHUTDOWN_N([3])
AT_CLEANUP])
EXECUTION_EXAMPLES
AT_BANNER([OVSDB -- ovsdb-server replication table-exclusion])
# OVSDB_CHECK_REPLICATION(TITLE, SCHEMA, TRANSACTIONS, OUTPUT, [KEYWORDS])
#
# Creates three databases with the given SCHEMA, and starts an
# ovsdb-server on each database.
# Runs each of the TRANSACTIONS (which should be a quoted list of
# quoted strings) against one of the servers with ovsdb-client one at a
# time. The server replicates its database to the other two ovsdb-servers,
# one of which is configured via command line and the other via --config-file.
#
# Checks that the difference between the dump of the first and the other two
# databases is OUTPUT, but UUIDs in the output are replaced by markers of the
# form <N> where N is a number. The first unique UUID is replaced by <0>,
# the next by <1>, and so on.
# If a given UUID appears more than once it is always replaced by the
# same marker.
#
# Also checks that the dumps of the second and third databases are the same.
#
# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
m4_define([OVSDB_CHECK_REPLICATION],
[AT_SETUP([$1])
AT_KEYWORDS([ovsdb server tcp replication table-exclusion])
AT_SKIP_IF([test $DIFF_SUPPORTS_NORMAL_FORMAT = no])
$2 > schema
AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-tool create db3 schema], [0], [stdout], [ignore])
on_exit 'kill $(cat *.pid)'
AT_CHECK([ovsdb-server -vfile --detach --no-chdir --log-file=ovsdb-server1.log \
--pidfile --remote=punix:db.sock db1], [0], [ignore], [ignore])
AT_CHECK([ovsdb-server -vfile --detach --no-chdir --log-file=ovsdb-server2.log \
--pidfile=2.pid --remote=punix:db2.sock --unixctl=unixctl2 \
--sync-from=unix:db.sock --sync-exclude-tables=mydb:b db2],
[0], [ignore], [ignore])
AT_DATA([config3.json], [
{
"remotes": { "punix:db3.sock": {} },
"databases": {
"db3": {
"service-model": "active-backup",
"backup": true,
"source": { "unix:db.sock": {} },
"exclude-tables": [["b"]]
}
}
}
])
AT_CHECK([ovsdb-server -vfile --detach --no-chdir --log-file=ovsdb-server3.log \
--pidfile=3.pid --unixctl=unixctl3 --config-file=config3.json],
[0], [ignore], [ignore])
m4_foreach([txn], [$3],
[AT_CHECK([ ovsdb-client transact 'txn' ], [0], [stdout], [ignore])
])
AT_CHECK([ovsdb-client dump], [0], [stdout], [ignore])
cat stdout > dump1
OVS_WAIT_UNTIL([ ovsdb-client dump unix:db2.sock | grep one ])
AT_CHECK([ovsdb-client dump unix:db2.sock], [0], [stdout], [ignore])
cat stdout > dump2
OVS_WAIT_UNTIL([ ovsdb-client dump unix:db3.sock | grep one ])
AT_CHECK([ovsdb-client dump unix:db3.sock], [0], [stdout], [ignore])
cat stdout > dump3
AT_CHECK([diff -u dump2 dump3])
AT_CHECK([diff dump1 dump2], [1], [stdout], [ignore])
cat stdout > output
AT_CHECK([uuidfilt output], [0], [$4], [ignore])
OVSDB_SERVER_SHUTDOWN
OVSDB_SERVER_SHUTDOWN2
OVSDB_SERVER_SHUTDOWN_N([3])
AT_CLEANUP])
REPLICATION_EXAMPLES
AT_BANNER([OVSDB -- ovsdb-server replication runtime management commands])
#ovsdb-server/get-active-ovsdb-server command
AT_SETUP([ovsdb-server/get-active-ovsdb-server])
AT_KEYWORDS([ovsdb server replication get-active])
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server --detach --no-chdir --log-file --pidfile --sync-from=tcp:127.0.0.1:9999 db], [0], [ignore], [ignore])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/get-active-ovsdb-server],
[0], [tcp:127.0.0.1:9999
])
AT_CLEANUP
#*ovsdb-server/set-active-ovsdb-server command
AT_SETUP([ovsdb-server/set-active-ovsdb-server])
AT_KEYWORDS([ovsdb server replication set-active])
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server --detach --no-chdir --log-file --pidfile db], [0], [ignore], [ignore])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/set-active-ovsdb-server tcp:127.0.0.1:9999])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/get-active-ovsdb-server],
[0], [tcp:127.0.0.1:9999
])
AT_CLEANUP
#ovsdb-server/get-sync-exclude-tables command
AT_SETUP([ovsdb-server/get-sync-exclude-tables])
AT_KEYWORDS([ovsdb server replication get-exclude-tables])
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server --detach --no-chdir --log-file --pidfile --sync-exclude-tables=mydb:db1,mydb:db2 db], [0], [ignore], [ignore])
AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/get-sync-exclude-tables],
[0], [mydb:db1,mydb:db2
])
AT_CLEANUP
#ovsdb-server/set-sync-exclude-tables command
AT_SETUP([ovsdb-server/set-sync-exclude-tables])
on_exit 'kill `cat *.pid`'
AT_KEYWORDS([ovsdb server replication set-exclude-tables])
AT_SKIP_IF([test $DIFF_SUPPORTS_NORMAL_FORMAT = no])
replication_schema > schema
AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock db1], [0], [ignore], [ignore])
AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server2.log --pidfile=2.pid --remote=punix:db2.sock --unixctl=unixctl2 --sync-from=unix:db.sock db2], [0], [ignore], [ignore])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/set-sync-exclude-tables mydb:b], [0], [ignore], [ignore])
AT_CHECK([ovsdb-client transact unix:db.sock \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 0, "name": "zero"}},
{"op": "insert",
"table": "b",
"row": {"number": 1, "name": "one"}}]]'], [0], [stdout], [ignore])
AT_CHECK([ovsdb-client dump unix:db.sock], [0], [stdout], [ignore])
cat stdout > dump1
OVS_WAIT_UNTIL([ ovsdb-client dump unix:db2.sock | grep zero ])
AT_CHECK([ovsdb-client dump unix:db2.sock], [0], [stdout], [ignore])
cat stdout > dump2
AT_CHECK([diff dump1 dump2], [1], [stdout], [ignore])
cat stdout > output
AT_CHECK([uuidfilt output], [0], [7,9c7,8
< _uuid name number
< ------------------------------------ ---- ------
< <0> one 1
---
> _uuid name number
> ----- ---- ------
])
OVSDB_SERVER_SHUTDOWN
OVSDB_SERVER_SHUTDOWN2
AT_CLEANUP
#ovsdb-server/connect-active-ovsdb-server
AT_SETUP([ovsdb-server/connect-active-server])
on_exit 'kill `cat *.pid`'
AT_KEYWORDS([ovsdb server replication connect-active-server])
replication_schema > schema
AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-server -vfile --detach --no-chdir \
--log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock db1],
[0], [ignore], [ignore])
AT_CHECK([ovsdb-server -vfile --detach --no-chdir \
--log-file=ovsdb-server2.log --pidfile=2.pid \
--remote=punix:db2.sock --unixctl=unixctl2 db2],
[0], [ignore], [ignore])
dnl Try to connect without specifying the active server.
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/connect-active-ovsdb-server], [0],
[Unable to connect: active server is not specified.
], [ignore])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/set-active-ovsdb-server unix:db.sock], [0], [stdout], [ignore])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/connect-active-ovsdb-server], [0], [stdout], [ignore])
AT_CHECK([ovsdb-client transact unix:db.sock \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 0, "name": "zero"}}]]'], [0], [stdout], [ignore])
AT_CHECK([ovsdb-client dump unix:db.sock], [0], [stdout], [ignore])
cat stdout > dump1
OVS_WAIT_UNTIL([ ovsdb-client dump unix:db2.sock | grep zero ])
AT_CHECK([ovsdb-client dump unix:db2.sock], [0], [stdout], [ignore])
cat stdout > dump2
AT_CHECK([diff dump1 dump2], [0], [], [ignore])
OVSDB_SERVER_SHUTDOWN
OVSDB_SERVER_SHUTDOWN2
AT_CLEANUP
#ovsdb-server/disconnect-active-server command
AT_SETUP([ovsdb-server/disconnect-active-server])
on_exit 'kill `cat *.pid`'
AT_KEYWORDS([ovsdb server replication disconnect-active-server])
AT_SKIP_IF([test $DIFF_SUPPORTS_NORMAL_FORMAT = no])
replication_schema > schema
AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock db1], [0], [ignore], [ignore])
AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server2.log --pidfile=2.pid --remote=punix:db2.sock --unixctl=unixctl2 --sync-from=unix:db.sock db2], [0], [ignore], [ignore])
AT_CHECK([ovsdb-client transact unix:db.sock \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 0, "name": "zero"}}]]'], [0], [stdout], [ignore])
dnl Make sure the transaction shows up in db2. This also tests that the
dnl backup server can be read.
OVS_WAIT_UNTIL([ovsdb-client dump unix:db2.sock | grep zero])
dnl The backup server does not accept any write transactions.
AT_CHECK([ovsdb-client transact unix:db2.sock \
'[["mydb",
{"op": "insert",
"table": "b",
"row": {"number": 1, "name": "one"}}]]'], [0],
[[[{"details":"insert operation not allowed when database server is in read only mode","error":"not allowed"}]]
])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/disconnect-active-ovsdb-server], [0], [ignore], [ignore])
AT_CHECK([ovsdb-client transact unix:db.sock \
'[["mydb",
{"op": "insert",
"table": "b",
"row": {"number": 1, "name": "one"}}]]'], [0], [stdout], [ignore])
AT_CHECK([ovsdb-client dump unix:db.sock], [0], [stdout], [ignore])
cat stdout > dump1
sleep 1
AT_CHECK([ovsdb-client dump unix:db2.sock], [0], [stdout], [ignore])
cat stdout > dump2
AT_CHECK([diff dump1 dump2], [1], [stdout], [ignore])
cat stdout > output
AT_CHECK([uuidfilt output], [0], [7,9c7,8
< _uuid name number
< ------------------------------------ ---- ------
< <0> one 1
---
> _uuid name number
> ----- ---- ------
], [ignore])
dnl The backup server now becomes active and can accept write transactions.
AT_CHECK([ovsdb-client transact unix:db2.sock \
'[["mydb",
{"op": "insert",
"table": "b",
"row": {"number": 1, "name": "one"}}]]'], [0], [stdout], [ignore])
AT_CHECK([ovsdb-client dump unix:db2.sock], [0], [stdout])
cat stdout > output
AT_CHECK([uuidfilt output], [0], [a table
_uuid name number
------------------------------------ ---- ------
<0> zero 0
b table
_uuid name number
------------------------------------ ---- ------
<1> one 1
])
OVSDB_SERVER_SHUTDOWN
OVSDB_SERVER_SHUTDOWN2
AT_CLEANUP
#ovsdb-server/active-backup-role-switching
AT_SETUP([ovsdb-server/active-backup-role-switching])
AT_KEYWORDS([ovsdb server replication active-backup-switching])
replication_schema > schema
AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore])
dnl Add some data to both DBs
AT_CHECK([ovsdb-tool transact db1 \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 9, "name": "nine"}}]]'], [0], [ignore], [ignore])
AT_CHECK([ovsdb-tool transact db2 \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 9, "name": "nine"}}]]'], [0], [ignore], [ignore])
dnl Start both 'db1' and 'db2' in backup mode. Let them back up from each
dnl other. This is not a supported operational state, but it simulates a
dnl startup condition where an HA manager can select which one becomes the
dnl active server soon after.
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock --unixctl="`pwd`"/unixctl db1 --sync-from=unix:db2.sock --active ], [0], [ignore], [ignore])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/connect-active-ovsdb-server])
AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server2.log --pidfile=2.pid --remote=punix:db2.sock --unixctl="`pwd`"/unixctl2 --sync-from=unix:db.sock db2], [0], [ignore], [ignore])
dnl
dnl make sure both servers reached the replication state
OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/sync-status |grep replicating])
OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/sync-status |grep replicating])
dnl Switch the 'db1' to active
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/disconnect-active-ovsdb-server])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/sync-status], [0], [dnl
database: mydb
state: active
])
dnl Issue a transaction to 'db1'
AT_CHECK([ovsdb-client transact unix:db.sock \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 0, "name": "zero"}}]]'], [0], [ignore])
dnl It should be replicated to 'db2'
OVS_WAIT_UNTIL([ovsdb-client dump unix:db2.sock | grep zero])
dnl Flip the role of 'db1' and 'db2'. 'db1' becomes backup, and db2 becomes active
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/disconnect-active-ovsdb-server])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/connect-active-ovsdb-server])
dnl Verify the change happened.
OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/sync-status |grep replicating])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/sync-status], [0], [dnl
database: mydb
state: active
])
dnl Issue a transaction to 'db2' which is now active.
AT_CHECK([ovsdb-client transact unix:db2.sock \
'[["mydb",
{"op": "insert",
"table": "b",
"row": {"number": 1, "name": "one"}}]]'], [0], [ignore])
dnl The transaction should be replicated to 'db1'
OVS_WAIT_UNTIL([ovsdb-client dump unix:db.sock | grep one])
dnl Both servers should have the same content.
AT_CHECK([ovsdb-client dump unix:db.sock], [0], [stdout])
cat stdout > dump1
AT_CHECK([ovsdb-client dump unix:db2.sock], [0], [stdout])
cat stdout > dump2
AT_CHECK([diff dump1 dump2])
dnl OVSDB_SERVER_SHUTDOWN
dnl OVSDB_SERVER_SHUTDOWN2
AT_CLEANUP
AT_SETUP([ovsdb-server/active-backup-role-switching with config file])
AT_KEYWORDS([ovsdb server replication active-backup-switching config-file])
replication_schema > schema
AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore])
dnl Add some data to both DBs.
AT_CHECK([ovsdb-tool transact db1 \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 9, "name": "nine"}}]]'], [0], [ignore], [ignore])
AT_CHECK([ovsdb-tool transact db2 \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 9, "name": "nine"}}]]'], [0], [ignore], [ignore])
dnl Start both 'db1' and 'db2' in backup mode. Let them back up from each
dnl other. This is not a supported operational state, but it simulates a
dnl startup condition where an HA manager can select which one becomes the
dnl active server soon after.
on_exit 'kill $(cat *.pid)'
AT_DATA([config1.json], [
{
"remotes": { "punix:db.sock": {} },
"databases": {
"db1": {
"service-model": "active-backup",
"backup": true,
"source": { "unix:db2.sock": {} }
}
}
}
])
AT_CHECK([ovsdb-server -vfile --detach --no-chdir --log-file=ovsdb-server1.log \
--pidfile=1.pid --unixctl=unixctl1 --config-file=config1.json],
[0], [ignore], [ignore])
AT_DATA([config2.json], [
{
"remotes": { "punix:db2.sock": {} },
"databases": {
"db2": {
"service-model": "active-backup",
"backup": true,
"source": { "unix:db.sock": {} }
}
}
}
])
AT_CHECK([ovsdb-server -vfile --detach --no-chdir --log-file=ovsdb-server2.log \
--pidfile=2.pid --unixctl=unixctl2 --config-file=config2.json],
[0], [ignore], [ignore])
dnl Make sure both servers reached the replication state.
OVS_WAIT_UNTIL([ovs-appctl -t $(pwd)/unixctl1 ovsdb-server/sync-status | grep replicating])
OVS_WAIT_UNTIL([ovs-appctl -t $(pwd)/unixctl2 ovsdb-server/sync-status | grep replicating])
dnl Switch the 'db1' to active.
AT_CHECK([sed -i'back' 's/"backup": true/"backup": false/' config1.json])
AT_CHECK([ovs-appctl -t $(pwd)/unixctl1 ovsdb-server/reload])
AT_CHECK([ovs-appctl -t $(pwd)/unixctl1 ovsdb-server/sync-status], [0], [dnl
database: mydb
state: active
])
dnl Issue a transaction to 'db1'.
AT_CHECK([ovsdb-client transact unix:db.sock \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 0, "name": "zero"}}]]'], [0], [ignore])
dnl It should be replicated to 'db2'.
OVS_WAIT_UNTIL([ovsdb-client dump unix:db2.sock | grep zero])
dnl Issue a transaction to 'db2', it should fail.
AT_CHECK([ovsdb-client transact unix:db2.sock \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 1, "name": "one"}}]]'], [0], [dnl
[[{"details":"insert operation not allowed when database server is in read only mode","error":"not allowed"}]]
])
dnl Flip the role of 'db1' and 'db2'. 'db1' becomes backup, and 'db2' becomes active.
AT_CHECK([sed -i'back' 's/"backup": true/"backup": false/' config2.json])
AT_CHECK([ovs-appctl -t $(pwd)/unixctl2 ovsdb-server/reload])
AT_CHECK([sed -i'back' 's/"backup": false/"backup": true/' config1.json])
AT_CHECK([ovs-appctl -t $(pwd)/unixctl1 ovsdb-server/reload])
dnl Verify the change happened.
OVS_WAIT_UNTIL([ovs-appctl -t $(pwd)/unixctl1 ovsdb-server/sync-status | grep replicating])
AT_CHECK([ovs-appctl -t $(pwd)/unixctl2 ovsdb-server/sync-status], [0], [dnl
database: mydb
state: active
])
dnl Issue a transaction to 'db2' which is now active.
AT_CHECK([ovsdb-client transact unix:db2.sock \
'[["mydb",
{"op": "insert",
"table": "b",
"row": {"number": 1, "name": "one"}}]]'], [0], [ignore])
dnl The transaction should be replicated to 'db1'.
OVS_WAIT_UNTIL([ovsdb-client dump unix:db.sock | grep one])
dnl Issue a transaction to 'db1', it should fail.
AT_CHECK([ovsdb-client transact unix:db.sock \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 2, "name": "two"}}]]'], [0], [dnl
[[{"details":"insert operation not allowed when database server is in read only mode","error":"not allowed"}]]
])
dnl Both servers should have the same content.
AT_CHECK([ovsdb-client dump unix:db.sock], [0], [stdout])
cat stdout > dump1
AT_CHECK([ovsdb-client dump unix:db2.sock], [0], [stdout])
cat stdout > dump2
AT_CHECK([diff -u dump1 dump2])
OVSDB_SERVER_SHUTDOWN_N([1])
OVSDB_SERVER_SHUTDOWN2
AT_CLEANUP
#ovsdb-server prevent self replicating
AT_SETUP([ovsdb-server prevent self replicating])
AT_KEYWORDS([ovsdb server replication])
replication_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [stdout], [ignore])
dnl Add some data to the DB.
AT_CHECK([ovsdb-tool transact db \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 9, "name": "nine"}}]]'], [0], [ignore], [ignore])
dnl Start 'db', then try to make it a backup server of itself.
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server.log --pidfile --remote=punix:db.sock --unixctl="`pwd`"/unixctl db --sync-from=unix:db.sock --active ], [0], [ignore], [ignore])
dnl Save the current content
AT_CHECK([ovsdb-client dump unix:db.sock], [0], [stdout])
cp stdout dump1
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/connect-active-ovsdb-server])
dnl Check that self replicating is blocked.
AT_CHECK([grep "Self replicating is not allowed" ovsdb-server.log], [0], [stdout])
dnl Check current DB content is preserved.
AT_CHECK([ovsdb-client dump unix:db.sock], [0], [stdout])
cat stdout > dump2
AT_CHECK([diff dump1 dump2])
AT_CLEANUP
AT_SETUP([ovsdb-server/read-only db:ptcp connection])
on_exit 'kill `cat *.pid`'
AT_KEYWORDS([ovsdb server read-only])
AT_DATA([schema],
[[{"name": "mydb",
"tables": {
"Root": {
"columns": {
"managers": {
"type": {
"key": {"type": "uuid", "refTable": "Manager"},
"min": 0,
"max": "unlimited"}}}},
"Manager": {
"columns": {
"target": {
"type": "string"},
"read_only": {
"type": {
"key": "boolean",
"min": 0,
"max": 1}},
"is_connected": {
"type": {
"key": "boolean",
"min": 0,
"max": 1}}}},
"ordinals": {
"columns": {
"number": {"type": "integer"},
"name": {"type": "string"}},
"indexes": [["number"]]}
},
"version": "5.1.3",
"cksum": "12345678 9"
}
]])
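dnl The transaction below stores a Manager row with target "ptcp:0:127.0.0.1"
dnl and read_only=true; ovsdb-server picks it up through
dnl --remote=db:mydb,Root,managers, so the resulting TCP listener should only
dnl accept read-only operations.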
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
AT_CHECK(
[[ovsdb-tool transact db \
'["mydb",
{"op": "insert",
"table": "Root",
"row": {
"managers": ["set", [["named-uuid", "x"]]]}},
{"op": "insert",
"table": "Manager",
"uuid-name": "x",
"row": {"target": "ptcp:0:127.0.0.1",
"read_only": true}}]']], [0], [ignore], [ignore])
AT_CHECK([ovsdb-server --detach --no-chdir --log-file --pidfile --remote=db:mydb,Root,managers db], [0], [ignore], [ignore])
PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT])
AT_CHECK([ovsdb-client get-schema-version tcp:127.0.0.1:$TCP_PORT mydb], [0], [5.1.3
])
AT_CHECK([ovsdb-client transact tcp:127.0.0.1:$TCP_PORT \
['["mydb",
{"op": "insert",
"table": "ordinals",
"row": {"name": "two", "number": '2'}}
]']], [0], [stdout], [ignore])
cat stdout >> output
AT_CHECK([uuidfilt output], [0], [[[{"details":"insert operation not allowed when database server is in read only mode","error":"not allowed"}]]
], [ignore])
OVSDB_SERVER_SHUTDOWN(["
/No status column present in the Manager table/d
"])
AT_CLEANUP
AT_SETUP([ovsdb-server replication with schema mismatch])
AT_KEYWORDS([ovsdb server replication])
replication_schema > subset_schema
replication_schema_v2 > superset_schema
AT_CHECK([ovsdb-tool create db1 subset_schema], [0], [stdout], [ignore])
AT_CHECK([ovsdb-tool create db2 superset_schema], [0], [stdout], [ignore])
dnl Add some data to both DBs
AT_CHECK([ovsdb-tool transact db1 \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 9, "name": "nine"}}]]'], [0], [ignore], [ignore])
AT_CHECK([ovsdb-tool transact db2 \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 10, "name": "ten"}}]]'], [0], [ignore], [ignore])
dnl Start both 'db1' and 'db2'.
on_exit 'kill `cat *.pid`'
AT_CHECK([ovsdb-server -vfile --detach --no-chdir \
--log-file=ovsdb-server1.log --pidfile \
--remote=punix:db.sock \
--unixctl="$(pwd)"/unixctl db1 --active ],
[0], [ignore], [ignore])
AT_CHECK([ovsdb-server -vfile --detach --no-chdir \
--log-file=ovsdb-server2.log --pidfile=2.pid \
--remote=punix:db2.sock --unixctl="$(pwd)"/unixctl2 db2],
[0], [ignore], [ignore])
OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/sync-status |grep active])
OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/sync-status |grep active])
AT_CHECK([ovsdb-client dump unix:db.sock a number name], 0, [dnl
a table
name number
---- ------
nine 9
])
AT_CHECK([ovsdb-client dump unix:db2.sock a number name], 0, [dnl
a table
name number
---- ------
ten 10
])
# Replicate db1 from db2. It should fail since db2's schema
# doesn't match db1's and has additional tables/columns.
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/set-active-ovsdb-server unix:db2.sock])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/connect-active-ovsdb-server])
OVS_WAIT_UNTIL(
  [test 1 = `cat ovsdb-server1.log | grep "Schema version mismatch, checking if mydb can still be replicated or not" | wc -l`]
)
OVS_WAIT_UNTIL(
  [test 1 = `cat ovsdb-server1.log | grep "mydb cannot be replicated" | wc -l`]
)
OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/sync-status |grep active])
# Replicate db2 from db1. This should be successful.
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/disconnect-active-ovsdb-server])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/set-active-ovsdb-server unix:db.sock])
AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/connect-active-ovsdb-server])
OVS_WAIT_UNTIL(
  [test 1 = `cat ovsdb-server2.log | grep "Schema version mismatch, checking if mydb can still be replicated or not" | wc -l`]
)
OVS_WAIT_UNTIL(
  [test 1 = `cat ovsdb-server2.log | grep "mydb can be replicated" | wc -l`]
)
OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/sync-status |grep replicating])
AT_CHECK([ovsdb-client dump unix:db.sock a number name], 0, [dnl
a table
name number
---- ------
nine 9
])
AT_CHECK([ovsdb-client dump unix:db2.sock a number name], 0, [dnl
a table
name number
---- ------
nine 9
])
AT_CHECK([ovsdb-client transact unix:db.sock \
'[["mydb",
{"op": "insert",
"table": "a",
"row": {"number": 6, "name": "six"}}]]'], [0], [ignore], [ignore])
OVS_WAIT_UNTIL([test 1 = `ovsdb-client dump unix:db2.sock a number name | grep six | wc -l`])
AT_CHECK([
ovsdb-client dump unix:db2.sock a number name], 0, [dnl
a table
name number
---- ------
nine 9
six 6
])
AT_CLEANUP
AT_BANNER([OVSDB -- ovsdb-server stream record/replay])
AT_SETUP([ovsdb-server record/replay])
AT_KEYWORDS([ovsdb server record replay])
on_exit 'kill `cat *.pid`'
ordinal_schema > schema
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
dnl Create a directory for replay files.
AT_CHECK([mkdir replay_dir])
dnl Make a copy of a database for later replay.
AT_CHECK([cp db ./replay_dir/db.copy])
dnl Starting a dummy server only to reserve a TCP port.
AT_CHECK([cp db db.tmp])
AT_CHECK([ovsdb-server -vfile -vvlog:off --log-file=listener.log dnl
--detach --no-chdir dnl
--pidfile=2.pid --unixctl=unixctl2 dnl
--remote=ptcp:0:127.0.0.1 dnl
db.tmp], [0], [stdout], [stderr])
PARSE_LISTENING_PORT([listener.log], [BAD_TCP_PORT])
dnl Start ovsdb-server with recording enabled.
dnl Trying to listen on an already used TCP port to record the error.
AT_CHECK([ovsdb-server --record=./replay_dir dnl
-vfile -vvlog:off -vjsonrpc:file:dbg --log-file=1.log dnl
--detach --no-chdir --pidfile dnl
--remote=punix:db.sock dnl
--remote=ptcp:$BAD_TCP_PORT:127.0.0.1 dnl
--remote=ptcp:0:127.0.0.1 dnl
db], [0], [stdout], [stderr])
CHECK_DBS([ordinals
])
PARSE_LISTENING_PORT([1.log], [TCP_PORT])
dnl Start a monitor on the 'ordinals' db to check recording of this kind
dnl of message.
AT_CHECK([ovsdb-client -vfile -vvlog:off --detach --no-chdir dnl
--pidfile=monitor.pid --log-file=monitor.log dnl
--db-change-aware --no-headings dnl
monitor tcp:127.0.0.1:$TCP_PORT dnl
ordinals ordinals number name dnl
> monitor.stdout 2> monitor.stderr])
OVS_WAIT_UNTIL([test -e monitor.pid])
dnl Do a bunch of random transactions.
AT_CHECK(
[[for pair in 'zero 0' 'one 1' 'two 2' 'three 3' 'four 4' 'five 5'; do
set -- $pair
if test "$2" -eq "5"; then
        # Kill the monitor to check that this is recorded correctly.
kill -9 $(cat monitor.pid)
fi
ovsdb-client --db-change-aware transact unix:db.sock '
["ordinals",
{"op": "insert",
"table": "ordinals",
"row": {"name": "'$1'", "number": '$2'}},
{"op": "comment",
"comment": "add row for '"$pair"'"}]'
ovsdb-client transact unix:db.sock '
["ordinals",
{"op": "delete",
"table": "ordinals",
"where": [["number", "==", '$2']]},
{"op": "comment",
"comment": "delete row for '"$2"'"}]'
ovsdb-client transact unix:db.sock '
["ordinals",
{"op": "insert",
"table": "ordinals",
"row": {"name": "'$1'", "number": '$2'}},
{"op": "comment",
"comment": "add back row for '"$pair"'"}]'
done]],
[0], [stdout])
AT_CHECK([ovsdb-client dump unix:db.sock ordinals | uuidfilt], 0, [dnl
ordinals table
_uuid name number
------------------------------------ ----- ------
<0> five 5
<1> four 4
<2> one 1
<3> three 3
<4> two 2
<5> zero 0
])
AT_CHECK([uuidfilt monitor.stdout | sed '/^$/d'], [0], [dnl
<0> insert 0 zero
<0> delete 0 zero
<1> insert 0 zero
<2> insert 1 one
<2> delete 1 one
<3> insert 1 one
<4> insert 2 two
<4> delete 2 two
<5> insert 2 two
<6> insert 3 three
<6> delete 3 three
<7> insert 3 three
<8> insert 4 four
<8> delete 4 four
<9> insert 4 four
])
OVSDB_SERVER_SHUTDOWN(["/Address already in use/d"])
OVSDB_SERVER_SHUTDOWN2(["/Address already in use/d"])
dnl Starting a replay.
AT_CHECK([ovsdb-server --replay=./replay_dir dnl
-vfile -vvlog:off -vjsonrpc:file:dbg --log-file=2.log dnl
--detach --no-chdir --pidfile dnl
--remote=punix:db.sock dnl
--remote=ptcp:$BAD_TCP_PORT:127.0.0.1 dnl
--remote=ptcp:0:127.0.0.1 dnl
./replay_dir/db.copy], [0], [stdout], [stderr])
dnl Waiting for process termination. The process should exit after correctly
dnl processing the 'exit' unixctl command from the recorded session.
OVS_WAIT_WHILE([test -e ovsdb-server.pid])
dnl Stripping out timestamps from database files. Also clearing record
dnl hashes in database files, since dates inside are different.
m4_define([CLEAN_DB_FILE],
[sed 's/\(OVSDB JSON [[0-9]]*\).*$/\1/g' $1 | dnl
sed 's/"_date":[[0-9]]*/"_date":<clared>/g' > $2])
CLEAN_DB_FILE([db], [db.clear])
CLEAN_DB_FILE([./replay_dir/db.copy], [./replay_dir/db.copy.clear])
dnl Stripping out timestamps, PIDs and poll_loop warnings from the log.
dnl Also stripping socket_util errors as sockets are not used in replay.
m4_define([CLEAN_LOG_FILE],
[sed 's/[[0-9\-]]*T[[0-9:\.]]*Z|[[0-9]]*\(|.*$\)/\1/g' $1 | dnl
sed '/|poll_loop|/d' | dnl
sed '/|socket_util|/d' | dnl
sed '/|cooperative_multitasking|DBG|/d' | dnl
sed 's/[[0-9]]*\.ctl/<cleared>\.ctl/g'> $2])
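dnl For instance, a log line like
dnl '2024-01-01T00:00:00.000Z|00005|jsonrpc|DBG|...' is reduced to
dnl '|jsonrpc|DBG|...' so the two logs can be compared directly.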
CLEAN_LOG_FILE([1.log], [1.log.clear])
CLEAN_LOG_FILE([2.log], [2.log.clear])
dnl Checking that databases and logs are equal.
AT_CHECK([diff db.clear ./replay_dir/db.copy.clear])
AT_CHECK([diff -u 1.log.clear 2.log.clear])
AT_CLEANUP
AT_BANNER([OVSDB -- ovsdb-server configuration file])
dnl TEST_CONFIG_FILE([NAME], [config], [EXIT_CODE], [FAILURE_STRINGS])
dnl
dnl Tries the config as the data for --config-file, checks the EXIT_CODE
dnl of the ovsdb-server, and checks the stderr for FAILURE_STRINGS.
dnl NAME is added to the test name and keywords.
m4_define([TEST_CONFIG_FILE],
[
AT_SETUP([ovsdb-server config-file - $1])
AT_KEYWORDS([ovsdb server config-file $1])
on_exit 'kill $(cat *.pid)'
echo '$2' > config.json
AT_CAPTURE_FILE([config.json])
ordinal_schema > schema
constraint_schema > schema2
AT_CHECK([ovsdb-tool create db schema], [0], [ignore], [ignore])
AT_CHECK([ovsdb-tool create db2 schema], [0], [ignore], [ignore])
AT_CHECK([ovsdb-tool create-cluster db_cluster schema2 unix:s1.raft],
[0], [ignore], [ignore])
AT_CHECK([ovsdb-server -vfile -vPATTERN:console:'%p|%m' -vvlog:off \
--log-file --detach --no-chdir --pidfile \
--config-file=config.json], [$3], [ignore], [stderr])
m4_if([$4], [], [], [
AT_CHECK([cat stderr | grep -v -E 'INFO|DBG' \
| grep -v 'failed to load configuration from' \
| sed -e "/duplicate database name/ s/'db'/'db2'/" \
> warnings])
AT_CHECK([cat warnings], [0], [m4_if([$3], [0], [$4], [$4
ovsdb-server: server configuration failed
])])])
m4_if([$3$4], [0], [OVSDB_SERVER_SHUTDOWN])
AT_CLEANUP
])
TEST_CONFIG_FILE([simple], [
{
"remotes": { "punix:db.sock": {} },
"databases": { "db": null, "db_cluster": {} }
}
], [0])
TEST_CONFIG_FILE([standalone], [
{
"remotes": { "punix:db.sock": {} },
"databases": { "db": { "service-model": "standalone" } }
}
], [0])
TEST_CONFIG_FILE([clustered], [
{
"remotes": { "punix:db.sock": {} },
"databases": { "db_cluster": { "service-model": "clustered" } }
}
], [0])
TEST_CONFIG_FILE([unknown service model], [
{
"remotes": { "punix:db.sock": {} },
"databases": { "db": { "service-model": "not-a-service-model" } }
}
], [1], [dnl
WARN|Unrecognized database service model: 'not-a-service-model'
WARN|syntax "{"service-model":"not-a-service-model"}": syntax error:dnl
Parsing database db failed: 'not-a-service-model' is not a valid service model
WARN|config: failed to parse 'databases'])
TEST_CONFIG_FILE([same schema], [
{
"remotes": { "punix:db.sock": {} },
"databases": { "db": null, "db2": {} }
}
], [1], [dnl
WARN|failed to open database 'db2': ovsdb error: ordinals: duplicate database name
WARN|failed to configure databases])
TEST_CONFIG_FILE([model mismatch], [
{
"remotes": { "punix:db.sock": {} },
"databases": { "db": { "service-model": "clustered" } }
}
], [1], [dnl
WARN|failed to open database 'db': ovsdb error: db: database is standalone and not clustered
WARN|failed to configure databases])
TEST_CONFIG_FILE([model mismatch clustered], [
{
"remotes": { "punix:db.sock": {} },
"databases": { "db_cluster": { "service-model": "standalone" } }
}
], [1], [dnl
WARN|failed to open database 'db_cluster': ovsdb error: db_cluster: database is clustered and not standalone
WARN|failed to configure databases])
TEST_CONFIG_FILE([relay], [
{
"remotes": { "punix:db.sock": {} },
"databases": {
"RelaySchema": {
"service-model": "relay",
"source": { "unix:db2.sock": {} }
}
}
}
], [0])
TEST_CONFIG_FILE([relay without source], [
{
"remotes": { "punix:db.sock": {} },
"databases": {
"RelaySchema": {
"service-model": "relay"
}
}
}
], [1], [dnl
WARN|syntax "{"service-model":"relay"}": syntax error: Parsing database RelaySchema failed:dnl
Required 'source' member is missing.
WARN|config: failed to parse 'databases'])
TEST_CONFIG_FILE([relay with options], [
{
"remotes": { "punix:db.sock": {} },
"databases": {
"RelaySchema": {
"service-model": "relay",
"source": {
"punix:db2.sock": {
"inactivity-probe": 10000,
"max-backoff": 8000,
"dscp": 42
}
}
}
}
}
], [0])
TEST_CONFIG_FILE([relay with unrelated options], [
{
"remotes": { "punix:db.sock": {} },
"databases": {
"RelaySchema": {
"service-model": "relay",
"source": {
"punix:db2.sock": {
"inactivity-probe": 10000,
"max-backoff": 8000,
"dscp": 42,
"role": "My-RBAC-role"
}
}
}
}
}
], [0], [dnl
WARN|syntax "{"dscp":42,"inactivity-probe":10000,"max-backoff":8000,"role":"My-RBAC-role"}":dnl
syntax error: Parsing JSON-RPC options failed: Member 'role' is present but not allowed here.
])
TEST_CONFIG_FILE([unknown config], [
{
"remotes": { "punix:db.sock": {} },
"databases": {
"db": { "unknnown": "unknown" }
}
}
], [1], [dnl
WARN|syntax "{"unknnown":"unknown"}": syntax error: Parsing database db failed:dnl
Member 'unknnown' is present but not allowed here.
WARN|config: failed to parse 'databases'])
TEST_CONFIG_FILE([active-backup active], [
{
"remotes": { "punix:db.sock": {} },
"databases": {
"db": {
"service-model": "active-backup",
"backup": false
}
}
}
], [0])
TEST_CONFIG_FILE([active-backup backup], [
{
"remotes": { "punix:db.sock": {} },
"databases": {
"db": {
"service-model": "active-backup",
"backup": true,
"source": {
"punix:db2.sock": {
"inactivity-probe": 100000,
"max-backoff": 16000,
"dscp": 42
}
}
}
}
}
], [0])
TEST_CONFIG_FILE([active-backup backup without source], [
{
"remotes": { "punix:db.sock": {} },
"databases": {
"db": {
"service-model": "active-backup",
"backup": true
}
}
}
], [1], [dnl
WARN|syntax "{"backup":true,"service-model":"active-backup"}": syntax error:dnl
Parsing database db failed: Required 'source' member is missing.
WARN|config: failed to parse 'databases'])
TEST_CONFIG_FILE([syntax error], [
{
"remotes": { "punix:db.sock": {}, },
"databases": { "db": {}, "db_cluster": {} }
}
], [1], [dnl
WARN|config: reading JSON failed (line 2, column 38, byte 41: syntax error parsing object expecting string)])
TEST_CONFIG_FILE([complex config], [
{
"remotes": {
"punix:db.sock": {
"inactivity-probe": 0,
"read-only": false
},
"pssl:0:127.0.0.1": {
"inactivity-probe": 5000,
"max-backoff": 8000,
"read-only": true,
"role": "ovn-controller",
"dscp": 48
},
"db:ordinals,ordinals,name": null
},
"databases": {
"db_cluster": {
"service-model": "clustered"
},
"OVN_Northbound": {
"service-model": "relay",
"source": {
"unix:nb.sock": {
"max-backoff": 3000,
"inactivity-probe": 16000
}
}
},
"db": {
"service-model": "active-backup",
"backup": true,
"source": {
"unix:active.sock": {
"max-backoff": 16000,
"inactivity-probe": 180000
}
},
"exclude-tables": [["IC_SB_Global", "Availability_Zone"]]
}
}
}
], [0])