2016-12-19 20:55:35 -08:00
|
|
|
# Copyright (c) 2009, 2010, 2011, 2012, 2016 Nicira, Inc.
|
2010-08-25 10:26:40 -07:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at:
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
import getopt
import os
import re
import sys
import uuid

import ovs.db.idl
import ovs.db.schema
import ovs.db.types
import ovs.json
import ovs.ovsuuid
import ovs.poller
import ovs.stream
import ovs.util
import ovs.vlog
from ovs.db import data
from ovs.db import error
from ovs.db.idl import _row_to_uuid as row_to_uuid
from ovs.fatal_signal import signal_alarm
|
2017-02-24 10:03:26 +08:00
|
|
|
|
2018-12-25 20:54:15 +03:00
|
|
|
# Module-level logger for this test program: log everything (debug and up)
# to the console, with no log file (init(None)).
vlog = ovs.vlog.Vlog("test-ovsdb")
vlog.set_levels_from_string("console:dbg")
vlog.init(None)
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2024-06-28 14:18:41 -05:00
|
|
|
def substitute_object_text(data, quotechar='"', obj_chars=("{}", "[]"),
                           tag_format="_OBJECT_{}_"):
    """Replace objects in strings with tags that can later be retrieved

    Given data like:

    'cmd1 1, cmd2 {"a": {"a": "b"}}, cmd3 1 2, cmd4 ["a", "b"]'

    Return an output string:

    'cmd1 1, cmd2 _OBJECT_0_, cmd3 1 2, cmd4 _OBJECT_1_'

    and a dictionary of replaced text:

    {'_OBJECT_0_': '{"a": {"a": "b"}}', '_OBJECT_1_': '["a", "b"]'}
    """

    # Map each opening character to its closing character, e.g. '{' -> '}'.
    closer_for = dict(obj_chars)
    in_quote = False
    stack = []  # Opening chars of the (possibly nested) object being scanned.
    replaced_text = {}
    output = ""
    start = end = 0
    for pos, ch in enumerate(data):
        if not stack:
            if not in_quote and ch in closer_for:
                # Start of an unquoted outer object; it will be replaced
                # by a tag once its closing character is found.
                stack.append(ch)
                start = pos
            else:
                # Ordinary text is copied through unchanged.
                output += ch
            if ch == quotechar:
                in_quote = not in_quote
        elif not in_quote:  # Unquoted object.
            if ch == stack[0]:
                # Nested object of the same type as the outer one; it is
                # part of the outer object's text, not tagged separately.
                stack.append(ch)
            elif ch == closer_for[stack[0]]:
                # Closing character for the innermost open object.
                stack.pop()
                if not stack:
                    # The outer object just closed: record its text and
                    # emit a tag in its place.
                    end = pos + 1
                    tag = tag_format.format(len(replaced_text))
                    replaced_text[tag] = data[start:end]
                    output += tag
    return output, replaced_text
|
|
|
|
|
|
|
|
|
|
|
|
def recover_object_text_from_list(words, json):
    """Map each word back to the object text its tag stands for.

    'json' is the tag-to-text dictionary produced by
    substitute_object_text(); words that are not tags pass through
    unchanged.  A falsy 'json' returns 'words' as-is.
    """
    if not json:
        return words
    # NOTE(twilson) This does not handle the case of having multiple replaced
    # objects in the same word, e.g. two json adjacent json strings.
    recovered = []
    for word in words:
        recovered.append(json.get(word, word))
    return recovered
|
|
|
|
|
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def unbox_json(json):
    """Return the sole element of a one-element list; anything else is
    returned unchanged."""
    if type(json) is list and len(json) == 1:
        return json[0]
    return json
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def do_default_atoms():
    """Check that Atom.default() is stable for every non-void atomic type.

    Prints one "<type>: OK" line per type and exits nonzero on the first
    mismatch.
    """
    for atomic in ovs.db.types.ATOMIC_TYPES:
        if atomic == ovs.db.types.VoidType:
            continue

        sys.stdout.write("%s: " % atomic.to_string())

        # Two calls must produce equal defaults.
        default_atom = data.Atom.default(atomic)
        if default_atom != data.Atom.default(atomic):
            sys.stdout.write("wrong\n")
            sys.exit(1)

        sys.stdout.write("OK\n")
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def do_default_data():
    """Check that Datum.default() is stable for every key/value type pair.

    Covers n_min of 0 and 1 with every non-void key type and every value
    type (void meaning "no value", i.e. a set rather than a map).  Prints
    one result line per combination and exits nonzero if any mismatched.
    """
    failed = False
    for n_min in 0, 1:
        for key_type in ovs.db.types.ATOMIC_TYPES:
            if key_type == ovs.db.types.VoidType:
                continue
            for value_type in ovs.db.types.ATOMIC_TYPES:
                if value_type == ovs.db.types.VoidType:
                    value_base = None
                else:
                    value_base = ovs.db.types.BaseType(value_type)
                type_ = ovs.db.types.Type(ovs.db.types.BaseType(key_type),
                                          value_base, n_min, 1)
                assert type_.is_valid()

                sys.stdout.write("key %s, value %s, n_min %d: "
                                 % (key_type.to_string(),
                                    value_type.to_string(), n_min))

                # Two calls must produce equal defaults.
                if data.Datum.default(type_) != data.Datum.default(type_):
                    sys.stdout.write("wrong\n")
                    failed = True
                else:
                    sys.stdout.write("OK\n")
    if failed:
        sys.exit(1)
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def do_parse_atomic_type(type_string):
    """Parse an atomic type from JSON text and print its canonical JSON."""
    parsed = unbox_json(ovs.json.from_string(type_string))
    atomic = ovs.db.types.AtomicType.from_json(parsed)
    print(ovs.json.to_string(atomic.to_json(), sort_keys=True))
|
2010-08-25 10:26:40 -07:00
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def do_parse_base_type(type_string):
    """Parse a base type from JSON text and print its canonical JSON."""
    parsed = unbox_json(ovs.json.from_string(type_string))
    base = ovs.db.types.BaseType.from_json(parsed)
    print(ovs.json.to_string(base.to_json(), sort_keys=True))
|
2010-08-25 10:26:40 -07:00
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def do_parse_type(type_string):
    """Parse a full column type from JSON text and print its canonical
    JSON."""
    parsed = unbox_json(ovs.json.from_string(type_string))
    type_ = ovs.db.types.Type.from_json(parsed)
    print(ovs.json.to_string(type_.to_json(), sort_keys=True))
|
2010-08-25 10:26:40 -07:00
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def do_parse_atoms(type_string, *atom_strings):
    """Parse each atom string against the given base type.

    Prints either the atom's JSON form or, on a parse failure, the error
    message.
    """
    parsed_type = unbox_json(ovs.json.from_string(type_string))
    base = ovs.db.types.BaseType.from_json(parsed_type)
    for text in atom_strings:
        parsed_atom = unbox_json(ovs.json.from_string(text))
        try:
            atom = data.Atom.from_json(base, parsed_atom)
            print(ovs.json.to_string(atom.to_json()))
        except error.Error as e:
            print(e.args[0])
|
2010-08-25 10:26:40 -07:00
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def do_parse_data(type_string, *data_strings):
    """Parse each datum string against the given type and print its JSON."""
    parsed_type = unbox_json(ovs.json.from_string(type_string))
    type_ = ovs.db.types.Type.from_json(parsed_type)
    for text in data_strings:
        parsed_datum = unbox_json(ovs.json.from_string(text))
        datum = data.Datum.from_json(type_, parsed_datum)
        print(ovs.json.to_string(datum.to_json()))
|
2010-08-25 10:26:40 -07:00
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def do_sort_atoms(type_string, atom_strings):
    """Parse a JSON list of atoms, sort them, and print the sorted JSON."""
    parsed_type = unbox_json(ovs.json.from_string(type_string))
    base = ovs.db.types.BaseType.from_json(parsed_type)
    atoms = []
    for atom_json in unbox_json(ovs.json.from_string(atom_strings)):
        atoms.append(data.Atom.from_json(base, atom_json))
    atoms.sort()
    print(ovs.json.to_string([data.Atom.to_json(a) for a in atoms]))
|
2010-08-25 10:26:40 -07:00
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def do_parse_column(name, column_string):
    """Parse a column schema from JSON text and print its canonical JSON."""
    parsed = unbox_json(ovs.json.from_string(column_string))
    column = ovs.db.schema.ColumnSchema.from_json(parsed, name)
    print(ovs.json.to_string(column.to_json(), sort_keys=True))
|
2010-08-25 10:26:40 -07:00
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2011-03-10 11:15:01 -08:00
|
|
|
def do_parse_table(name, table_string, default_is_root_string='false'):
    """Parse a table schema and print its canonical JSON.

    default_is_root_string selects ('true'/'false') whether the table is
    serialized as a root table.
    """
    is_root = default_is_root_string == 'true'
    parsed = unbox_json(ovs.json.from_string(table_string))
    table = ovs.db.schema.TableSchema.from_json(parsed, name)
    print(ovs.json.to_string(table.to_json(is_root), sort_keys=True))
|
2010-08-25 10:26:40 -07:00
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def do_parse_schema(schema_string):
    """Parse a database schema from JSON text and print its canonical
    JSON."""
    parsed = unbox_json(ovs.json.from_string(schema_string))
    schema = ovs.db.schema.DbSchema.from_json(parsed)
    print(ovs.json.to_string(schema.to_json(), sort_keys=True))
|
2010-08-25 10:26:40 -07:00
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2016-08-15 11:03:30 -07:00
|
|
|
def get_simple_printable_row_string(row, columns):
    """Return a normalized "col=value col=value ..." rendering of 'row'.

    Only columns present on the row and not raw Atom objects are included.
    Map and sequence values are sorted, with embedded Row references
    collapsed to their UUIDs via row_to_uuid(), so the output is stable
    across runs; the final string is post-processed to match the format the
    tests expect.
    """
    # NOTE(twilson): This turns out to be a particularly good place to test
    # that Row object stringification doesn't crash on a large variety of
    # row types.
    assert str(row)
    s = ""
    for column in columns:
        if hasattr(row, column) and not (type(getattr(row, column))
                                         is ovs.db.data.Atom):
            value = getattr(row, column)
            if isinstance(value, dict):
                # Sort map entries, mapping Row keys/values to their UUIDs.
                value = sorted((row_to_uuid(k), row_to_uuid(v))
                               for k, v in value.items())
            if isinstance(value, (list, tuple)):
                # Sort sequences likewise.  (An unreachable "elif
                # isinstance(value, list)" branch, fully shadowed by this
                # condition, was removed.)
                value = sorted(row_to_uuid(v) for v in value)
            s += "%s=%s " % (column, value)
    s = s.strip()
    # Strip quote/comma noise, unwrap UUID(...) reprs, lowercase booleans,
    # and bracket bare "ba=" values so output matches the expected format.
    s = re.sub('""|,|u?\'', "", s)
    s = re.sub(r'UUID\(([^)]+)\)', r'\1', s)
    s = re.sub('False', 'false', s)
    s = re.sub('True', 'true', s)
    s = re.sub(r'(ba)=([^[][^ ]*) ', r'\1=[\2] ', s)
    return s
|
|
|
|
|
|
|
|
|
2021-03-09 14:34:16 +00:00
|
|
|
def get_simple_table_printable_row(row, *additional_columns):
    """Render the standard 'simple' table columns of row, plus any extra
    column names supplied by the caller."""
    columns = ["i", "r", "b", "s", "u", "ia",
               "ra", "ba", "sa", "ua"] + list(additional_columns)
    return get_simple_printable_row_string(row, columns)
|
|
|
|
|
|
|
|
|
2016-08-06 17:46:30 -05:00
|
|
|
def get_simple2_table_printable_row(row):
    """Render the 'simple2' table columns of row."""
    return get_simple_printable_row_string(row, ["name", "smap", "imap"])
|
2016-08-06 17:46:30 -05:00
|
|
|
|
|
|
|
|
|
|
|
def get_simple3_table_printable_row(row):
    """Render the 'simple3' table columns of row."""
    return get_simple_printable_row_string(row, ["name", "uset", "uref"])
|
2016-08-06 17:46:30 -05:00
|
|
|
|
|
|
|
|
ovsdb-idl: Preserve references for deleted rows.
Considering two DB rows, 'a' from table A and 'b' from table B (with
column 'ref_a' a reference to table A):
a = {A._uuid=<U1>}
b = {B._uuid=<U2>, B.ref_a=<U1>}
Assuming both records are present in the IDL client's in-memory view of
the database, depending whether row 'b' is also deleted in the same
transaction or not, deletion of row 'a' should generate the following
tracked changes:
1. only row 'a' is deleted:
- for table A:
- deleted records: a = {A._uuid=<U1>}
- for table B:
- updated records: b = {B._uuid=<U2>, B.ref_a=[]}
2. row 'a' and row 'b' are deleted in the same update:
- for table A:
- deleted records: a = {A._uuid=<U1>}
- for table B:
- deleted records: b = {B._uuid=<U2>, B.ref_a=<U1>}
To ensure this, we now delay reparsing row backrefs for deleted rows
until all updates in the current run have been processed.
Without this change, in scenario 2 above, the tracked changes for table
B would be:
- deleted records: b = {B._uuid=<U2>, B.ref_a=[]}
In particular, for strong references, row 'a' can never be deleted in
a transaction that happens strictly before row 'b' is deleted. In some
cases [0] both rows are deleted in the same transaction and having
B.ref_a=[] would violate the integrity of the database from client
perspective. This would force the client to always validate that
strong reference fields are non-NULL. This is not really an option
because the information in the original reference is required for
incrementally processing the record deletion.
[0] with ovn-monitor-all=true, the following command triggers a crash
in ovn-controller because a strong reference field becomes NULL:
$ ovn-nbctl --wait=hv -- lr-add r -- lrp-add r rp 00:00:00:00:00:01 1.0.0.1/24
$ ovn-nbctl lr-del r
Reported-at: https://bugzilla.redhat.com/1932642
Fixes: 72aeb243a52a ("ovsdb-idl: Tracking - preserve data for deleted rows.")
Signed-off-by: Dumitru Ceara <dceara@redhat.com>
Acked-by: Han Zhou <hzhou@ovn.org>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
2021-03-24 10:33:22 +01:00
|
|
|
def get_simple4_table_printable_row(row):
    """Render the 'simple4' table columns of row."""
    return get_simple_printable_row_string(row, ["name"])
|
|
|
|
|
|
|
|
|
2021-03-24 10:33:08 +01:00
|
|
|
def get_simple5_table_printable_row(row):
    """Render the 'simple5' table columns of row."""
    return get_simple_printable_row_string(row, ["name", "irefmap"])
|
|
|
|
|
|
|
|
|
ovsdb-idl: Preserve references for deleted rows.
Considering two DB rows, 'a' from table A and 'b' from table B (with
column 'ref_a' a reference to table A):
a = {A._uuid=<U1>}
b = {B._uuid=<U2>, B.ref_a=<U1>}
Assuming both records are present in the IDL client's in-memory view of
the database, depending whether row 'b' is also deleted in the same
transaction or not, deletion of row 'a' should generate the following
tracked changes:
1. only row 'a' is deleted:
- for table A:
- deleted records: a = {A._uuid=<U1>}
- for table B:
- updated records: b = {B._uuid=<U2>, B.ref_a=[]}
2. row 'a' and row 'b' are deleted in the same update:
- for table A:
- deleted records: a = {A._uuid=<U1>}
- for table B:
- deleted records: b = {B._uuid=<U2>, B.ref_a=<U1>}
To ensure this, we now delay reparsing row backrefs for deleted rows
until all updates in the current run have been processed.
Without this change, in scenario 2 above, the tracked changes for table
B would be:
- deleted records: b = {B._uuid=<U2>, B.ref_a=[]}
In particular, for strong references, row 'a' can never be deleted in
a transaction that happens strictly before row 'b' is deleted. In some
cases [0] both rows are deleted in the same transaction and having
B.ref_a=[] would violate the integrity of the database from client
perspective. This would force the client to always validate that
strong reference fields are non-NULL. This is not really an option
because the information in the original reference is required for
incrementally processing the record deletion.
[0] with ovn-monitor-all=true, the following command triggers a crash
in ovn-controller because a strong reference field becomes NULL:
$ ovn-nbctl --wait=hv -- lr-add r -- lrp-add r rp 00:00:00:00:00:01 1.0.0.1/24
$ ovn-nbctl lr-del r
Reported-at: https://bugzilla.redhat.com/1932642
Fixes: 72aeb243a52a ("ovsdb-idl: Tracking - preserve data for deleted rows.")
Signed-off-by: Dumitru Ceara <dceara@redhat.com>
Acked-by: Han Zhou <hzhou@ovn.org>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
2021-03-24 10:33:22 +01:00
|
|
|
def get_simple6_table_printable_row(row):
    """Render the 'simple6' table columns of row."""
    return get_simple_printable_row_string(row, ["name", "weak_ref"])
|
|
|
|
|
|
|
|
|
2021-03-24 10:33:08 +01:00
|
|
|
def get_link1_table_printable_row(row):
    """Render a link1 row as "i=I k=K ka=[...] l2=L".

    Presumably 'row' is an idltest link1 Row; missing/empty reference
    columns render as empty.  'ka' entries are sorted lexically by their
    stringified 'i' value for stable output.
    """
    out = "i=%s k=" % row.i
    if getattr(row, "k", None):
        out += str(row.k.i)
    if hasattr(row, "ka"):
        ka_values = sorted(str(ka.i) for ka in row.ka)
        out += " ka=[" + ' '.join(ka_values) + "] l2="
    if getattr(row, "l2", None):
        out += str(row.l2[0].i)
    return out
|
|
|
|
|
|
|
|
|
|
|
|
def get_link2_table_printable_row(row):
    """Render a link2 row as "i=I l1=L1" (l1 empty when absent/unset)."""
    parts = ["i=%s l1=" % row.i]
    if getattr(row, "l1", None):
        parts.append(str(row.l1[0].i))
    return "".join(parts)
|
|
|
|
|
|
|
|
|
python: idl: Fix index not being updated on row modification.
When a row is modified, python IDL doesn't perform any operations on
existing client-side indexes. This means that if the column on which
index is created changes, the old value will remain in the index and
the new one will not be added to the index. Beside lookup failures
this is also causing inability to remove modified rows, because the
new column value doesn't exist in the index causing an exception on
attempt to remove it:
Traceback (most recent call last):
File "ovsdbapp/backend/ovs_idl/connection.py", line 110, in run
self.idl.run()
File "ovs/db/idl.py", line 465, in run
self.__parse_update(msg.params[2], OVSDB_UPDATE3)
File "ovs/db/idl.py", line 924, in __parse_update
self.__do_parse_update(update, version, self.tables)
File "ovs/db/idl.py", line 964, in __do_parse_update
changes = self.__process_update2(table, uuid, row_update)
File "ovs/db/idl.py", line 991, in __process_update2
del table.rows[uuid]
File "ovs/db/custom_index.py", line 102, in __delitem__
index.remove(val)
File "ovs/db/custom_index.py", line 66, in remove
self.values.remove(self.index_entry_from_row(row))
File "sortedcontainers/sortedlist.py", line 2015, in remove
raise ValueError('{0!r} not in list'.format(value))
ValueError: Datapath_Binding(
uuid=UUID('498e66a2-70bc-4587-a66f-0433baf82f60'),
tunnel_key=16711683, load_balancers=[], external_ids={}) not in list
Fix that by always removing an existing row from indexes before
modification and adding back afterwards. This ensures that old
values are removed from the index and new ones are added.
This behavior is consistent with the C implementation.
The new test that reproduces the removal issue is added. Some extra
testing infrastructure added to be able to handle and print out the
'indexed' table from the idltest schema.
Fixes: 13973bc41524 ("Add multi-column index support for the Python IDL")
Reported-at: https://mail.openvswitch.org/pipermail/ovs-discuss/2024-May/053159.html
Reported-by: Roberto Bartzen Acosta <roberto.acosta@luizalabs.com>
Acked-by: Mike Pattrick <mkp@redhat.com>
Acked-by: Dumitru Ceara <dceara@redhat.com>
Acked-by: Terry Wilson <twilson@redhat.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
2024-05-27 23:39:06 +02:00
|
|
|
def get_indexed_table_printable_row(row):
    """Render an 'indexed' table row (only column 'i')."""
    return "i=" + str(row.i)
|
|
|
|
|
|
|
|
|
2021-03-24 10:33:08 +01:00
|
|
|
def get_singleton_table_printable_row(row):
    """Render a 'singleton' table row (only column 'name')."""
    return "name=" + str(row.name)
|
|
|
|
|
|
|
|
|
2022-01-11 17:37:54 +01:00
|
|
|
def print_row(table, row, step, contents, terse):
    """Print one numbered row-change line for a test step.

    Terse mode prints only the step and table name; otherwise the rendered
    contents plus the row's uuid are included.
    """
    if terse:
        line = "%03d: table %s" % (step, table)
    else:
        line = "%03d: table %s: %s " % (step, table, contents)
        line += get_simple_printable_row_string(row, ["uuid"])
    print(line)
|
|
|
|
|
|
|
|
|
2022-01-11 17:37:54 +01:00
|
|
|
def print_idl(idl, step, terse=False):
    """Print the contents of every idltest table replicated by 'idl'.

    Each row present in a monitored table is printed with print_row(),
    using that table's formatting helper to render the columns.  If no
    rows exist in any table at all, "NNN: empty" is printed instead.
    'step' is the test step number used as the line prefix; 'terse'
    suppresses per-row contents.  Output is flushed before returning so
    it interleaves correctly with the test driver's other output.
    """
    # Table name -> helper that renders one row of that table.  The
    # tuple fixes the output order and replaces ten nearly identical
    # copy-pasted if-blocks.
    renderers = (
        ("simple", get_simple_table_printable_row),
        ("simple2", get_simple2_table_printable_row),
        ("simple3", get_simple3_table_printable_row),
        ("simple4", get_simple4_table_printable_row),
        ("simple5", get_simple5_table_printable_row),
        ("simple6", get_simple6_table_printable_row),
        ("link1", get_link1_table_printable_row),
        ("link2", get_link2_table_printable_row),
        ("indexed", get_indexed_table_printable_row),
        ("singleton", get_singleton_table_printable_row),
    )

    n = 0
    for table, renderer in renderers:
        # Only tables registered with this Idl instance are present.
        if table not in idl.tables:
            continue
        for row in idl.tables[table].rows.values():
            print_row(table, row, step, renderer(row), terse)
            n += 1

    if not n:
        print("%03d: empty" % step)
    sys.stdout.flush()
|
2010-08-25 10:26:40 -07:00
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def substitute_uuids(json, symtab):
    """Return 'json' with symbolic names replaced from 'symtab'.

    A string that appears as a key in 'symtab' is replaced by str() of
    its mapped value; lists and dicts are rebuilt recursively; any
    other value is returned unchanged.
    """
    if isinstance(json, str):
        replacement = symtab.get(json)
        if replacement:
            return str(replacement)
    elif type(json) is list:
        return [substitute_uuids(item, symtab) for item in json]
    elif type(json) is dict:
        return {key: substitute_uuids(value, symtab)
                for key, value in json.items()}
    return json
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def parse_uuids(json, symtab):
    """Recursively record every UUID string found in 'json' in 'symtab'.

    Each UUID gets a generated symbolic name "#N#" (N is the current
    size of 'symtab'); the assignment is also logged to stderr.  Lists
    and dicts are walked recursively; other values are ignored.
    """
    if isinstance(json, str) and ovs.ovsuuid.is_valid_string(json):
        name = "#%d#" % len(symtab)
        sys.stderr.write("%s = %s\n" % (name, json))
        symtab[name] = json
    elif type(json) is list:
        for item in json:
            parse_uuids(item, symtab)
    elif type(json) is dict:
        for val in json.values():
            parse_uuids(val, symtab)
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2011-09-21 10:43:03 -07:00
|
|
|
def idltest_find_simple(idl, i):
    """Return the row of the "simple" table whose 'i' column equals
    'i', or None if no such row exists."""
    return next((row for row in idl.tables["simple"].rows.values()
                 if row.i == i), None)
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2016-08-06 17:46:30 -05:00
|
|
|
def idltest_find_simple2(idl, i):
    """Return the row of the "simple2" table whose 'name' column
    equals 'i', or None if no such row exists."""
    return next((row for row in idl.tables["simple2"].rows.values()
                 if row.name == i), None)
|
|
|
|
|
|
|
|
|
|
|
|
def idltest_find_simple3(idl, i):
    """Return the first row of the "simple3" table matching 'i' via
    the "simple3_by_name" client index, or None if there is none."""
    matches = idl.index_equal("simple3", "simple3_by_name", i)
    return next(matches, None)
|
2016-08-06 17:46:30 -05:00
|
|
|
|
|
|
|
|
2020-03-20 15:22:38 +00:00
|
|
|
def idltest_find(idl, table, col, match):
    """Return the first row of 'table' whose column 'col' equals
    'match', or None if no row matches."""
    for row in idl.tables[table].rows.values():
        if getattr(row, col) == match:
            return row
    return None
|
|
|
|
|
|
|
|
|
2011-09-21 10:43:03 -07:00
|
|
|
def idl_set(idl, commands, step):
|
|
|
|
txn = ovs.db.idl.Transaction(idl)
|
|
|
|
increment = False
|
ovsdb-idl: Support for readonly columns that are fetched on-demand
There is currently no mechanism in IDL to fetch specific column values
on-demand without having to register them for monitoring. In the case
where the column represent a frequently changing entity (e.g. counter),
and the reads are relatively infrequent (e.g. CLI client), there is a
significant overhead in replication.
This patch adds support in the Python IDL to register a subset of the
columns of a table as "readonly". Readonly columns are not replicated.
Users may "fetch" the readonly columns of a row on-demand. Once fetched,
the columns are not updated until the next fetch by the user. Writes by
the user to readonly columns does not change the value (both locally or
on the server).
The two main user visible changes in this patch are:
- The SchemaHelper.register_columns() method now takes an optionaly
argument to specify the subset of readonly column(s)
- A new Row.fetch(columns) method to fetch values of readonly columns(s)
Usage:
------
# Schema file includes all columns, including readonly
schema_helper = ovs.db.idl.SchemaHelper(schema_file)
# Register interest in columns with 'r' and 's' as readonly
schema_helper.register_columns("simple", [i, r, s], [r, s])
# Create Idl and jsonrpc, and wait for update, as usual
...
# Fetch value of column 'r' for a specific row
row.fetch('r')
txn.commit_block()
print row.r
print getattr(row, 'r')
# Writing to readonly column has no effect (locally or on server)
row.r = 3
print row.r # prints fetched value not 3
Signed-off-by: Shad Ansari <shad.ansari@hp.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
2015-10-22 14:35:24 -07:00
|
|
|
fetch_cmds = []
|
2015-04-25 14:57:44 -05:00
|
|
|
events = []
|
2024-06-28 14:18:41 -05:00
|
|
|
# `commands` is a comma-separated list of space-separated arguments. To
|
|
|
|
# handle commands that take arguments that may contain spaces or commas,
|
|
|
|
# e.g. JSON, it is necessary to process `commands` to extract those
|
|
|
|
# arguments before splitting by ',' or ' ' below, and then re-insert them
|
|
|
|
# after the arguments are split.
|
|
|
|
commands, data = substitute_object_text(commands)
|
2011-09-21 10:43:03 -07:00
|
|
|
for command in commands.split(','):
|
|
|
|
words = command.split()
|
2024-06-28 14:18:41 -05:00
|
|
|
words = recover_object_text_from_list(words, data)
|
2011-09-21 10:43:03 -07:00
|
|
|
name = words[0]
|
|
|
|
args = words[1:]
|
|
|
|
|
2015-04-25 14:57:44 -05:00
|
|
|
if name == "notifytest":
|
|
|
|
name = args[0]
|
|
|
|
args = args[1:]
|
|
|
|
old_notify = idl.notify
|
|
|
|
|
|
|
|
def notify(event, row, updates=None):
|
2015-04-29 10:41:39 -07:00
|
|
|
if updates:
|
python: Convert dict iterators.
In Python 2, dict.items(), dict.keys(), and dict.values() returned a
list. dict.iteritems(), dict.iterkeys(), and dict.itervalues() returned
an iterator.
As of Python 3, dict.iteritems(), dict.itervalues(), and dict.iterkeys()
are gone. items(), keys(), and values() now return an iterator.
In the case where we want an iterator, we now use the six.iter*()
helpers. If we want a list, we explicitly create a list from the
iterator.
Signed-off-by: Russell Bryant <russell@ovn.org>
Acked-by: Ben Pfaff <blp@ovn.org>
2015-12-14 15:13:20 -05:00
|
|
|
upcol = list(updates._data.keys())[0]
|
2015-04-29 10:41:39 -07:00
|
|
|
else:
|
|
|
|
upcol = None
|
2015-04-25 14:57:44 -05:00
|
|
|
events.append("%s|%s|%s" % (event, row.i, upcol))
|
|
|
|
idl.notify = old_notify
|
|
|
|
|
|
|
|
idl.notify = notify
|
|
|
|
|
2011-09-21 10:43:03 -07:00
|
|
|
if name == "set":
|
|
|
|
if len(args) != 3:
|
|
|
|
sys.stderr.write('"set" command requires 3 arguments\n')
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
s = idltest_find_simple(idl, int(args[0]))
|
|
|
|
if not s:
|
|
|
|
sys.stderr.write('"set" command asks for nonexistent i=%d\n'
|
|
|
|
% int(args[0]))
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
if args[1] == "b":
|
|
|
|
s.b = args[2] == "1"
|
|
|
|
elif args[1] == "s":
|
2019-12-20 18:35:08 +01:00
|
|
|
s.s = args[2].encode(sys.getfilesystemencoding(),
|
|
|
|
'surrogateescape') \
|
|
|
|
.decode('utf-8', 'replace')
|
2011-09-21 10:43:03 -07:00
|
|
|
elif args[1] == "u":
|
|
|
|
s.u = uuid.UUID(args[2])
|
|
|
|
elif args[1] == "r":
|
|
|
|
s.r = float(args[2])
|
|
|
|
else:
|
|
|
|
sys.stderr.write('"set" comamnd asks for unknown column %s\n'
|
|
|
|
% args[2])
|
|
|
|
sys.stderr.exit(1)
|
|
|
|
elif name == "insert":
|
|
|
|
if len(args) != 1:
|
|
|
|
sys.stderr.write('"set" command requires 1 argument\n')
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
s = txn.insert(idl.tables["simple"])
|
|
|
|
s.i = int(args[0])
|
2025-02-27 11:23:48 -06:00
|
|
|
elif name == "insert_no_columns_changed":
|
|
|
|
txn.insert(idl.tables["simple"])
|
2022-11-27 22:56:13 -05:00
|
|
|
elif name == "insert_uuid":
|
|
|
|
if len(args) != 2:
|
|
|
|
sys.stderr.write('"set" command requires 2 argument\n')
|
|
|
|
sys.exit(1)
|
|
|
|
|
2024-04-10 16:38:26 -05:00
|
|
|
s = txn.insert(idl.tables["simple"], new_uuid=uuid.UUID(args[0]),
|
2022-11-27 22:56:13 -05:00
|
|
|
persist_uuid=True)
|
|
|
|
s.i = int(args[1])
|
2025-02-27 11:23:49 -06:00
|
|
|
elif name == "insert_uuid_uref":
|
|
|
|
if len(args) != 2:
|
|
|
|
sys.stderr.write('"set" command requires 2 argument\n')
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
s4 = txn.insert(idl.tables["simple4"], new_uuid=uuid.UUID(args[1]),
|
|
|
|
persist_uuid=True)
|
|
|
|
s3 = txn.insert(idl.tables["simple3"], new_uuid=uuid.UUID(args[0]),
|
|
|
|
persist_uuid=True)
|
|
|
|
s3.uref = s4
|
|
|
|
|
2024-06-28 14:18:41 -05:00
|
|
|
elif name == "add_op":
|
|
|
|
if len(args) != 1:
|
|
|
|
sys.stderr.write('"add_op" command requires 1 argument\n')
|
|
|
|
sys.stderr.write(f"args={args}\n")
|
|
|
|
sys.exit(1)
|
|
|
|
txn.add_op(ovs.json.from_string(args[0]))
|
2011-09-21 10:43:03 -07:00
|
|
|
elif name == "delete":
|
|
|
|
if len(args) != 1:
|
|
|
|
sys.stderr.write('"delete" command requires 1 argument\n')
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
s = idltest_find_simple(idl, int(args[0]))
|
|
|
|
if not s:
|
|
|
|
sys.stderr.write('"delete" command asks for nonexistent i=%d\n'
|
|
|
|
% int(args[0]))
|
|
|
|
sys.exit(1)
|
|
|
|
s.delete()
|
|
|
|
elif name == "verify":
|
|
|
|
if len(args) != 2:
|
|
|
|
sys.stderr.write('"verify" command requires 2 arguments\n')
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
s = idltest_find_simple(idl, int(args[0]))
|
|
|
|
if not s:
|
|
|
|
sys.stderr.write('"verify" command asks for nonexistent i=%d\n'
|
|
|
|
% int(args[0]))
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
if args[1] in ("i", "b", "s", "u", "r"):
|
|
|
|
s.verify(args[1])
|
|
|
|
else:
|
|
|
|
sys.stderr.write('"verify" command asks for unknown column '
|
|
|
|
'"%s"\n' % args[1])
|
|
|
|
sys.exit(1)
|
ovsdb-idl: Support for readonly columns that are fetched on-demand
There is currently no mechanism in IDL to fetch specific column values
on-demand without having to register them for monitoring. In the case
where the column represent a frequently changing entity (e.g. counter),
and the reads are relatively infrequent (e.g. CLI client), there is a
significant overhead in replication.
This patch adds support in the Python IDL to register a subset of the
columns of a table as "readonly". Readonly columns are not replicated.
Users may "fetch" the readonly columns of a row on-demand. Once fetched,
the columns are not updated until the next fetch by the user. Writes by
the user to readonly columns does not change the value (both locally or
on the server).
The two main user visible changes in this patch are:
- The SchemaHelper.register_columns() method now takes an optionaly
argument to specify the subset of readonly column(s)
- A new Row.fetch(columns) method to fetch values of readonly columns(s)
Usage:
------
# Schema file includes all columns, including readonly
schema_helper = ovs.db.idl.SchemaHelper(schema_file)
# Register interest in columns with 'r' and 's' as readonly
schema_helper.register_columns("simple", [i, r, s], [r, s])
# Create Idl and jsonrpc, and wait for update, as usual
...
# Fetch value of column 'r' for a specific row
row.fetch('r')
txn.commit_block()
print row.r
print getattr(row, 'r')
# Writing to readonly column has no effect (locally or on server)
row.r = 3
print row.r # prints fetched value not 3
Signed-off-by: Shad Ansari <shad.ansari@hp.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
2015-10-22 14:35:24 -07:00
|
|
|
elif name == "fetch":
|
|
|
|
if len(args) != 2:
|
|
|
|
sys.stderr.write('"fetch" command requires 2 argument\n')
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
row = idltest_find_simple(idl, int(args[0]))
|
|
|
|
if not row:
|
|
|
|
sys.stderr.write('"fetch" command asks for nonexistent i=%d\n'
|
|
|
|
% int(args[0]))
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
column = args[1]
|
|
|
|
row.fetch(column)
|
|
|
|
fetch_cmds.append([row, column])
|
2011-09-21 10:43:03 -07:00
|
|
|
elif name == "increment":
|
2012-04-12 08:25:10 -07:00
|
|
|
if len(args) != 1:
|
|
|
|
sys.stderr.write('"increment" command requires 1 argument\n')
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
s = idltest_find_simple(idl, int(args[0]))
|
|
|
|
if not s:
|
|
|
|
sys.stderr.write('"set" command asks for nonexistent i=%d\n'
|
|
|
|
% int(args[0]))
|
2011-09-21 10:43:03 -07:00
|
|
|
sys.exit(1)
|
|
|
|
|
2012-04-12 08:25:10 -07:00
|
|
|
s.increment("i")
|
2011-09-21 10:43:03 -07:00
|
|
|
increment = True
|
|
|
|
elif name == "abort":
|
|
|
|
txn.abort()
|
|
|
|
break
|
|
|
|
elif name == "destroy":
|
2015-12-14 10:21:53 -05:00
|
|
|
print("%03d: destroy" % step)
|
2011-09-21 10:43:03 -07:00
|
|
|
sys.stdout.flush()
|
|
|
|
txn.abort()
|
2022-11-27 22:56:13 -05:00
|
|
|
return True
|
python/ovs/db/idl.py: Transaction._substitute doesn't handle list/tuple
Since Transaction._substitute doesn't substitute elements of list/tuple,
setting list references results in transaction error. Teach it such case.
Example:
{"op": "update",
"row":{"bridges":["set",[["uuid",
"1f42bc19-307f-42e7-a9c0-c12178bd8b51"],
["uuid",
"f97e0c76-7146-489d-9bed-29bc704f65fe"]]]},
"table": "Open_vSwitch",
"where":[["_uuid", "==", ["uuid",
"20c2a046-ae7e-4453-a576-11034db24985"]]]}
In the above case, uuid in "row" aren't replaced by "named-uuid" because
the function doesn't look into elements of lists.
When list/tuple is found, look into elements recursively.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Ben Pfaff <blp@nicira.com>
2012-09-13 13:41:04 +09:00
|
|
|
elif name == "linktest":
|
|
|
|
l1_0 = txn.insert(idl.tables["link1"])
|
|
|
|
l1_0.i = 1
|
|
|
|
l1_0.k = [l1_0]
|
|
|
|
l1_0.ka = [l1_0]
|
|
|
|
l1_1 = txn.insert(idl.tables["link1"])
|
|
|
|
l1_1.i = 2
|
|
|
|
l1_1.k = [l1_0]
|
|
|
|
l1_1.ka = [l1_0, l1_1]
|
2012-09-27 18:29:45 +09:00
|
|
|
elif name == 'getattrtest':
|
|
|
|
l1 = txn.insert(idl.tables["link1"])
|
|
|
|
i = getattr(l1, 'i', 1)
|
|
|
|
assert i == 1
|
|
|
|
l1.i = 2
|
|
|
|
i = getattr(l1, 'i', 1)
|
|
|
|
assert i == 2
|
|
|
|
l1.k = [l1]
|
2016-08-06 17:46:30 -05:00
|
|
|
elif name == 'partialmapinsertelement':
|
|
|
|
row = idltest_find_simple2(idl, 'myString1')
|
2016-10-12 14:36:57 -07:00
|
|
|
len_smap = len(getattr(row, 'smap'))
|
2016-08-06 17:46:30 -05:00
|
|
|
row.setkey('smap', 'key1', 'myList1')
|
2016-10-12 14:36:57 -07:00
|
|
|
len_imap = len(getattr(row, 'imap'))
|
2016-08-06 17:46:30 -05:00
|
|
|
row.setkey('imap', 3, 'myids2')
|
|
|
|
row.__setattr__('name', 'String2')
|
2016-10-12 14:36:57 -07:00
|
|
|
assert len(getattr(row, 'smap')) == len_smap
|
|
|
|
assert len(getattr(row, 'imap')) == len_imap + 1
|
2016-08-15 11:03:30 -07:00
|
|
|
elif name == 'partialmapinsertmultipleelements':
|
|
|
|
row = idltest_find_simple2(idl, 'String2')
|
2016-10-12 14:36:57 -07:00
|
|
|
len_smap = len(getattr(row, 'smap'))
|
2016-08-15 11:03:30 -07:00
|
|
|
row.setkey('smap', 'key2', 'myList2')
|
|
|
|
row.setkey('smap', 'key3', 'myList3')
|
2016-10-12 14:36:57 -07:00
|
|
|
row.setkey('smap', 'key4', 'myList4')
|
|
|
|
assert len(getattr(row, 'smap')) == len_smap + 2
|
2016-08-15 11:03:30 -07:00
|
|
|
elif name == 'partialmapdelelements':
|
2016-08-06 17:46:30 -05:00
|
|
|
row = idltest_find_simple2(idl, 'String2')
|
2016-10-12 14:36:57 -07:00
|
|
|
len_smap = len(getattr(row, 'smap'))
|
2016-08-14 19:48:24 -05:00
|
|
|
row.delkey('smap', 'key1', 'myList1')
|
2016-08-15 11:03:30 -07:00
|
|
|
row.delkey('smap', 'key2', 'wrongvalue')
|
|
|
|
row.delkey('smap', 'key3')
|
2016-10-12 14:36:57 -07:00
|
|
|
row.delkey('smap', 'key4')
|
|
|
|
assert len(getattr(row, 'smap')) == len_smap - 3
|
|
|
|
elif name == 'partialmapmutatenew':
|
|
|
|
new_row2 = txn.insert(idl.tables["simple2"])
|
|
|
|
setattr(new_row2, 'name', 'String2New')
|
|
|
|
new_row2.setkey('smap', 'key1', 'newList1')
|
|
|
|
assert len(getattr(new_row2, 'smap')) == 1
|
|
|
|
new_row2.setkey('smap', 'key2', 'newList2')
|
|
|
|
assert len(getattr(new_row2, 'smap')) == 2
|
2016-08-06 17:46:30 -05:00
|
|
|
elif name == 'partialrenamesetadd':
|
|
|
|
row = idltest_find_simple3(idl, 'mySet1')
|
2016-10-12 14:36:57 -07:00
|
|
|
old_size = len(getattr(row, 'uset', []))
|
2016-08-06 17:46:30 -05:00
|
|
|
row.addvalue('uset',
|
|
|
|
uuid.UUID("001e43d2-dd3f-4616-ab6a-83a490bb0991"))
|
|
|
|
row.__setattr__('name', 'String2')
|
2016-10-12 14:36:57 -07:00
|
|
|
assert len(getattr(row, 'uset', [])) == old_size + 1
|
2016-08-15 11:03:30 -07:00
|
|
|
elif name == 'partialduplicateadd':
|
2016-08-06 17:46:30 -05:00
|
|
|
row = idltest_find_simple3(idl, 'String2')
|
2016-10-12 14:36:57 -07:00
|
|
|
old_size = len(getattr(row, 'uset', []))
|
2016-08-06 17:46:30 -05:00
|
|
|
row.addvalue('uset',
|
|
|
|
uuid.UUID("0026b3ba-571b-4729-8227-d860a5210ab8"))
|
2016-08-15 11:03:30 -07:00
|
|
|
row.addvalue('uset',
|
|
|
|
uuid.UUID("0026b3ba-571b-4729-8227-d860a5210ab8"))
|
2016-10-12 14:36:57 -07:00
|
|
|
assert len(getattr(row, 'uset', [])) == old_size + 1
|
2016-08-06 17:46:30 -05:00
|
|
|
elif name == 'partialsetdel':
|
|
|
|
row = idltest_find_simple3(idl, 'String2')
|
2016-10-12 14:36:57 -07:00
|
|
|
old_size = len(getattr(row, 'uset', []))
|
2016-08-06 17:46:30 -05:00
|
|
|
row.delvalue('uset',
|
|
|
|
uuid.UUID("001e43d2-dd3f-4616-ab6a-83a490bb0991"))
|
2016-10-12 14:36:57 -07:00
|
|
|
assert len(getattr(row, 'uset', [])) == old_size - 1
|
2016-08-06 17:46:30 -05:00
|
|
|
elif name == 'partialsetref':
|
|
|
|
new_row = txn.insert(idl.tables["simple4"])
|
|
|
|
new_row.__setattr__('name', 'test')
|
|
|
|
row = idltest_find_simple3(idl, 'String2')
|
2016-10-12 14:36:57 -07:00
|
|
|
old_size = len(getattr(row, 'uref', []))
|
2016-08-06 17:46:30 -05:00
|
|
|
row.addvalue('uref', new_row.uuid)
|
2016-10-12 14:36:57 -07:00
|
|
|
assert len(getattr(row, 'uref', [])) == old_size + 1
|
2016-08-15 11:03:30 -07:00
|
|
|
elif name == 'partialsetoverrideops':
|
|
|
|
row = idltest_find_simple3(idl, 'String2')
|
|
|
|
row.addvalue('uset',
|
|
|
|
uuid.UUID("579e978d-776c-4f19-a225-268e5890e670"))
|
|
|
|
row.delvalue('uset',
|
|
|
|
uuid.UUID("0026b3ba-571b-4729-8227-d860a5210ab8"))
|
|
|
|
row.__setattr__('uset',
|
|
|
|
[uuid.UUID("0026b3ba-571b-4729-8227-d860a5210ab8")])
|
2016-10-12 14:36:57 -07:00
|
|
|
assert len(getattr(row, 'uset', [])) == 1
|
|
|
|
elif name == 'partialsetadddelete':
|
|
|
|
row = idltest_find_simple3(idl, 'String2')
|
|
|
|
row.addvalue('uset',
|
|
|
|
uuid.UUID('b6272353-af9c-40b7-90fe-32a43e6518a1'))
|
|
|
|
row.addvalue('uset',
|
|
|
|
uuid.UUID('1d6a71a2-dffb-426e-b2fa-b727091f9901'))
|
|
|
|
row.delvalue('uset',
|
|
|
|
uuid.UUID('0026b3ba-571b-4729-8227-d860a5210ab8'))
|
|
|
|
assert len(getattr(row, 'uset', [])) == 2
|
2016-08-23 22:12:30 -07:00
|
|
|
elif name == 'partialsetmutatenew':
|
|
|
|
new_row41 = txn.insert(idl.tables["simple4"])
|
|
|
|
new_row41.__setattr__('name', 'new_row41')
|
|
|
|
new_row3 = txn.insert(idl.tables["simple3"])
|
|
|
|
setattr(new_row3, 'name', 'String3')
|
|
|
|
new_row3.addvalue('uset', new_row41.uuid)
|
2016-10-12 14:36:57 -07:00
|
|
|
assert len(getattr(new_row3, 'uset', [])) == 1
|
2020-03-20 15:22:38 +00:00
|
|
|
elif name == 'partialmapmutateirefmap':
|
|
|
|
row3 = idltest_find_simple3(idl, "myString1")
|
|
|
|
row5 = idltest_find(idl, "simple5", "name", "myString2")
|
|
|
|
row5.setkey('irefmap', 1, row3.uuid)
|
|
|
|
maplen = len(row5.irefmap)
|
|
|
|
assert maplen == 1, "expected 1, got %d" % maplen
|
2011-09-21 10:43:03 -07:00
|
|
|
else:
|
|
|
|
sys.stderr.write("unknown command %s\n" % name)
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
status = txn.commit_block()
|
|
|
|
sys.stdout.write("%03d: commit, status=%s"
|
|
|
|
% (step, ovs.db.idl.Transaction.status_to_string(status)))
|
|
|
|
if increment and status == ovs.db.idl.Transaction.SUCCESS:
|
|
|
|
sys.stdout.write(", increment=%d" % txn.get_increment_new_value())
|
2015-04-25 14:57:44 -05:00
|
|
|
if events:
|
|
|
|
# Event notifications from operations in a single transaction are
|
|
|
|
# not in a gauranteed order due to update messages being dicts
|
|
|
|
sys.stdout.write(", events=" + ", ".join(sorted(events)))
|
2011-09-21 10:43:03 -07:00
|
|
|
sys.stdout.write("\n")
|
|
|
|
sys.stdout.flush()
|
|
|
|
|
2022-11-27 22:56:13 -05:00
|
|
|
return status != ovs.db.idl.Transaction.ERROR
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2022-12-13 18:11:18 +01:00
|
|
|
def update_condition(idl, commands, step):
    """Apply a "condition TABLE JSON[;TABLE JSON...]" test command to 'idl'.

    Strips the leading "condition " keyword from 'commands', then processes
    each semicolon-separated "TABLE COND-JSON" clause by requesting the new
    monitor condition on the IDL.  Prints a per-table line (prefixed with
    'step') saying whether the conditions actually changed.

    Returns the highest condition sequence number the IDL expects to reach
    once the server acknowledges every requested change.  Exits the process
    on a malformed clause.
    """
    latest_seqno = 0
    for clause in commands[len("condition "):].split(";"):
        parts = clause.split(" ")
        if len(parts) != 2:
            sys.stderr.write("Error parsing condition %s\n" % parts)
            sys.exit(1)

        table, cond_json = parts
        cond = ovs.json.from_string(cond_json)

        next_seqno = idl.cond_change(table, cond)
        if idl.cond_seqno == next_seqno:
            sys.stdout.write("%03d: %s: conditions unchanged\n" %
                             (step, table))
        else:
            sys.stdout.write("%03d: %s: change conditions\n" %
                             (step, table))
        sys.stdout.flush()

        # Re-requesting the identical condition must not bump the
        # expected sequence number.
        assert next_seqno == idl.cond_change(table, cond), \
            "condition expected seqno changed"
        latest_seqno = max(latest_seqno, next_seqno)

    return latest_seqno
|
2016-07-18 11:45:58 +03:00
|
|
|
|
|
|
|
|
2011-09-21 10:43:03 -07:00
|
|
|
def do_idl(schema_file, remote, *commands):
    """Run the IDL test driver against an OVSDB server at 'remote'.

    Loads 'schema_file', optionally configures SSL/TLS (when 'remote' starts
    with "ssl:", the first three commands are key/cert/CA-cert files),
    optionally enables change-tracking notification printing ("track-notify"
    as the first command), and optionally restricts monitoring to specific
    columns (a leading "?T1:C1,C2?T2:..." command; a trailing "!" marks a
    column as readonly/fetch-on-demand).

    Each remaining command is either:
      - "reconnect": force the IDL to reconnect,
      - a "condition ..." command: change monitor conditions,
      - a "[...]" JSON value: sent directly as a raw "transact" RPC,
      - anything else: passed to idl_set() to run a transaction via the IDL.
    A "+" prefix means "don't wait for an update first"; a "^" prefix means
    "wait for the latest condition change to be acked"; a "?" prefix prints
    terse table contents.  The IDL state is printed after each step.
    """
    schema_helper = ovs.db.idl.SchemaHelper(schema_file)
    track_notify = False

    if remote.startswith("ssl:"):
        if len(commands) < 3:
            sys.stderr.write("SSL/TLS connection requires private key, "
                             "certificate for private key, and peer CA "
                             "certificate as arguments\n")
            sys.exit(1)
        ovs.stream.Stream.ssl_set_private_key_file(commands[0])
        ovs.stream.Stream.ssl_set_certificate_file(commands[1])
        ovs.stream.Stream.ssl_set_ca_cert_file(commands[2])
        commands = commands[3:]

    if commands and commands[0] == "track-notify":
        commands = commands[1:]
        track_notify = True

    if commands and commands[0].startswith("?"):
        # "?table:c1,c2?table2:c3" syntax: register only the listed
        # columns; a trailing "!" marks a column readonly (fetched
        # on demand rather than monitored).
        for x in commands[0][1:].split("?"):
            # Fresh readonly list per table clause.
            readonly = []
            table, columns = x.split(":")
            columns = columns.split(",")
            for index, column in enumerate(columns):
                if column[-1] == '!':
                    columns[index] = columns[index][:-1]
                    readonly.append(columns[index])
            schema_helper.register_columns(table, columns, readonly)
        commands = commands[1:]
    else:
        schema_helper.register_all()
    idl = ovs.db.idl.Idl(remote, schema_helper, leader_only=False)
    if "simple3" in idl.tables:
        idl.index_create("simple3", "simple3_by_name")
    if "indexed" in idl.tables:
        idx = idl.index_create("indexed", "indexed_by_i")
        idx.add_column("i")

    if commands:
        # A separate raw JSON-RPC connection is used for "[...]"
        # transact commands; try each remote until one connects.
        remotes = remote.split(',')
        stream = None
        for r in remotes:
            error, stream = ovs.stream.Stream.open_block(
                ovs.stream.Stream.open(r), 2000)
            if not error and stream:
                break
            stream = None

        if not stream:
            sys.stderr.write("failed to connect to \"%s\"" % remote)
            sys.exit(1)
        rpc = ovs.jsonrpc.Connection(stream)
    else:
        rpc = None

    next_cond_seqno = 0
    symtab = {}
    seqno = 0
    step = 0

    def mock_notify(event, row, updates=None):
        # Replacement for Idl.notify that prints each tracked change
        # (reads 'step' from the enclosing scope at call time).
        output = "%03d: " % step
        output += "event:" + str(event) + ", row={"
        output += get_simple_table_printable_row(row, 'l2', 'l1') + "}, "
        output += get_simple_printable_row_string(row, ["uuid"]) + ", updates="
        if updates is None:
            output += "None"
        else:
            output += "{" + get_simple_table_printable_row(updates) + "}"

        output += '\n'
        sys.stdout.write(output)
        sys.stdout.flush()

    if track_notify and "simple" in idl.tables:
        idl.notify = mock_notify

    commands = list(commands)
    if len(commands) >= 1 and "condition" in commands[0]:
        next_cond_seqno = update_condition(idl, commands.pop(0), step)
        step += 1

    for command in commands:
        terse = False
        if command.startswith("?"):
            # We're only interested in terse table contents.
            terse = True
            command = command[1:]

        if command.startswith("+"):
            # The previous transaction didn't change anything.
            command = command[1:]
        elif command.startswith("^"):
            # Wait for condition change to be acked by the server.
            command = command[1:]
            while idl.cond_seqno != next_cond_seqno and not idl.run():
                rpc.run()

                poller = ovs.poller.Poller()
                idl.wait(poller)
                rpc.wait(poller)
                poller.block()
        else:
            # Wait for update.
            while True:
                while idl.change_seqno == seqno and not idl.run():
                    rpc.run()

                    poller = ovs.poller.Poller()
                    idl.wait(poller)
                    rpc.wait(poller)
                    poller.block()

                print_idl(idl, step, terse)
                step += 1

                # Run IDL forever in case of a simple monitor, otherwise
                # break and execute the command.
                seqno = idl.change_seqno
                if command != "monitor":
                    break

        seqno = idl.change_seqno

        if command == "reconnect":
            print("%03d: reconnect" % step)
            sys.stdout.flush()
            step += 1
            idl.force_reconnect()
        elif "condition" in command:
            next_cond_seqno = update_condition(idl, command, step)
            step += 1
        elif not command.startswith("["):
            if not idl_set(idl, command, step):
                # If idl_set() returns false, then no transaction
                # was sent to the server and most likely seqno
                # would remain the same. And the above 'Wait for update'
                # for loop poller.block() would never return.
                # So set seqno to 0.
                seqno = 0
            step += 1
        else:
            # Raw "[...]" command: send it as a bare "transact" RPC,
            # substituting symbolic UUIDs recorded from earlier replies.
            json = ovs.json.from_string(command)
            if isinstance(json, str):
                # from_string() returns an error message string on
                # parse failure.
                sys.stderr.write("\"%s\": %s\n" % (command, json))
                sys.exit(1)
            json = substitute_uuids(json, symtab)
            request = ovs.jsonrpc.Message.create_request("transact", json)
            error, reply = rpc.transact_block(request)
            if error:
                sys.stderr.write("jsonrpc transaction failed: %s\n"
                                 % os.strerror(error))
                sys.exit(1)
            elif reply.error is not None:
                sys.stderr.write("jsonrpc transaction failed: %s\n"
                                 % reply.error)
                sys.exit(1)

            sys.stdout.write("%03d: " % step)
            sys.stdout.flush()
            step += 1
            if reply.result is not None:
                parse_uuids(reply.result, symtab)
            reply.id = None
            sys.stdout.write("%s\n" % ovs.json.to_string(reply.to_json()))
            sys.stdout.flush()

    if rpc:
        rpc.close()
    # Wait for and print the final state before shutting down.
    while idl.change_seqno == seqno and not idl.run():
        poller = ovs.poller.Poller()
        idl.wait(poller)
        poller.block()
    print_idl(idl, step)
    step += 1
    idl.close()
    print("%03d: done" % step)
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2016-05-18 18:29:13 +03:00
|
|
|
def do_idl_passive(schema_file, remote, *commands):
    """Run IDL test commands over a passively-established session.

    Creates an IDL for 'schema_file' on 'remote' (a passive/listening
    target), waits for a peer to connect, then reuses the IDL's own
    JSON-RPC connection to send each command as a raw "transact" request.
    Prints the initial IDL contents, each reply (with UUIDs recorded into a
    symbol table for later substitution), and a final "done" line.  Exits
    the process on a parse or RPC failure.
    """
    symtab = {}
    step = 0
    helper = ovs.db.idl.SchemaHelper(schema_file)
    helper.register_all()
    idl = ovs.db.idl.Idl(remote, helper)

    # Spin until the passive session actually has an underlying
    # JSON-RPC connection to talk over.
    while idl._session.rpc is None:
        idl.run()
    rpc = idl._session.rpc

    print_idl(idl, step)
    step += 1

    for command in commands:
        parsed = ovs.json.from_string(command)
        if isinstance(parsed, str):
            # from_string() returns an error message string on failure.
            sys.stderr.write("\"%s\": %s\n" % (command, parsed))
            sys.exit(1)
        request = ovs.jsonrpc.Message.create_request(
            "transact", substitute_uuids(parsed, symtab))
        error, reply = rpc.transact_block(request)
        if error:
            sys.stderr.write("jsonrpc transaction failed: %s\n"
                             % os.strerror(error))
            sys.exit(1)
        elif reply.error is not None:
            sys.stderr.write("jsonrpc transaction failed: %s\n"
                             % reply.error)
            sys.exit(1)

        sys.stdout.write("%03d: " % step)
        sys.stdout.flush()
        step += 1
        if reply.result is not None:
            parse_uuids(reply.result, symtab)
        reply.id = None
        sys.stdout.write("%s\n" % ovs.json.to_string(reply.to_json()))
        sys.stdout.flush()

    idl.close()
    print("%03d: done" % step)
|
|
|
|
|
|
|
|
|
2019-01-25 19:10:01 +00:00
|
|
|
def do_idl_cluster(schema_file, remote, pid, *commands):
    """Exercise the IDL against a clustered database.

    'remote' is a comma-separated list of cluster member targets and 'pid'
    the matching comma-separated list of their server process IDs.  When
    'remote' starts with "ssl:", the first three commands are the private
    key, certificate, and peer CA certificate files.  Supported commands:
      - "reconnect":  force the IDL to reconnect,
      - "remote":     print the name of the currently connected remote,
      - "remotestop": kill the server process of the current remote.
    A "+" command prefix skips the usual wait-for-update step.

    Bug fix relative to the previous version: the try/except ValueError
    guarded the "kill %s" string formatting (which can never raise
    ValueError) instead of remotes.index() (which raises ValueError when
    the session name is not in the remotes list), and the handler passed a
    ValueError to os.strerror(), which would itself raise TypeError.
    """
    schema_helper = ovs.db.idl.SchemaHelper(schema_file)

    if remote.startswith("ssl:"):
        if len(commands) < 3:
            sys.stderr.write("SSL/TLS connection requires private key, "
                             "certificate for private key, and peer CA "
                             "certificate as arguments\n")
            sys.exit(1)
        ovs.stream.Stream.ssl_set_private_key_file(commands[0])
        ovs.stream.Stream.ssl_set_certificate_file(commands[1])
        ovs.stream.Stream.ssl_set_ca_cert_file(commands[2])
        commands = commands[3:]

    schema_helper.register_all()
    idl = ovs.db.idl.Idl(remote, schema_helper)

    step = 0
    seqno = 0
    commands = list(commands)
    for command in commands:
        if command.startswith("+"):
            # The previous transaction didn't change anything.
            command = command[1:]
        else:
            # Wait for update.
            while idl.change_seqno == seqno and not idl.run():
                poller = ovs.poller.Poller()
                idl.wait(poller)
                poller.block()
            step += 1

        seqno = idl.change_seqno

        if command == "reconnect":
            print("%03d: reconnect" % step)
            sys.stdout.flush()
            step += 1
            idl.force_reconnect()
        elif command == "remote":
            print("%03d: %s" % (step, idl.session_name()))
            sys.stdout.flush()
            step += 1
        elif command == "remotestop":
            r = idl.session_name()
            remotes = remote.split(',')
            pids = pid.split(',')
            try:
                # index() raises ValueError when the current session's
                # remote is not one of the configured remotes.
                i = remotes.index(r)
            except ValueError:
                sys.stderr.write("Cannot find pid of remote: %s\n" % r)
                sys.exit(1)
            # NOTE(review): shell command built from test-supplied pids;
            # acceptable in a test harness, but subprocess.run([...])
            # would be the safer idiom.
            os.popen("kill %s" % pids[i])
            print("%03d: stop %s" % (step, pids[i]))
            sys.stdout.flush()
            step += 1

    idl.close()
    print("%03d: done" % step)
|
|
|
|
|
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def usage():
    """Print the command-line help text and exit with status 0."""
    # Typo fix in the help text: "seperated" -> "separated".
    print("""\
%(program_name)s: test utility for Open vSwitch database Python bindings
usage: %(program_name)s [OPTIONS] COMMAND ARG...

The following commands are supported:
default-atoms
  test ovsdb_atom_default()
default-data
  test ovsdb_datum_default()
parse-atomic-type TYPE
  parse TYPE as OVSDB atomic type, and re-serialize
parse-base-type TYPE
  parse TYPE as OVSDB base type, and re-serialize
parse-type JSON
  parse JSON as OVSDB type, and re-serialize
parse-atoms TYPE ATOM...
  parse JSON ATOMs as atoms of TYPE, and re-serialize
parse-atom-strings TYPE ATOM...
  parse string ATOMs as atoms of given TYPE, and re-serialize
sort-atoms TYPE ATOM...
  print JSON ATOMs in sorted order
parse-data TYPE DATUM...
  parse JSON DATUMs as data of given TYPE, and re-serialize
parse-column NAME OBJECT
  parse column NAME with info OBJECT, and re-serialize
parse-table NAME OBJECT [DEFAULT-IS-ROOT]
  parse table NAME with info OBJECT
parse-schema JSON
  parse JSON as an OVSDB schema, and re-serialize
idl SCHEMA SERVER [?T1:C1,C2...[?T2:C1,C2,...]...] [TRANSACTION...]
  connect to SERVER (which has the specified SCHEMA) and dump the
  contents of the database as seen initially by the IDL implementation
  and after executing each TRANSACTION.  (Each TRANSACTION must modify
  the database or this command will hang.)
  By default, all columns of all tables are monitored. The "?" option
  can be used to monitor specific Table:Column(s). The table and their
  columns are listed as a string of the form starting with "?":
      ?<table-name>:<column-name>,<column-name>,...
  e.g.:
      ?simple:b - Monitor column "b" in table "simple"
  Entries for multiple tables are separated by "?":
      ?<table-name>:<column-name>,...?<table-name>:<column-name>,...
  e.g.:
      ?simple:b?link1:i,k - Monitor column "b" in table "simple",
      and column "i", "k" in table "link1"
  Readonly columns: Suffixing a "!" after a column indicates that the
  column is to be registered "readonly".
  e.g.:
      ?simple:i,b! - Register interest in column "i" (monitoring) and
      column "b" (readonly).

The following options are also available:
  -t, --timeout=SECS          give up after SECS seconds
  -h, --help                  display this help message\
""" % {'program_name': ovs.util.PROGRAM_NAME})
    sys.exit(0)
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
def main(argv):
|
|
|
|
try:
|
2018-10-15 19:44:36 +03:00
|
|
|
options, args = getopt.gnu_getopt(argv[1:], 't:h',
|
2010-08-25 10:26:40 -07:00
|
|
|
['timeout',
|
2018-10-15 19:44:36 +03:00
|
|
|
'help'])
|
2016-01-06 13:48:16 -05:00
|
|
|
except getopt.GetoptError as geo:
|
2010-08-25 10:26:40 -07:00
|
|
|
sys.stderr.write("%s: %s\n" % (ovs.util.PROGRAM_NAME, geo.msg))
|
|
|
|
sys.exit(1)
|
|
|
|
|
2018-08-14 10:53:16 +03:00
|
|
|
timeout = None
|
2010-08-25 10:26:40 -07:00
|
|
|
for key, value in options:
|
|
|
|
if key in ['-h', '--help']:
|
|
|
|
usage()
|
|
|
|
elif key in ['-t', '--timeout']:
|
|
|
|
try:
|
|
|
|
timeout = int(value)
|
|
|
|
if timeout < 1:
|
|
|
|
raise TypeError
|
|
|
|
except TypeError:
|
|
|
|
raise error.Error("value %s on -t or --timeout is not at "
|
|
|
|
"least 1" % value)
|
|
|
|
else:
|
|
|
|
sys.exit(0)
|
|
|
|
|
2018-08-14 10:53:16 +03:00
|
|
|
signal_alarm(timeout)
|
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
if not args:
|
|
|
|
sys.stderr.write("%s: missing command argument "
|
|
|
|
"(use --help for help)\n" % ovs.util.PROGRAM_NAME)
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
commands = {"default-atoms": (do_default_atoms, 0),
|
|
|
|
"default-data": (do_default_data, 0),
|
|
|
|
"parse-atomic-type": (do_parse_atomic_type, 1),
|
|
|
|
"parse-base-type": (do_parse_base_type, 1),
|
|
|
|
"parse-type": (do_parse_type, 1),
|
|
|
|
"parse-atoms": (do_parse_atoms, (2,)),
|
|
|
|
"parse-data": (do_parse_data, (2,)),
|
|
|
|
"sort-atoms": (do_sort_atoms, 2),
|
|
|
|
"parse-column": (do_parse_column, 2),
|
2011-03-10 11:15:01 -08:00
|
|
|
"parse-table": (do_parse_table, (2, 3)),
|
2010-08-25 10:26:40 -07:00
|
|
|
"parse-schema": (do_parse_schema, 1),
|
2016-05-18 18:29:13 +03:00
|
|
|
"idl": (do_idl, (2,)),
|
2019-01-25 19:10:01 +00:00
|
|
|
"idl_passive": (do_idl_passive, (2,)),
|
|
|
|
"idl-cluster": (do_idl_cluster, (3,))}
|
2010-08-25 10:26:40 -07:00
|
|
|
|
|
|
|
command_name = args[0]
|
|
|
|
args = args[1:]
|
2015-12-22 11:30:32 -05:00
|
|
|
if command_name not in commands:
|
2010-08-25 10:26:40 -07:00
|
|
|
sys.stderr.write("%s: unknown command \"%s\" "
|
|
|
|
"(use --help for help)\n" % (ovs.util.PROGRAM_NAME,
|
|
|
|
command_name))
|
|
|
|
sys.exit(1)
|
|
|
|
|
|
|
|
func, n_args = commands[command_name]
|
2023-10-31 17:12:34 +00:00
|
|
|
if type(n_args) is tuple:
|
2010-08-25 10:26:40 -07:00
|
|
|
if len(args) < n_args[0]:
|
|
|
|
sys.stderr.write("%s: \"%s\" requires at least %d arguments but "
|
|
|
|
"only %d provided\n"
|
|
|
|
% (ovs.util.PROGRAM_NAME, command_name,
|
2018-04-18 12:35:08 +02:00
|
|
|
n_args[0], len(args)))
|
2010-08-25 10:26:40 -07:00
|
|
|
sys.exit(1)
|
2023-10-31 17:12:34 +00:00
|
|
|
elif type(n_args) is int:
|
2010-08-25 10:26:40 -07:00
|
|
|
if len(args) != n_args:
|
|
|
|
sys.stderr.write("%s: \"%s\" requires %d arguments but %d "
|
|
|
|
"provided\n"
|
|
|
|
% (ovs.util.PROGRAM_NAME, command_name,
|
|
|
|
n_args, len(args)))
|
|
|
|
sys.exit(1)
|
|
|
|
else:
|
|
|
|
assert False
|
|
|
|
|
|
|
|
func(*args)
|
|
|
|
|
2011-09-23 23:43:12 -07:00
|
|
|
|
2010-08-25 10:26:40 -07:00
|
|
|
if __name__ == '__main__':
    try:
        main(sys.argv)
    except error.Error as err:
        # Report OVSDB library errors as a clean one-line message and a
        # nonzero exit status rather than a Python traceback.
        print(err, file=sys.stderr)
        sys.exit(1)
|