Mirror of https://github.com/openvswitch/ovs, synced 2025-10-15 14:17:18 +00:00
python: Convert dict iterators.
In Python 2, dict.items(), dict.keys(), and dict.values() returned lists, while dict.iteritems(), dict.iterkeys(), and dict.itervalues() returned iterators. In Python 3, dict.iteritems(), dict.itervalues(), and dict.iterkeys() are gone, and items(), keys(), and values() return view objects rather than lists.

Where an iterator is wanted, we now use the six.iter*() helpers. Where a list is wanted, we explicitly build one from the view.

Signed-off-by: Russell Bryant <russell@ovn.org>
Acked-by: Ben Pfaff <blp@ovn.org>
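To illustrate the conversion pattern the patch applies, here is a minimal sketch; the dictionary and variable names are invented for the example and are not taken from the OVS sources:

    import six

    stats = {"rx_packets": 10, "tx_packets": 7, "rx_errors": 0}

    # Where an iterator is wanted, six.iteritems()/itervalues()/iterkeys()
    # map to the lazy iterators on Python 2 and to the view objects on
    # Python 3.
    for name, value in six.iteritems(stats):
        print("%s=%d" % (name, value))

    # Where a real list is wanted (for indexing, in-place sorting, or
    # mutating the dict while looping), build one explicitly, since
    # keys()/values()/items() are not lists on Python 3.
    names = list(stats.keys())
    names.sort()

    # Picking an arbitrary single element: d.keys()[0] fails on Python 3,
    # so take next() of the iterator instead.
    first_key = next(six.iterkeys(stats))

The hunks below apply these substitutions throughout the Python tree.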
debian/ovs-monitor-ipsec | 13 (vendored)
@@ -40,6 +40,7 @@ import ovs.unixctl
 import ovs.unixctl.server
 import ovs.vlog
 from six.moves import range
+import six

 vlog = ovs.vlog.Vlog("ovs-monitor-ipsec")
 root_prefix = '' # Prefix for absolute file names, for testing.
@@ -152,7 +153,7 @@ path certificate "%s";
 conf_file = open(root_prefix + self.conf_file, 'w')
 conf_file.write(Racoon.conf_header % (self.psk_file, self.cert_dir))

-for host, vals in self.cert_hosts.iteritems():
+for host, vals in six.iteritems(self.cert_hosts):
 conf_file.write(Racoon.cert_entry % (host, vals["certificate"],
 vals["private_key"], vals["peer_cert_file"]))

@@ -169,7 +170,7 @@ path certificate "%s";

 psk_file.write("# Generated by Open vSwitch...do not modify by hand!")
 psk_file.write("\n\n")
-for host, vals in self.psk_hosts.iteritems():
+for host, vals in six.iteritems(self.psk_hosts):
 psk_file.write("%s %s\n" % (host, vals["psk"]))
 psk_file.close()

@@ -354,11 +355,11 @@ class IPsec:


 def update_ipsec(ipsec, interfaces, new_interfaces):
-for name, vals in interfaces.iteritems():
+for name, vals in six.iteritems(interfaces):
 if name not in new_interfaces:
 ipsec.del_entry(vals["local_ip"], vals["remote_ip"])

-for name, vals in new_interfaces.iteritems():
+for name, vals in six.iteritems(new_interfaces):
 orig_vals = interfaces.get(name)
 if orig_vals:
 # Configuration for this host already exists. Check if it's
@@ -377,7 +378,7 @@ def update_ipsec(ipsec, interfaces, new_interfaces):


 def get_ssl_cert(data):
-for ovs_rec in data["Open_vSwitch"].rows.itervalues():
+for ovs_rec in data["Open_vSwitch"].rows.values():
 if ovs_rec.ssl:
 ssl = ovs_rec.ssl[0]
 if ssl.certificate and ssl.private_key:
@@ -440,7 +441,7 @@ def main():
 ssl_cert = get_ssl_cert(idl.tables)

 new_interfaces = {}
-for rec in idl.tables["Interface"].rows.itervalues():
+for rec in six.itervalues(idl.tables["Interface"].rows):
 if rec.type == "ipsec_gre":
 name = rec.name
 options = rec.options
@@ -89,7 +89,7 @@ def inline_xml_to_nroff(node, font, to_upper=False, newline='\n'):
 s += node.attributes['db'].nodeValue
 else:
 raise error.Error("'ref' lacks required attributes: %s"
-% node.attributes.keys())
+% list(node.attributes.keys()))
 return s + font
 elif node.tagName in ['var', 'dfn', 'i']:
 s = r'\fI'
@@ -15,6 +15,8 @@
 import re
 import uuid

+import six
+
 import ovs.poller
 import ovs.socket_util
 import ovs.json
@@ -293,7 +295,7 @@ class Datum(object):
 This function is not commonly useful because the most ordinary way to
 obtain a datum is ultimately via Datum.from_json() or Atom.from_json(),
 which check constraints themselves."""
-for keyAtom, valueAtom in self.values.iteritems():
+for keyAtom, valueAtom in six.iteritems(self.values):
 keyAtom.check_constraints(self.type.key)
 if valueAtom is not None:
 valueAtom.check_constraints(self.type.value)
@@ -354,7 +356,7 @@ class Datum(object):
 return ["map", [[k.to_json(), v.to_json()]
 for k, v in sorted(self.values.items())]]
 elif len(self.values) == 1:
-key = self.values.keys()[0]
+key = next(six.iterkeys(self.values))
 return key.to_json()
 else:
 return ["set", [k.to_json() for k in sorted(self.values.keys())]]
@@ -388,9 +390,9 @@ class Datum(object):

 def as_list(self):
 if self.type.is_map():
-return [[k.value, v.value] for k, v in self.values.iteritems()]
+return [[k.value, v.value] for k, v in six.iteritems(self.values)]
 else:
-return [k.value for k in self.values.iterkeys()]
+return [k.value for k in six.iterkeys(self.values)]

 def as_dict(self):
 return dict(self.values)
@@ -398,10 +400,10 @@ class Datum(object):
 def as_scalar(self):
 if len(self.values) == 1:
 if self.type.is_map():
-k, v = self.values.iteritems()[0]
+k, v = next(six.iteritems(self.values))
 return [k.value, v.value]
 else:
-return self.values.keys()[0].value
+return next(six.iterkeys(self.values)).value
 else:
 return None

@@ -448,7 +450,7 @@ class Datum(object):
 return value
 elif self.type.is_map():
 value = {}
-for k, v in self.values.iteritems():
+for k, v in six.iteritems(self.values):
 dk = uuid_to_row(k.value, self.type.key)
 dv = uuid_to_row(v.value, self.type.value)
 if dk is not None and dv is not None:
@@ -476,7 +478,7 @@ class Datum(object):
 'type_'."""
 d = {}
 if type(value) == dict:
-for k, v in value.iteritems():
+for k, v in six.iteritems(value):
 ka = Atom.from_python(type_.key, row_to_uuid(k))
 va = Atom.from_python(type_.value, row_to_uuid(v))
 d[ka] = va
@@ -14,6 +14,8 @@

 import uuid

+import six
+
 import ovs.jsonrpc
 import ovs.db.parser
 import ovs.db.schema
@@ -124,8 +126,8 @@ class Idl(object):
 self.txn = None
 self._outstanding_txns = {}

-for table in schema.tables.itervalues():
-for column in table.columns.itervalues():
+for table in six.itervalues(schema.tables):
+for column in six.itervalues(table.columns):
 if not hasattr(column, 'alert'):
 column.alert = True
 table.need_table = False
@@ -283,7 +285,7 @@ class Idl(object):
 def __clear(self):
 changed = False

-for table in self.tables.itervalues():
+for table in six.itervalues(self.tables):
 if table.rows:
 changed = True
 table.rows = {}
@@ -338,9 +340,9 @@ class Idl(object):

 def __send_monitor_request(self):
 monitor_requests = {}
-for table in self.tables.itervalues():
+for table in six.itervalues(self.tables):
 columns = []
-for column in table.columns.keys():
+for column in six.iterkeys(table.columns):
 if ((table.name not in self.readonly) or
 (table.name in self.readonly) and
 (column not in self.readonly[table.name])):
@@ -363,7 +365,7 @@ class Idl(object):
 raise error.Error("<table-updates> is not an object",
 table_updates)

-for table_name, table_update in table_updates.iteritems():
+for table_name, table_update in six.iteritems(table_updates):
 table = self.tables.get(table_name)
 if not table:
 raise error.Error('<table-updates> includes unknown '
@@ -373,7 +375,7 @@ class Idl(object):
 raise error.Error('<table-update> for table "%s" is not '
 'an object' % table_name, table_update)

-for uuid_string, row_update in table_update.iteritems():
+for uuid_string, row_update in six.iteritems(table_update):
 if not ovs.ovsuuid.is_valid_string(uuid_string):
 raise error.Error('<table-update> for table "%s" '
 'contains bad UUID "%s" as member '
@@ -441,7 +443,7 @@ class Idl(object):

 def __row_update(self, table, row, row_json):
 changed = False
-for column_name, datum_json in row_json.iteritems():
+for column_name, datum_json in six.iteritems(row_json):
 column = table.columns.get(column_name)
 if not column:
 # XXX rate-limit
@@ -469,7 +471,7 @@ class Idl(object):

 def __create_row(self, table, uuid):
 data = {}
-for column in table.columns.itervalues():
+for column in six.itervalues(table.columns):
 data[column.name] = ovs.db.data.Datum.default(column.type)
 row = table.rows[uuid] = Row(self, table, uuid, data)
 return row
@@ -610,7 +612,7 @@ class Row(object):
 @classmethod
 def from_json(cls, idl, table, uuid, row_json):
 data = {}
-for column_name, datum_json in row_json.iteritems():
+for column_name, datum_json in six.iteritems(row_json):
 column = table.columns.get(column_name)
 if not column:
 # XXX rate-limit
@@ -840,7 +842,7 @@ class Transaction(object):
 def __disassemble(self):
 self.idl.txn = None

-for row in self._txn_rows.itervalues():
+for row in six.itervalues(self._txn_rows):
 if row._changes is None:
 row._table.rows[row.uuid] = row
 elif row._data is None:
@@ -919,7 +921,7 @@ class Transaction(object):
 "lock": self.idl.lock_name})

 # Add prerequisites and declarations of new rows.
-for row in self._txn_rows.itervalues():
+for row in six.itervalues(self._txn_rows):
 if row._prereqs:
 rows = {}
 columns = []
@@ -936,7 +938,7 @@ class Transaction(object):

 # Add updates.
 any_updates = False
-for row in self._txn_rows.itervalues():
+for row in six.itervalues(self._txn_rows):
 if row._changes is None:
 if row._table.is_root:
 operations.append({"op": "delete",
@@ -962,7 +964,7 @@ class Transaction(object):
 row_json = {}
 op["row"] = row_json

-for column_name, datum in row._changes.iteritems():
+for column_name, datum in six.iteritems(row._changes):
 if row._data is not None or not datum.is_default():
 row_json[column_name] = (
 self._substitute_uuids(datum.to_json()))
@@ -1190,7 +1192,7 @@ class Transaction(object):
 else:
 hard_errors = True

-for insert in self._inserted_rows.itervalues():
+for insert in six.itervalues(self._inserted_rows):
 if not self.__process_insert_reply(insert, ops):
 hard_errors = True

@@ -1390,7 +1392,7 @@ class SchemaHelper(object):

 if not self._all:
 schema_tables = {}
-for table, columns in self._tables.iteritems():
+for table, columns in six.iteritems(self._tables):
 schema_tables[table] = (
 self._keep_table_columns(schema, table, columns))

@@ -15,6 +15,8 @@
 import re
 import sys

+import six
+
 from ovs.db import error
 import ovs.db.parser
 import ovs.db.types
@@ -40,7 +42,7 @@ class DbSchema(object):
 # backward compatibility, if the root set is empty then assume that
 # every table is in the root set.
 if self.__root_set_size() == 0:
-for table in self.tables.itervalues():
+for table in six.itervalues(self.tables):
 table.is_root = True

 # Find the "ref_table"s referenced by "ref_table_name"s.
@@ -48,15 +50,15 @@ class DbSchema(object):
 # Also force certain columns to be persistent, as explained in
 # __check_ref_table(). This requires 'is_root' to be known, so this
 # must follow the loop updating 'is_root' above.
-for table in self.tables.itervalues():
-for column in table.columns.itervalues():
+for table in six.itervalues(self.tables):
+for column in six.itervalues(table.columns):
 self.__follow_ref_table(column, column.type.key, "key")
 self.__follow_ref_table(column, column.type.value, "value")

 def __root_set_size(self):
 """Returns the number of tables in the schema's root set."""
 n_root = 0
-for table in self.tables.itervalues():
+for table in six.itervalues(self.tables):
 if table.is_root:
 n_root += 1
 return n_root
@@ -76,7 +78,7 @@ class DbSchema(object):
 % version)

 tables = {}
-for tableName, tableJson in tablesJson.iteritems():
+for tableName, tableJson in six.iteritems(tablesJson):
 _check_id(tableName, json)
 tables[tableName] = TableSchema.from_json(tableJson, tableName)

@@ -90,7 +92,7 @@ class DbSchema(object):
 default_is_root = self.__root_set_size() == len(self.tables)

 tables = {}
-for table in self.tables.itervalues():
+for table in six.itervalues(self.tables):
 tables[table.name] = table.to_json(default_is_root)
 json = {"name": self.name, "tables": tables}
 if self.version:
@@ -191,7 +193,7 @@ class TableSchema(object):
 raise error.Error("table must have at least one column", json)

 columns = {}
-for column_name, column_json in columns_json.iteritems():
+for column_name, column_json in six.iteritems(columns_json):
 _check_id(column_name, json)
 columns[column_name] = ColumnSchema.from_json(column_json,
 column_name)
@@ -230,7 +232,7 @@ class TableSchema(object):
 json["isRoot"] = self.is_root

 json["columns"] = columns = {}
-for column in self.columns.itervalues():
+for column in six.itervalues(self.columns):
 if not column.name.startswith("_"):
 columns[column.name] = column.to_json()

@@ -16,6 +16,7 @@ import re
 import StringIO
 import sys

+import six
 from six.moves import range

 __pychecker__ = 'no-stringiter'
@@ -73,7 +74,7 @@ class _Serializer(object):
 if self.sort_keys:
 items = sorted(obj.items())
 else:
-items = obj.iteritems()
+items = six.iteritems(obj)
 for i, (key, value) in enumerate(items):
 if i > 0:
 self.stream.write(u",")
@@ -85,7 +85,7 @@ class _SelectSelect(object):
 events_dict[fd] = events_dict.get(fd, 0) | (POLLERR |
 POLLHUP |
 POLLNVAL)
-return events_dict.items()
+return list(events_dict.items())


 SelectPoll = _SelectSelect
@@ -16,6 +16,8 @@ import errno
 import os
 import socket

+import six
+
 import ovs.poller
 import ovs.socket_util
 import ovs.vlog
@@ -58,7 +60,7 @@ class Stream(object):

 @staticmethod
 def _find_method(name):
-for method, cls in Stream._SOCKET_METHODS.items():
+for method, cls in six.iteritems(Stream._SOCKET_METHODS):
 if name.startswith(method):
 return cls
 return None
@@ -22,6 +22,7 @@ import socket
 import sys
 import threading

+import six
 from six.moves import range

 import ovs.dirs
@@ -80,7 +81,7 @@ class Vlog(object):
 msg_num = Vlog.__msg_num
 Vlog.__msg_num += 1

-for f, f_level in Vlog.__mfl[self.name].iteritems():
+for f, f_level in six.iteritems(Vlog.__mfl[self.name]):
 f_level = LEVELS.get(f_level, logging.CRITICAL)
 if level_num >= f_level:
 msg = self._build_message(message, f, level, msg_num)
@@ -184,7 +185,7 @@ class Vlog(object):

 def __is_enabled(self, level):
 level = LEVELS.get(level.lower(), logging.DEBUG)
-for f, f_level in Vlog.__mfl[self.name].iteritems():
+for f, f_level in six.iteritems(Vlog.__mfl[self.name]):
 f_level = LEVELS.get(f_level, logging.CRITICAL)
 if level >= f_level:
 return True
@@ -266,12 +267,12 @@ class Vlog(object):
 return

 if module == "any":
-modules = Vlog.__mfl.keys()
+modules = list(Vlog.__mfl.keys())
 else:
 modules = [module]

 if destination == "any":
-destinations = DESTINATIONS.keys()
+destinations = list(DESTINATIONS.keys())
 else:
 destinations = [destination]

@@ -29,6 +29,7 @@ import ovs.db.types
 import ovs.ovsuuid
 import ovs.poller
 import ovs.util
+import six


 def unbox_json(json):
@@ -154,7 +155,7 @@ def print_idl(idl, step):
 simple_columns = ["i", "r", "b", "s", "u", "ia",
 "ra", "ba", "sa", "ua", "uuid"]
 simple = idl.tables["simple"].rows
-for row in simple.itervalues():
+for row in six.itervalues(simple):
 s = "%03d:" % step
 for column in simple_columns:
 if hasattr(row, column) and not (type(getattr(row, column))
@@ -170,7 +171,7 @@ def print_idl(idl, step):

 if "link1" in idl.tables:
 l1 = idl.tables["link1"].rows
-for row in l1.itervalues():
+for row in six.itervalues(l1):
 s = ["%03d: i=%s k=" % (step, row.i)]
 if hasattr(row, "k") and row.k:
 s.append(str(row.k.i))
@@ -187,7 +188,7 @@ def print_idl(idl, step):

 if "link2" in idl.tables:
 l2 = idl.tables["link2"].rows
-for row in l2.itervalues():
+for row in six.itervalues(l2):
 s = ["%03d:" % step]
 s.append(" i=%s l1=" % row.i)
 if hasattr(row, "l1") and row.l1:
@@ -211,7 +212,7 @@ def substitute_uuids(json, symtab):
 return [substitute_uuids(element, symtab) for element in json]
 elif type(json) == dict:
 d = {}
-for key, value in json.iteritems():
+for key, value in six.iteritems(json):
 d[key] = substitute_uuids(value, symtab)
 return d
 return json
@@ -226,12 +227,12 @@ def parse_uuids(json, symtab):
 for element in json:
 parse_uuids(element, symtab)
 elif type(json) == dict:
-for value in json.itervalues():
+for value in six.itervalues(json):
 parse_uuids(value, symtab)


 def idltest_find_simple(idl, i):
-for row in idl.tables["simple"].rows.itervalues():
+for row in six.itervalues(idl.tables["simple"].rows):
 if row.i == i:
 return row
 return None
@@ -254,7 +255,7 @@ def idl_set(idl, commands, step):

 def notify(event, row, updates=None):
 if updates:
-upcol = updates._data.keys()[0]
+upcol = list(updates._data.keys())[0]
 else:
 upcol = None
 events.append("%s|%s|%s" % (event, row.i, upcol))
@@ -30,6 +30,7 @@ import ovs.daemon
 import ovs.unixctl.server
 import ovs.vlog
 from six.moves import range
+import six


 VERSION = "0.99"
@@ -127,11 +128,11 @@ class Logical_Switch(object):
 ovs_ofctl("add-flow %s priority=0,action=drop" % self.short_name)

 def cleanup_ls(self):
-for port_no, tun_name, remote_ip in self.tunnels.itervalues():
+for port_no, tun_name, remote_ip in six.itervalues(self.tunnels):
 del_bfd(remote_ip)

 def update_flood(self):
-flood_ports = self.ports.values()
+flood_ports = list(self.ports.values())

 # Traffic flowing from one 'unknown-dst' should not be flooded to
 # port belonging to another 'unknown-dst'.
@@ -282,11 +283,11 @@ class Logical_Switch(object):
 for tunnel in old_tunnels.difference(tunnels):
 self.del_tunnel(tunnel)

-for mac in remote_macs.keys():
+for mac in six.iterkeys(remote_macs):
 if (self.remote_macs.get(mac) != remote_macs[mac]):
 self.add_remote_mac(mac, remote_macs[mac])

-for mac in self.remote_macs.keys():
+for mac in six.iterkeys(self.remote_macs):
 if mac not in remote_macs:
 self.del_remote_mac(mac)

@@ -308,7 +309,7 @@ class Logical_Switch(object):

 # Go through all the logical switch's interfaces that end with "-l"
 # and copy the statistics to logical_binding_stats.
-for interface in self.ports.iterkeys():
+for interface in six.iterkeys(self.ports):
 if not interface.endswith("-l"):
 continue
 # Physical ports can have a '-' as part of its name.
@@ -319,7 +320,7 @@ class Logical_Switch(object):
 if not uuid:
 continue

-for (mapfrom, mapto) in stats_map.iteritems():
+for mapfrom, mapto in six.iteritems(stats_map):
 value = ovs_vsctl("get interface %s statistics:%s"
 % (interface, mapfrom)).strip('"')
 vtep_ctl("set logical_binding_stats %s %s=%s"
@@ -435,7 +436,7 @@ def run_bfd():
 'bfd_params:check_tnl_key': 'false'}
 bfd_params_values = {}

-for key, default in bfd_params_default.iteritems():
+for key, default in six.iteritems(bfd_params_default):
 column = vtep_ctl("--if-exists get tunnel %s %s"
 % (tunnel, key))
 if not column:
@@ -443,7 +444,7 @@ def run_bfd():
 else:
 bfd_params_values[key] = column

-for key, value in bfd_params_values.iteritems():
+for key, value in six.iteritems(bfd_params_values):
 new_key = key.replace('_params', '')
 ovs_vsctl("set interface %s %s=%s" % (port, new_key, value))

@@ -465,7 +466,7 @@ def run_bfd():
 bfd_lconf_default = {'bfd_config_local:bfd_dst_ip': '169.254.1.0',
 'bfd_config_local:bfd_dst_mac':
 '00:23:20:00:00:01'}
-for key, value in bfd_lconf_default.iteritems():
+for key, value in six.iteritems(bfd_lconf_default):
 vtep_ctl("set tunnel %s %s=%s" % (tunnel, key, value))

 # bfd_config_remote options from VTEP DB should be populated to
@@ -713,7 +714,7 @@ def main():

 handle_physical()

-for ls_name, ls in Lswitches.items():
+for ls_name, ls in six.iteritems(Lswitches):
 ls.run()

 run_bfd()
@@ -34,6 +34,7 @@ import ovs.daemon
 import ovs.db.idl
 import ovs.unixctl
 import ovs.unixctl.server
+import six

 vlog = ovs.vlog.Vlog("ovs-xapi-sync")
 session = None
@@ -84,7 +85,7 @@ def get_network_by_bridge(br_name):
 recs = session.xenapi.network.get_all_records_where(
 'field "bridge"="%s"' % br_name)
 if len(recs) > 0:
-return recs.values()[0]
+return next(six.itervalues(recs))

 return None

@@ -294,7 +295,7 @@ def main():
 txn = ovs.db.idl.Transaction(idl)

 new_bridges = {}
-for row in idl.tables["Bridge"].rows.itervalues():
+for row in six.itervalues(idl.tables["Bridge"].rows):
 bridge_id = bridges.get(row.name)
 if bridge_id is None:
 # Configure the new bridge.
@@ -319,12 +320,12 @@ def main():
 bridges = new_bridges

 iface_by_name = {}
-for row in idl.tables["Interface"].rows.itervalues():
+for row in six.itervalues(idl.tables["Interface"].rows):
 iface_by_name[row.name] = row

 new_iface_ids = {}
 new_vm_ids = {}
-for row in idl.tables["Interface"].rows.itervalues():
+for row in six.itervalues(idl.tables["Interface"].rows):
 # Match up paired vif and tap devices.
 if row.name.startswith("vif"):
 vif = row