2
0
mirror of https://github.com/openvswitch/ovs synced 2025-10-23 14:57:06 +00:00

build-aux: Split extract-ofp-fields.

In order to be able to reuse the core extraction logic, split the command
in two parts. The core extraction logic is moved to python/build while
the command that writes the different files out of the extracted field
info is kept in build-aux.

Acked-by: Eelco Chaudron <echaudro@redhat.com>
Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
This commit is contained in:
Adrian Moreno
2022-07-08 20:03:03 +02:00
committed by Ilya Maximets
parent 7803743a0e
commit d542f0ea85
3 changed files with 615 additions and 513 deletions

View File

@@ -3,85 +3,23 @@
import getopt import getopt
import sys import sys
import os.path import os.path
import re
import xml.dom.minidom import xml.dom.minidom
import build.nroff import build.nroff
line = "" from build.extract_ofp_fields import (
extract_ofp_fields,
PREREQS,
OXM_CLASSES,
VERSION,
fatal,
n_errors,
)
# Maps from user-friendly version number to its protocol encoding. VERSION_REVERSE = dict((v, k) for k, v in VERSION.items())
VERSION = {"1.0": 0x01,
"1.1": 0x02,
"1.2": 0x03,
"1.3": 0x04,
"1.4": 0x05,
"1.5": 0x06}
VERSION_REVERSE = dict((v,k) for k, v in VERSION.items())
TYPES = {"u8": (1, False),
"be16": (2, False),
"be32": (4, False),
"MAC": (6, False),
"be64": (8, False),
"be128": (16, False),
"tunnelMD": (124, True)}
FORMATTING = {"decimal": ("MFS_DECIMAL", 1, 8),
"hexadecimal": ("MFS_HEXADECIMAL", 1, 127),
"ct state": ("MFS_CT_STATE", 4, 4),
"Ethernet": ("MFS_ETHERNET", 6, 6),
"IPv4": ("MFS_IPV4", 4, 4),
"IPv6": ("MFS_IPV6", 16, 16),
"OpenFlow 1.0 port": ("MFS_OFP_PORT", 2, 2),
"OpenFlow 1.1+ port": ("MFS_OFP_PORT_OXM", 4, 4),
"frag": ("MFS_FRAG", 1, 1),
"tunnel flags": ("MFS_TNL_FLAGS", 2, 2),
"TCP flags": ("MFS_TCP_FLAGS", 2, 2),
"packet type": ("MFS_PACKET_TYPE", 4, 4)}
PREREQS = {"none": "MFP_NONE",
"Ethernet": "MFP_ETHERNET",
"ARP": "MFP_ARP",
"VLAN VID": "MFP_VLAN_VID",
"IPv4": "MFP_IPV4",
"IPv6": "MFP_IPV6",
"IPv4/IPv6": "MFP_IP_ANY",
"NSH": "MFP_NSH",
"CT": "MFP_CT_VALID",
"MPLS": "MFP_MPLS",
"TCP": "MFP_TCP",
"UDP": "MFP_UDP",
"SCTP": "MFP_SCTP",
"ICMPv4": "MFP_ICMPV4",
"ICMPv6": "MFP_ICMPV6",
"ND": "MFP_ND",
"ND solicit": "MFP_ND_SOLICIT",
"ND advert": "MFP_ND_ADVERT"}
# Maps a name prefix into an (experimenter ID, class) pair, so:
#
# - Standard OXM classes are written as (0, <oxm_class>)
#
# - Experimenter OXM classes are written as (<oxm_vender>, 0xffff)
#
# If a name matches more than one prefix, the longest one is used.
OXM_CLASSES = {"NXM_OF_": (0, 0x0000, 'extension'),
"NXM_NX_": (0, 0x0001, 'extension'),
"NXOXM_NSH_": (0x005ad650, 0xffff, 'extension'),
"OXM_OF_": (0, 0x8000, 'standard'),
"OXM_OF_PKT_REG": (0, 0x8001, 'standard'),
"ONFOXM_ET_": (0x4f4e4600, 0xffff, 'standard'),
"ERICOXM_OF_": (0, 0x1000, 'extension'),
# This is the experimenter OXM class for Nicira, which is the
# one that OVS would be using instead of NXM_OF_ and NXM_NX_
# if OVS didn't have those grandfathered in. It is currently
# used only to test support for experimenter OXM, since there
# are barely any real uses of experimenter OXM in the wild.
"NXOXM_ET_": (0x00002320, 0xffff, 'extension')}
def oxm_name_to_class(name): def oxm_name_to_class(name):
prefix = '' prefix = ""
class_ = None class_ = None
for p, c in OXM_CLASSES.items(): for p, c in OXM_CLASSES.items():
if name.startswith(p) and len(p) > len(prefix): if name.startswith(p) and len(p) > len(prefix):
@@ -92,267 +30,76 @@ def oxm_name_to_class(name):
def is_standard_oxm(name): def is_standard_oxm(name):
oxm_vendor, oxm_class, oxm_class_type = oxm_name_to_class(name) oxm_vendor, oxm_class, oxm_class_type = oxm_name_to_class(name)
return oxm_class_type == 'standard' return oxm_class_type == "standard"
def decode_version_range(range):
if range in VERSION:
return (VERSION[range], VERSION[range])
elif range.endswith('+'):
return (VERSION[range[:-1]], max(VERSION.values()))
else:
a, b = re.match(r'^([^-]+)-([^-]+)$', range).groups()
return (VERSION[a], VERSION[b])
def get_line():
global line
global line_number
line = input_file.readline()
line_number += 1
if line == "":
fatal("unexpected end of input")
n_errors = 0
def error(msg):
global n_errors
sys.stderr.write("%s:%d: %s\n" % (file_name, line_number, msg))
n_errors += 1
def fatal(msg):
error(msg)
sys.exit(1)
def usage(): def usage():
argv0 = os.path.basename(sys.argv[0]) argv0 = os.path.basename(sys.argv[0])
print('''\ print(
"""\
%(argv0)s, for extracting OpenFlow field properties from meta-flow.h %(argv0)s, for extracting OpenFlow field properties from meta-flow.h
usage: %(argv0)s INPUT [--meta-flow | --nx-match] usage: %(argv0)s INPUT [--meta-flow | --nx-match]
where INPUT points to lib/meta-flow.h in the source directory. where INPUT points to lib/meta-flow.h in the source directory.
Depending on the option given, the output written to stdout is intended to be Depending on the option given, the output written to stdout is intended to be
saved either as lib/meta-flow.inc or lib/nx-match.inc for the respective C saved either as lib/meta-flow.inc or lib/nx-match.inc for the respective C
file to #include.\ file to #include.\
''' % {"argv0": argv0}) """
% {"argv0": argv0}
)
sys.exit(0) sys.exit(0)
def make_sizeof(s):
m = re.match(r'(.*) up to (.*)', s)
if m:
struct, member = m.groups()
return "offsetof(%s, %s)" % (struct, member)
else:
return "sizeof(%s)" % s
def parse_oxms(s, prefix, n_bytes):
if s == 'none':
return ()
return tuple(parse_oxm(s2.strip(), prefix, n_bytes) for s2 in s.split(','))
match_types = dict()
def parse_oxm(s, prefix, n_bytes):
global match_types
m = re.match('([A-Z0-9_]+)\(([0-9]+)\) since(?: OF(1\.[0-9]+) and)? v([12]\.[0-9]+)$', s)
if not m:
fatal("%s: syntax error parsing %s" % (s, prefix))
name, oxm_type, of_version, ovs_version = m.groups()
class_ = oxm_name_to_class(name)
if class_ is None:
fatal("unknown OXM class for %s" % name)
oxm_vendor, oxm_class, oxm_class_type = class_
if class_ in match_types:
if oxm_type in match_types[class_]:
fatal("duplicate match type for %s (conflicts with %s)" %
(name, match_types[class_][oxm_type]))
else:
match_types[class_] = dict()
match_types[class_][oxm_type] = name
# Normally the oxm_length is the size of the field, but for experimenter
# OXMs oxm_length also includes the 4-byte experimenter ID.
oxm_length = n_bytes
if oxm_class == 0xffff:
oxm_length += 4
header = (oxm_vendor, oxm_class, int(oxm_type), oxm_length)
if of_version:
if oxm_class_type == 'extension':
fatal("%s: OXM extension can't have OpenFlow version" % name)
if of_version not in VERSION:
fatal("%s: unknown OpenFlow version %s" % (name, of_version))
of_version_nr = VERSION[of_version]
if of_version_nr < VERSION['1.2']:
fatal("%s: claimed version %s predates OXM" % (name, of_version))
else:
if oxm_class_type == 'standard':
fatal("%s: missing OpenFlow version number" % name)
of_version_nr = 0
return (header, name, of_version_nr, ovs_version)
def parse_field(mff, comment):
f = {'mff': mff}
# First line of comment is the field name.
m = re.match(r'"([^"]+)"(?:\s+\(aka "([^"]+)"\))?(?:\s+\(.*\))?\.', comment[0])
if not m:
fatal("%s lacks field name" % mff)
f['name'], f['extra_name'] = m.groups()
# Find the last blank line the comment. The field definitions
# start after that.
blank = None
for i in range(len(comment)):
if not comment[i]:
blank = i
if not blank:
fatal("%s: missing blank line in comment" % mff)
d = {}
for key in ("Type", "Maskable", "Formatting", "Prerequisites",
"Access", "Prefix lookup member",
"OXM", "NXM", "OF1.0", "OF1.1"):
d[key] = None
for fline in comment[blank + 1:]:
m = re.match(r'([^:]+):\s+(.*)\.$', fline)
if not m:
fatal("%s: syntax error parsing key-value pair as part of %s"
% (fline, mff))
key, value = m.groups()
if key not in d:
fatal("%s: unknown key" % key)
elif key == 'Code point':
d[key] += [value]
elif d[key] is not None:
fatal("%s: duplicate key" % key)
d[key] = value
for key, value in d.items():
if not value and key not in ("OF1.0", "OF1.1",
"Prefix lookup member", "Notes"):
fatal("%s: missing %s" % (mff, key))
m = re.match(r'([a-zA-Z0-9]+)(?: \(low ([0-9]+) bits\))?$', d['Type'])
if not m:
fatal("%s: syntax error in type" % mff)
type_ = m.group(1)
if type_ not in TYPES:
fatal("%s: unknown type %s" % (mff, d['Type']))
f['n_bytes'] = TYPES[type_][0]
if m.group(2):
f['n_bits'] = int(m.group(2))
if f['n_bits'] > f['n_bytes'] * 8:
fatal("%s: more bits (%d) than field size (%d)"
% (mff, f['n_bits'], 8 * f['n_bytes']))
else:
f['n_bits'] = 8 * f['n_bytes']
f['variable'] = TYPES[type_][1]
if d['Maskable'] == 'no':
f['mask'] = 'MFM_NONE'
elif d['Maskable'] == 'bitwise':
f['mask'] = 'MFM_FULLY'
else:
fatal("%s: unknown maskable %s" % (mff, d['Maskable']))
fmt = FORMATTING.get(d['Formatting'])
if not fmt:
fatal("%s: unknown format %s" % (mff, d['Formatting']))
f['formatting'] = d['Formatting']
if f['n_bytes'] < fmt[1] or f['n_bytes'] > fmt[2]:
fatal("%s: %d-byte field can't be formatted as %s"
% (mff, f['n_bytes'], d['Formatting']))
f['string'] = fmt[0]
f['prereqs'] = d['Prerequisites']
if f['prereqs'] not in PREREQS:
fatal("%s: unknown prerequisites %s" % (mff, d['Prerequisites']))
if d['Access'] == 'read-only':
f['writable'] = False
elif d['Access'] == 'read/write':
f['writable'] = True
else:
fatal("%s: unknown access %s" % (mff, d['Access']))
f['OF1.0'] = d['OF1.0']
if not d['OF1.0'] in (None, 'exact match', 'CIDR mask'):
fatal("%s: unknown OF1.0 match type %s" % (mff, d['OF1.0']))
f['OF1.1'] = d['OF1.1']
if not d['OF1.1'] in (None, 'exact match', 'bitwise mask'):
fatal("%s: unknown OF1.1 match type %s" % (mff, d['OF1.1']))
f['OXM'] = (parse_oxms(d['OXM'], 'OXM', f['n_bytes']) +
parse_oxms(d['NXM'], 'NXM', f['n_bytes']))
f['prefix'] = d["Prefix lookup member"]
return f
def protocols_to_c(protocols): def protocols_to_c(protocols):
if protocols == set(['of10', 'of11', 'oxm']): if protocols == set(["of10", "of11", "oxm"]):
return 'OFPUTIL_P_ANY' return "OFPUTIL_P_ANY"
elif protocols == set(['of11', 'oxm']): elif protocols == set(["of11", "oxm"]):
return 'OFPUTIL_P_NXM_OF11_UP' return "OFPUTIL_P_NXM_OF11_UP"
elif protocols == set(['oxm']): elif protocols == set(["oxm"]):
return 'OFPUTIL_P_NXM_OXM_ANY' return "OFPUTIL_P_NXM_OXM_ANY"
elif protocols == set([]): elif protocols == set([]):
return 'OFPUTIL_P_NONE' return "OFPUTIL_P_NONE"
else: else:
assert False assert False
def autogen_c_comment(): def autogen_c_comment():
return [ return [
"/* Generated automatically; do not modify! -*- buffer-read-only: t -*- */", "/* Generated automatically; do not modify! "
""] "-*- buffer-read-only: t -*- */",
"",
]
def make_meta_flow(meta_flow_h): def make_meta_flow(meta_flow_h):
fields = extract_ofp_fields(meta_flow_h) fields = extract_ofp_fields(meta_flow_h)
output = autogen_c_comment() output = autogen_c_comment()
for f in fields: for f in fields:
output += ["{"] output += ["{"]
output += [" %s," % f['mff']] output += [" %s," % f["mff"]]
if f['extra_name']: if f["extra_name"]:
output += [" \"%s\", \"%s\"," % (f['name'], f['extra_name'])] output += [' "%s", "%s",' % (f["name"], f["extra_name"])]
else: else:
output += [" \"%s\", NULL," % f['name']] output += [' "%s", NULL,' % f["name"]]
if f['variable']: if f["variable"]:
variable = 'true' variable = "true"
else: else:
variable = 'false' variable = "false"
output += [" %d, %d, %s," % (f['n_bytes'], f['n_bits'], variable)] output += [" %d, %d, %s," % (f["n_bytes"], f["n_bits"], variable)]
if f['writable']: if f["writable"]:
rw = 'true' rw = "true"
else: else:
rw = 'false' rw = "false"
output += [" %s, %s, %s, %s, false," output += [
% (f['mask'], f['string'], PREREQS[f['prereqs']], rw)] " %s, %s, %s, %s, false,"
% (f["mask"], f["string"], PREREQS[f["prereqs"]], rw)
]
oxm = f['OXM'] oxm = f["OXM"]
of10 = f['OF1.0'] of10 = f["OF1.0"]
of11 = f['OF1.1'] of11 = f["OF1.1"]
if f['mff'] in ('MFF_DL_VLAN', 'MFF_DL_VLAN_PCP'): if f["mff"] in ("MFF_DL_VLAN", "MFF_DL_VLAN_PCP"):
# MFF_DL_VLAN and MFF_DL_VLAN_PCP don't exactly correspond to # MFF_DL_VLAN and MFF_DL_VLAN_PCP don't exactly correspond to
# OF1.1, nor do they have NXM or OXM assignments, but their # OF1.1, nor do they have NXM or OXM assignments, but their
# meanings can be expressed in every protocol, which is the goal of # meanings can be expressed in every protocol, which is the goal of
@@ -367,25 +114,25 @@ def make_meta_flow(meta_flow_h):
if oxm: if oxm:
protocols |= set(["oxm"]) protocols |= set(["oxm"])
if f['mask'] == 'MFM_FULLY': if f["mask"] == "MFM_FULLY":
cidr_protocols = protocols.copy() cidr_protocols = protocols.copy()
bitwise_protocols = protocols.copy() bitwise_protocols = protocols.copy()
if of10 == 'exact match': if of10 == "exact match":
bitwise_protocols -= set(['of10']) bitwise_protocols -= set(["of10"])
cidr_protocols -= set(['of10']) cidr_protocols -= set(["of10"])
elif of10 == 'CIDR mask': elif of10 == "CIDR mask":
bitwise_protocols -= set(['of10']) bitwise_protocols -= set(["of10"])
else: else:
assert of10 is None assert of10 is None
if of11 == 'exact match': if of11 == "exact match":
bitwise_protocols -= set(['of11']) bitwise_protocols -= set(["of11"])
cidr_protocols -= set(['of11']) cidr_protocols -= set(["of11"])
else: else:
assert of11 in (None, 'bitwise mask') assert of11 in (None, "bitwise mask")
else: else:
assert f['mask'] == 'MFM_NONE' assert f["mask"] == "MFM_NONE"
cidr_protocols = set([]) cidr_protocols = set([])
bitwise_protocols = set([]) bitwise_protocols = set([])
@@ -393,8 +140,8 @@ def make_meta_flow(meta_flow_h):
output += [" %s," % protocols_to_c(cidr_protocols)] output += [" %s," % protocols_to_c(cidr_protocols)]
output += [" %s," % protocols_to_c(bitwise_protocols)] output += [" %s," % protocols_to_c(bitwise_protocols)]
if f['prefix']: if f["prefix"]:
output += [" FLOW_U32OFS(%s)," % f['prefix']] output += [" FLOW_U32OFS(%s)," % f["prefix"]]
else: else:
output += [" -1, /* not usable for prefix lookup */"] output += [" -1, /* not usable for prefix lookup */"]
@@ -409,147 +156,37 @@ def make_nx_match(meta_flow_h):
print("static struct nxm_field_index all_nxm_fields[] = {") print("static struct nxm_field_index all_nxm_fields[] = {")
for f in fields: for f in fields:
# Sort by OpenFlow version number (nx-match.c depends on this). # Sort by OpenFlow version number (nx-match.c depends on this).
for oxm in sorted(f['OXM'], key=lambda x: x[2]): for oxm in sorted(f["OXM"], key=lambda x: x[2]):
header = ("NXM_HEADER(0x%x,0x%x,%s,0,%d)" % oxm[0]) header = "NXM_HEADER(0x%x,0x%x,%s,0,%d)" % oxm[0]
print("""{ .nf = { %s, %d, "%s", %s } },""" % ( print(
header, oxm[2], oxm[1], f['mff'])) """{ .nf = { %s, %d, "%s", %s } },"""
% (header, oxm[2], oxm[1], f["mff"])
)
print("};") print("};")
for oline in output: for oline in output:
print(oline) print(oline)
def extract_ofp_fields(fn):
global file_name
global input_file
global line_number
global line
file_name = fn
input_file = open(file_name)
line_number = 0
fields = []
while True:
get_line()
if re.match('enum.*mf_field_id', line):
break
while True:
get_line()
first_line_number = line_number
here = '%s:%d' % (file_name, line_number)
if (line.startswith('/*')
or line.startswith(' *')
or line.startswith('#')
or not line
or line.isspace()):
continue
elif re.match('}', line) or re.match('\s+MFF_N_IDS', line):
break
# Parse the comment preceding an MFF_ constant into 'comment',
# one line to an array element.
line = line.strip()
if not line.startswith('/*'):
fatal("unexpected syntax between fields")
line = line[1:]
comment = []
end = False
while not end:
line = line.strip()
if line.startswith('*/'):
get_line()
break
if not line.startswith('*'):
fatal("unexpected syntax within field")
line = line[1:]
if line.startswith(' '):
line = line[1:]
if line.startswith(' ') and comment:
continuation = True
line = line.lstrip()
else:
continuation = False
if line.endswith('*/'):
line = line[:-2].rstrip()
end = True
else:
end = False
if continuation:
comment[-1] += " " + line
else:
comment += [line]
get_line()
# Drop blank lines at each end of comment.
while comment and not comment[0]:
comment = comment[1:]
while comment and not comment[-1]:
comment = comment[:-1]
# Parse the MFF_ constant(s).
mffs = []
while True:
m = re.match('\s+(MFF_[A-Z0-9_]+),?\s?$', line)
if not m:
break
mffs += [m.group(1)]
get_line()
if not mffs:
fatal("unexpected syntax looking for MFF_ constants")
if len(mffs) > 1 or '<N>' in comment[0]:
for mff in mffs:
# Extract trailing integer.
m = re.match('.*[^0-9]([0-9]+)$', mff)
if not m:
fatal("%s lacks numeric suffix in register group" % mff)
n = m.group(1)
# Search-and-replace <N> within the comment,
# and drop lines that have <x> for x != n.
instance = []
for x in comment:
y = x.replace('<N>', n)
if re.search('<[0-9]+>', y):
if ('<%s>' % n) not in y:
continue
y = re.sub('<[0-9]+>', '', y)
instance += [y.strip()]
fields += [parse_field(mff, instance)]
else:
fields += [parse_field(mffs[0], comment)]
continue
input_file.close()
if n_errors:
sys.exit(1)
return fields
## ------------------------ ## ## ------------------------ ##
## Documentation Generation ## ## Documentation Generation ##
## ------------------------ ## ## ------------------------ ##
def field_to_xml(field_node, f, body, summary): def field_to_xml(field_node, f, body, summary):
f["used"] = True f["used"] = True
# Summary. # Summary.
if field_node.hasAttribute('internal'): if field_node.hasAttribute("internal"):
return return
min_of_version = None min_of_version = None
min_ovs_version = None min_ovs_version = None
for header, name, of_version_nr, ovs_version_s in f['OXM']: for header, name, of_version_nr, ovs_version_s in f["OXM"]:
if (is_standard_oxm(name) if is_standard_oxm(name) and (
and (min_ovs_version is None or of_version_nr < min_of_version)): min_ovs_version is None or of_version_nr < min_of_version
):
min_of_version = of_version_nr min_of_version = of_version_nr
ovs_version = [int(x) for x in ovs_version_s.split('.')] ovs_version = [int(x) for x in ovs_version_s.split(".")]
if min_ovs_version is None or ovs_version < min_ovs_version: if min_ovs_version is None or ovs_version < min_ovs_version:
min_ovs_version = ovs_version min_ovs_version = ovs_version
summary += ["\\fB%s\\fR" % f["name"]] summary += ["\\fB%s\\fR" % f["name"]]
@@ -565,124 +202,152 @@ def field_to_xml(field_node, f, body, summary):
if min_of_version is not None: if min_of_version is not None:
support += ["OF %s+" % VERSION_REVERSE[min_of_version]] support += ["OF %s+" % VERSION_REVERSE[min_of_version]]
if min_ovs_version is not None: if min_ovs_version is not None:
support += ["OVS %s+" % '.'.join([str(x) for x in min_ovs_version])] support += ["OVS %s+" % ".".join([str(x) for x in min_ovs_version])]
summary += ' and '.join(support) summary += " and ".join(support)
summary += ["\n"] summary += ["\n"]
# Full description. # Full description.
if field_node.hasAttribute('hidden'): if field_node.hasAttribute("hidden"):
return return
title = field_node.attributes['title'].nodeValue title = field_node.attributes["title"].nodeValue
body += [""".PP body += [
""".PP
\\fB%s Field\\fR \\fB%s Field\\fR
.TS .TS
tab(;); tab(;);
l lx. l lx.
""" % title] """
% title
]
body += ["Name:;\\fB%s\\fR" % f["name"]] body += ["Name:;\\fB%s\\fR" % f["name"]]
if f["extra_name"]: if f["extra_name"]:
body += [" (aka \\fB%s\\fR)" % f["extra_name"]] body += [" (aka \\fB%s\\fR)" % f["extra_name"]]
body += ['\n'] body += ["\n"]
body += ["Width:;"] body += ["Width:;"]
if f["n_bits"] != 8 * f["n_bytes"]: if f["n_bits"] != 8 * f["n_bytes"]:
body += ["%d bits (only the least-significant %d bits " body += [
"may be nonzero)" % (f["n_bytes"] * 8, f["n_bits"])] "%d bits (only the least-significant %d bits "
"may be nonzero)" % (f["n_bytes"] * 8, f["n_bits"])
]
elif f["n_bits"] <= 128: elif f["n_bits"] <= 128:
body += ["%d bits" % f["n_bits"]] body += ["%d bits" % f["n_bits"]]
else: else:
body += ["%d bits (%d bytes)" % (f["n_bits"], f["n_bits"] / 8)] body += ["%d bits (%d bytes)" % (f["n_bits"], f["n_bits"] / 8)]
body += ['\n'] body += ["\n"]
body += ["Format:;%s\n" % f["formatting"]] body += ["Format:;%s\n" % f["formatting"]]
masks = {"MFM_NONE": "not maskable", masks = {
"MFM_FULLY": "arbitrary bitwise masks"} "MFM_NONE": "not maskable",
"MFM_FULLY": "arbitrary bitwise masks",
}
body += ["Masking:;%s\n" % masks[f["mask"]]] body += ["Masking:;%s\n" % masks[f["mask"]]]
body += ["Prerequisites:;%s\n" % f["prereqs"]] body += ["Prerequisites:;%s\n" % f["prereqs"]]
access = {True: "read/write", access = {True: "read/write", False: "read-only"}[f["writable"]]
False: "read-only"}[f["writable"]]
body += ["Access:;%s\n" % access] body += ["Access:;%s\n" % access]
of10 = {None: "not supported", of10 = {
None: "not supported",
"exact match": "yes (exact match only)", "exact match": "yes (exact match only)",
"CIDR mask": "yes (CIDR match only)"} "CIDR mask": "yes (CIDR match only)",
}
body += ["OpenFlow 1.0:;%s\n" % of10[f["OF1.0"]]] body += ["OpenFlow 1.0:;%s\n" % of10[f["OF1.0"]]]
of11 = {None: "not supported", of11 = {
None: "not supported",
"exact match": "yes (exact match only)", "exact match": "yes (exact match only)",
"bitwise mask": "yes"} "bitwise mask": "yes",
}
body += ["OpenFlow 1.1:;%s\n" % of11[f["OF1.1"]]] body += ["OpenFlow 1.1:;%s\n" % of11[f["OF1.1"]]]
oxms = [] oxms = []
for header, name, of_version_nr, ovs_version in [x for x in sorted(f['OXM'], key=lambda x: x[2]) if is_standard_oxm(x[1])]: for header, name, of_version_nr, ovs_version in [
x
for x in sorted(f["OXM"], key=lambda x: x[2])
if is_standard_oxm(x[1])
]:
of_version = VERSION_REVERSE[of_version_nr] of_version = VERSION_REVERSE[of_version_nr]
oxms += [r"\fB%s\fR (%d) since OpenFlow %s and Open vSwitch %s" % (name, header[2], of_version, ovs_version)] oxms += [
r"\fB%s\fR (%d) since OpenFlow %s and Open vSwitch %s"
% (name, header[2], of_version, ovs_version)
]
if not oxms: if not oxms:
oxms = ['none'] oxms = ["none"]
body += ['OXM:;T{\n%s\nT}\n' % r'\[char59] '.join(oxms)] body += ["OXM:;T{\n%s\nT}\n" % r"\[char59] ".join(oxms)]
nxms = [] nxms = []
for header, name, of_version_nr, ovs_version in [x for x in sorted(f['OXM'], key=lambda x: x[2]) if not is_standard_oxm(x[1])]: for header, name, of_version_nr, ovs_version in [
nxms += [r"\fB%s\fR (%d) since Open vSwitch %s" % (name, header[2], ovs_version)] x
for x in sorted(f["OXM"], key=lambda x: x[2])
if not is_standard_oxm(x[1])
]:
nxms += [
r"\fB%s\fR (%d) since Open vSwitch %s"
% (name, header[2], ovs_version)
]
if not nxms: if not nxms:
nxms = ['none'] nxms = ["none"]
body += ['NXM:;T{\n%s\nT}\n' % r'\[char59] '.join(nxms)] body += ["NXM:;T{\n%s\nT}\n" % r"\[char59] ".join(nxms)]
body += [".TE\n"] body += [".TE\n"]
body += ['.PP\n'] body += [".PP\n"]
body += [build.nroff.block_xml_to_nroff(field_node.childNodes)] body += [build.nroff.block_xml_to_nroff(field_node.childNodes)]
def group_xml_to_nroff(group_node, fields): def group_xml_to_nroff(group_node, fields):
title = group_node.attributes['title'].nodeValue title = group_node.attributes["title"].nodeValue
summary = [] summary = []
body = [] body = []
for node in group_node.childNodes: for node in group_node.childNodes:
if node.nodeType == node.ELEMENT_NODE and node.tagName == 'field': if node.nodeType == node.ELEMENT_NODE and node.tagName == "field":
id_ = node.attributes['id'].nodeValue id_ = node.attributes["id"].nodeValue
field_to_xml(node, fields[id_], body, summary) field_to_xml(node, fields[id_], body, summary)
else: else:
body += [build.nroff.block_xml_to_nroff([node])] body += [build.nroff.block_xml_to_nroff([node])]
content = [ content = [
'.bp\n', ".bp\n",
'.SH \"%s\"\n' % build.nroff.text_to_nroff(title.upper() + " FIELDS"), '.SH "%s"\n' % build.nroff.text_to_nroff(title.upper() + " FIELDS"),
'.SS "Summary:"\n', '.SS "Summary:"\n',
'.TS\n', ".TS\n",
'tab(;);\n', "tab(;);\n",
'l l l l l l l.\n', "l l l l l l l.\n",
'Name;Bytes;Mask;RW?;Prereqs;NXM/OXM Support\n', "Name;Bytes;Mask;RW?;Prereqs;NXM/OXM Support\n",
'\_;\_;\_;\_;\_;\_\n'] "\_;\_;\_;\_;\_;\_\n",
]
content += summary content += summary
content += ['.TE\n'] content += [".TE\n"]
content += body content += body
return ''.join(content) return "".join(content)
def make_oxm_classes_xml(document): def make_oxm_classes_xml(document):
s = '''tab(;); s = """tab(;);
l l l. l l l.
Prefix;Vendor;Class Prefix;Vendor;Class
\_;\_;\_ \_;\_;\_
''' """
for key in sorted(OXM_CLASSES, key=OXM_CLASSES.get): for key in sorted(OXM_CLASSES, key=OXM_CLASSES.get):
vendor, class_, class_type = OXM_CLASSES.get(key) vendor, class_, class_type = OXM_CLASSES.get(key)
s += r"\fB%s\fR;" % key.rstrip('_') s += r"\fB%s\fR;" % key.rstrip("_")
if vendor: if vendor:
s += r"\fL0x%08x\fR;" % vendor s += r"\fL0x%08x\fR;" % vendor
else: else:
s += "(none);" s += "(none);"
s += r"\fL0x%04x\fR;" % class_ s += r"\fL0x%04x\fR;" % class_
s += "\n" s += "\n"
e = document.createElement('tbl') e = document.createElement("tbl")
e.appendChild(document.createTextNode(s)) e.appendChild(document.createTextNode(s))
return e return e
def recursively_replace(node, name, replacement): def recursively_replace(node, name, replacement):
for child in node.childNodes: for child in node.childNodes:
if child.nodeType == node.ELEMENT_NODE: if child.nodeType == node.ELEMENT_NODE:
@@ -691,11 +356,12 @@ def recursively_replace(node, name, replacement):
else: else:
recursively_replace(child, name, replacement) recursively_replace(child, name, replacement)
def make_ovs_fields(meta_flow_h, meta_flow_xml): def make_ovs_fields(meta_flow_h, meta_flow_xml):
fields = extract_ofp_fields(meta_flow_h) fields = extract_ofp_fields(meta_flow_h)
fields_map = {} fields_map = {}
for f in fields: for f in fields:
fields_map[f['mff']] = f fields_map[f["mff"]] = f
document = xml.dom.minidom.parse(meta_flow_xml) document = xml.dom.minidom.parse(meta_flow_xml)
doc = document.documentElement doc = document.documentElement
@@ -704,7 +370,8 @@ def make_ovs_fields(meta_flow_h, meta_flow_xml):
if version == None: if version == None:
version = "UNKNOWN" version = "UNKNOWN"
print('''\ print(
"""\
'\\" tp '\\" tp
.\\" -*- mode: troff; coding: utf-8 -*- .\\" -*- mode: troff; coding: utf-8 -*-
.TH "ovs\-fields" 7 "%s" "Open vSwitch" "Open vSwitch Manual" .TH "ovs\-fields" 7 "%s" "Open vSwitch" "Open vSwitch Manual"
@@ -740,11 +407,13 @@ def make_ovs_fields(meta_flow_h, meta_flow_xml):
ovs\-fields \- protocol header fields in OpenFlow and Open vSwitch ovs\-fields \- protocol header fields in OpenFlow and Open vSwitch
. .
.PP .PP
''' % version) """
% version
)
recursively_replace(doc, 'oxm_classes', make_oxm_classes_xml(document)) recursively_replace(doc, "oxm_classes", make_oxm_classes_xml(document))
s = '' s = ""
for node in doc.childNodes: for node in doc.childNodes:
if node.nodeType == node.ELEMENT_NODE and node.tagName == "group": if node.nodeType == node.ELEMENT_NODE and node.tagName == "group":
s += group_xml_to_nroff(node, fields_map) s += group_xml_to_nroff(node, fields_map)
@@ -757,9 +426,10 @@ ovs\-fields \- protocol header fields in OpenFlow and Open vSwitch
for f in fields: for f in fields:
if "used" not in f: if "used" not in f:
fatal("%s: field not documented " fatal(
"(please add documentation in lib/meta-flow.xml)" "%s: field not documented "
% f["mff"]) "(please add documentation in lib/meta-flow.xml)" % f["mff"]
)
if n_errors: if n_errors:
sys.exit(1) sys.exit(1)
@@ -769,26 +439,27 @@ ovs\-fields \- protocol header fields in OpenFlow and Open vSwitch
# Life is easier with nroff if we don't try to feed it Unicode. # Life is easier with nroff if we don't try to feed it Unicode.
# Fortunately, we only use a few characters outside the ASCII range. # Fortunately, we only use a few characters outside the ASCII range.
oline = oline.replace(u'\u2208', r'\[mo]') oline = oline.replace(u"\u2208", r"\[mo]")
oline = oline.replace(u'\u2260', r'\[!=]') oline = oline.replace(u"\u2260", r"\[!=]")
oline = oline.replace(u'\u2264', r'\[<=]') oline = oline.replace(u"\u2264", r"\[<=]")
oline = oline.replace(u'\u2265', r'\[>=]') oline = oline.replace(u"\u2265", r"\[>=]")
oline = oline.replace(u'\u00d7', r'\[mu]') oline = oline.replace(u"\u00d7", r"\[mu]")
if len(oline): if len(oline):
output += [oline] output += [oline]
# nroff tends to ignore .bp requests if they come after .PP requests, # nroff tends to ignore .bp requests if they come after .PP requests,
# so remove .PPs that precede .bp. # so remove .PPs that precede .bp.
for i in range(len(output)): for i in range(len(output)):
if output[i] == '.bp': if output[i] == ".bp":
j = i - 1 j = i - 1
while j >= 0 and output[j] == '.PP': while j >= 0 and output[j] == ".PP":
output[j] = None output[j] = None
j -= 1 j -= 1
for i in range(len(output)): for i in range(len(output)):
if output[i] is not None: if output[i] is not None:
print(output[i]) print(output[i])
## ------------ ## ## ------------ ##
## Main Program ## ## Main Program ##
## ------------ ## ## ------------ ##
@@ -796,8 +467,9 @@ ovs\-fields \- protocol header fields in OpenFlow and Open vSwitch
if __name__ == "__main__": if __name__ == "__main__":
argv0 = sys.argv[0] argv0 = sys.argv[0]
try: try:
options, args = getopt.gnu_getopt(sys.argv[1:], 'h', options, args = getopt.gnu_getopt(
['help', 'ovs-version=']) sys.argv[1:], "h", ["help", "ovs-version="]
)
except getopt.GetoptError as geo: except getopt.GetoptError as geo:
sys.stderr.write("%s: %s\n" % (argv0, geo.msg)) sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
sys.exit(1) sys.exit(1)
@@ -805,32 +477,38 @@ if __name__ == "__main__":
global version global version
version = None version = None
for key, value in options: for key, value in options:
if key in ['-h', '--help']: if key in ["-h", "--help"]:
usage() usage()
elif key == '--ovs-version': elif key == "--ovs-version":
version = value version = value
else: else:
sys.exit(0) sys.exit(0)
if not args: if not args:
sys.stderr.write("%s: missing command argument " sys.stderr.write(
"(use --help for help)\n" % argv0) "%s: missing command argument " "(use --help for help)\n" % argv0
)
sys.exit(1) sys.exit(1)
commands = {"meta-flow": (make_meta_flow, 1), commands = {
"meta-flow": (make_meta_flow, 1),
"nx-match": (make_nx_match, 1), "nx-match": (make_nx_match, 1),
"ovs-fields": (make_ovs_fields, 2)} "ovs-fields": (make_ovs_fields, 2),
}
if not args[0] in commands: if not args[0] in commands:
sys.stderr.write("%s: unknown command \"%s\" " sys.stderr.write(
"(use --help for help)\n" % (argv0, args[0])) '%s: unknown command "%s" '
"(use --help for help)\n" % (argv0, args[0])
)
sys.exit(1) sys.exit(1)
func, n_args = commands[args[0]] func, n_args = commands[args[0]]
if len(args) - 1 != n_args: if len(args) - 1 != n_args:
sys.stderr.write("%s: \"%s\" requires %d arguments but %d " sys.stderr.write(
"provided\n" '%s: "%s" requires %d arguments but %d '
% (argv0, args[0], n_args, len(args) - 1)) "provided\n" % (argv0, args[0], n_args, len(args) - 1)
)
sys.exit(1) sys.exit(1)
func(*args[1:]) func(*args[1:])

View File

@@ -51,6 +51,7 @@ ovs_pyfiles = \
# so they are not installed. # so they are not installed.
EXTRA_DIST += \ EXTRA_DIST += \
python/build/__init__.py \ python/build/__init__.py \
python/build/extract_ofp_fields.py \
python/build/nroff.py \ python/build/nroff.py \
python/build/soutil.py python/build/soutil.py
@@ -69,10 +70,12 @@ PYCOV_CLEAN_FILES += $(PYFILES:.py=.py,cover)
FLAKE8_PYFILES += \ FLAKE8_PYFILES += \
$(filter-out python/ovs/compat/% python/ovs/dirs.py,$(PYFILES)) \ $(filter-out python/ovs/compat/% python/ovs/dirs.py,$(PYFILES)) \
python/setup.py \
python/build/__init__.py \ python/build/__init__.py \
python/build/extract_ofp_fields.py \
python/build/nroff.py \ python/build/nroff.py \
python/ovs/dirs.py.template python/build/soutil.py \
python/ovs/dirs.py.template \
python/setup.py
nobase_pkgdata_DATA = $(ovs_pyfiles) $(ovstest_pyfiles) nobase_pkgdata_DATA = $(ovs_pyfiles) $(ovstest_pyfiles)
ovs-install-data-local: ovs-install-data-local:

View File

@@ -0,0 +1,421 @@
import sys
import re
# Most recently read input line; shared (via 'global') between get_line()
# and extract_ofp_fields().
line = ""
# Maps from user-friendly version number to its protocol encoding.
VERSION = {
    "1.0": 0x01,
    "1.1": 0x02,
    "1.2": 0x03,
    "1.3": 0x04,
    "1.4": 0x05,
    "1.5": 0x06,
}
# Inverse of VERSION: protocol encoding -> user-friendly version string.
VERSION_REVERSE = dict((v, k) for k, v in VERSION.items())
# Maps a field "Type" string to a (size in bytes, variable-length?) pair.
# parse_field() uses the size as the field's n_bytes and the flag as its
# "variable" attribute.
TYPES = {
    "u8": (1, False),
    "be16": (2, False),
    "be32": (4, False),
    "MAC": (6, False),
    "be64": (8, False),
    "be128": (16, False),
    "tunnelMD": (124, True),
}
# Maps a field "Formatting" string to a tuple of (MFS_* string-formatting
# enum constant, minimum field size in bytes, maximum field size in bytes).
# parse_field() rejects a field whose n_bytes falls outside the range.
FORMATTING = {
    "decimal": ("MFS_DECIMAL", 1, 8),
    "hexadecimal": ("MFS_HEXADECIMAL", 1, 127),
    "ct state": ("MFS_CT_STATE", 4, 4),
    "Ethernet": ("MFS_ETHERNET", 6, 6),
    "IPv4": ("MFS_IPV4", 4, 4),
    "IPv6": ("MFS_IPV6", 16, 16),
    "OpenFlow 1.0 port": ("MFS_OFP_PORT", 2, 2),
    "OpenFlow 1.1+ port": ("MFS_OFP_PORT_OXM", 4, 4),
    "frag": ("MFS_FRAG", 1, 1),
    "tunnel flags": ("MFS_TNL_FLAGS", 2, 2),
    "TCP flags": ("MFS_TCP_FLAGS", 2, 2),
    "packet type": ("MFS_PACKET_TYPE", 4, 4),
}
# Maps a field "Prerequisites" string to the corresponding MFP_* prerequisite
# enum constant name.
PREREQS = {
    "none": "MFP_NONE",
    "Ethernet": "MFP_ETHERNET",
    "ARP": "MFP_ARP",
    "VLAN VID": "MFP_VLAN_VID",
    "IPv4": "MFP_IPV4",
    "IPv6": "MFP_IPV6",
    "IPv4/IPv6": "MFP_IP_ANY",
    "NSH": "MFP_NSH",
    "CT": "MFP_CT_VALID",
    "MPLS": "MFP_MPLS",
    "TCP": "MFP_TCP",
    "UDP": "MFP_UDP",
    "SCTP": "MFP_SCTP",
    "ICMPv4": "MFP_ICMPV4",
    "ICMPv6": "MFP_ICMPV6",
    "ND": "MFP_ND",
    "ND solicit": "MFP_ND_SOLICIT",
    "ND advert": "MFP_ND_ADVERT",
}
# Maps an OXM name prefix to an (experimenter ID, class, kind) triple:
#
#   - Standard OXM classes are written as (0, <oxm_class>, ...)
#
#   - Experimenter OXM classes are written as (<oxm_vendor>, 0xffff, ...)
#
# When a name matches several prefixes, the longest match wins.
OXM_CLASSES = {
    "NXM_OF_": (0, 0x0000, "extension"),
    "NXM_NX_": (0, 0x0001, "extension"),
    "NXOXM_NSH_": (0x005AD650, 0xFFFF, "extension"),
    "OXM_OF_": (0, 0x8000, "standard"),
    "OXM_OF_PKT_REG": (0, 0x8001, "standard"),
    "ONFOXM_ET_": (0x4F4E4600, 0xFFFF, "standard"),
    "ERICOXM_OF_": (0, 0x1000, "extension"),
    # Nicira's experimenter OXM class, which OVS would use instead of
    # NXM_OF_ and NXM_NX_ if those hadn't been grandfathered in.  Kept
    # around only to exercise experimenter OXM support, since real-world
    # uses of experimenter OXM are rare.
    "NXOXM_ET_": (0x00002320, 0xFFFF, "extension"),
}


def oxm_name_to_class(name):
    """Return the (vendor, class, kind) triple for the longest OXM_CLASSES
    prefix of 'name', or None when no prefix matches."""
    best = None
    best_len = 0
    for candidate, triple in OXM_CLASSES.items():
        if name.startswith(candidate) and len(candidate) > best_len:
            best_len = len(candidate)
            best = triple
    return best


def is_standard_oxm(name):
    """Return True if 'name' belongs to a standard OXM class, False if it
    belongs to an extension class."""
    _vendor, _class, kind = oxm_name_to_class(name)
    return kind == "standard"
def get_line():
    """Read the next line of 'input_file' into the global 'line', bumping
    the global 'line_number'.  Fails fatally on end of input."""
    global line
    global line_number
    line = input_file.readline()
    line_number += 1
    if line == "":
        # readline() returns "" only at EOF; the grammar never permits
        # the input to end mid-parse.
        fatal("unexpected end of input")
# Count of errors reported via error(); extract_ofp_fields() exits
# unsuccessfully if any were recorded.
n_errors = 0
def error(msg):
    """Report 'msg' on stderr, prefixed with the current file name and line
    number, and bump the global error count."""
    global n_errors
    sys.stderr.write("%s:%d: %s\n" % (file_name, line_number, msg))
    n_errors += 1
def fatal(msg):
    """Report 'msg' as with error(), then exit unsuccessfully."""
    error(msg)
    sys.exit(1)
def parse_oxms(s, prefix, n_bytes):
    """Parse 's', a comma-separated list of OXM/NXM specs (or the literal
    "none"), into a tuple of parse_oxm() results.  'prefix' and 'n_bytes'
    are passed through to parse_oxm()."""
    if s == "none":
        return ()
    specs = [item.strip() for item in s.split(",")]
    return tuple(parse_oxm(spec, prefix, n_bytes) for spec in specs)
# Maps an OXM_CLASSES value (vendor, class, kind) to a dict of
# {oxm_type string: field name}; parse_oxm() uses it to reject duplicate
# match type numbers within one OXM class.
match_types = dict()
def parse_oxm(s, prefix, n_bytes):
    """Parse one OXM/NXM spec 's' of the form
    "NAME(TYPE) since [OF1.x and] vX.Y" and return a tuple
    (header, name, of_version_nr, ovs_version), where header is
    (oxm_vendor, oxm_class, oxm_type, oxm_length).

    'prefix' (in practice "OXM" or "NXM") is used only in error messages;
    'n_bytes' is the field width used to derive oxm_length.  Fails fatally
    on syntax errors, unknown classes, duplicate match type numbers, or
    inconsistent OpenFlow version claims."""
    global match_types
    m = re.match(
        r"([A-Z0-9_]+)\(([0-9]+)\) since(?: OF(1\.[0-9]+) and)? v([12]\.[0-9]+)$",  # noqa: E501
        s,
    )
    if not m:
        fatal("%s: syntax error parsing %s" % (s, prefix))
    name, oxm_type, of_version, ovs_version = m.groups()
    class_ = oxm_name_to_class(name)
    if class_ is None:
        fatal("unknown OXM class for %s" % name)
    oxm_vendor, oxm_class, oxm_class_type = class_
    # Each match type number may be claimed by only one field per class.
    if class_ in match_types:
        if oxm_type in match_types[class_]:
            fatal(
                "duplicate match type for %s (conflicts with %s)"
                % (name, match_types[class_][oxm_type])
            )
    else:
        match_types[class_] = dict()
    match_types[class_][oxm_type] = name
    # Normally the oxm_length is the size of the field, but for experimenter
    # OXMs oxm_length also includes the 4-byte experimenter ID.
    oxm_length = n_bytes
    if oxm_class == 0xFFFF:
        oxm_length += 4
    header = (oxm_vendor, oxm_class, int(oxm_type), oxm_length)
    if of_version:
        # Only standard OXM may (and must) name the OpenFlow version that
        # introduced it, and OXM itself only exists since OF1.2.
        if oxm_class_type == "extension":
            fatal("%s: OXM extension can't have OpenFlow version" % name)
        if of_version not in VERSION:
            fatal("%s: unknown OpenFlow version %s" % (name, of_version))
        of_version_nr = VERSION[of_version]
        if of_version_nr < VERSION["1.2"]:
            fatal("%s: claimed version %s predates OXM" % (name, of_version))
    else:
        if oxm_class_type == "standard":
            fatal("%s: missing OpenFlow version number" % name)
        of_version_nr = 0
    return (header, name, of_version_nr, ovs_version)
def parse_field(mff, comment):
    """Parse the documentation 'comment' (a list of already de-commented
    lines) that precedes the enum constant named 'mff', returning a dict
    that describes the field, with keys including "mff", "name",
    "extra_name", "n_bytes", "n_bits", "variable", "mask", "formatting",
    "string", "prereqs", "writable", "OF1.0", "OF1.1", "OXM", and
    "prefix".  Fails fatally on any syntax or consistency error."""
    f = {"mff": mff}
    # First line of comment is the field name.
    m = re.match(
        r'"([^"]+)"(?:\s+\(aka "([^"]+)"\))?(?:\s+\(.*\))?\.', comment[0]
    )
    if not m:
        fatal("%s lacks field name" % mff)
    f["name"], f["extra_name"] = m.groups()
    # Find the last blank line in the comment.  The field definitions
    # start after that.
    blank = None
    for i in range(len(comment)):
        if not comment[i]:
            blank = i
    if not blank:
        fatal("%s: missing blank line in comment" % mff)
    # Collect the "Key: value." pairs following the last blank line into
    # 'd'; only the keys seeded here are accepted.
    d = {}
    for key in (
        "Type",
        "Maskable",
        "Formatting",
        "Prerequisites",
        "Access",
        "Prefix lookup member",
        "OXM",
        "NXM",
        "OF1.0",
        "OF1.1",
    ):
        d[key] = None
    for fline in comment[blank + 1 :]:
        m = re.match(r"([^:]+):\s+(.*)\.$", fline)
        if not m:
            fatal(
                "%s: syntax error parsing key-value pair as part of %s"
                % (fline, mff)
            )
        key, value = m.groups()
        if key not in d:
            fatal("%s: unknown key" % key)
        elif key == "Code point":
            # NOTE(review): unreachable -- "Code point" is never seeded
            # into 'd' above, so the "unknown key" check rejects it first.
            d[key] += [value]
        elif d[key] is not None:
            fatal("%s: duplicate key" % key)
        d[key] = value
    # All keys are mandatory except the ones listed below.  ("Notes" is
    # never seeded into 'd', so listing it here has no effect.)
    for key, value in d.items():
        if not value and key not in (
            "OF1.0",
            "OF1.1",
            "Prefix lookup member",
            "Notes",
        ):
            fatal("%s: missing %s" % (mff, key))
    # "Type", e.g. "be32" or "be32 (low 16 bits)".
    m = re.match(r"([a-zA-Z0-9]+)(?: \(low ([0-9]+) bits\))?$", d["Type"])
    if not m:
        fatal("%s: syntax error in type" % mff)
    type_ = m.group(1)
    if type_ not in TYPES:
        fatal("%s: unknown type %s" % (mff, d["Type"]))
    f["n_bytes"] = TYPES[type_][0]
    if m.group(2):
        f["n_bits"] = int(m.group(2))
        if f["n_bits"] > f["n_bytes"] * 8:
            fatal(
                "%s: more bits (%d) than field size (%d)"
                % (mff, f["n_bits"], 8 * f["n_bytes"])
            )
    else:
        f["n_bits"] = 8 * f["n_bytes"]
    f["variable"] = TYPES[type_][1]
    # "Maskable".
    if d["Maskable"] == "no":
        f["mask"] = "MFM_NONE"
    elif d["Maskable"] == "bitwise":
        f["mask"] = "MFM_FULLY"
    else:
        fatal("%s: unknown maskable %s" % (mff, d["Maskable"]))
    # "Formatting" must be known and compatible with the field's size.
    fmt = FORMATTING.get(d["Formatting"])
    if not fmt:
        fatal("%s: unknown format %s" % (mff, d["Formatting"]))
    f["formatting"] = d["Formatting"]
    if f["n_bytes"] < fmt[1] or f["n_bytes"] > fmt[2]:
        fatal(
            "%s: %d-byte field can't be formatted as %s"
            % (mff, f["n_bytes"], d["Formatting"])
        )
    f["string"] = fmt[0]
    # "Prerequisites".
    f["prereqs"] = d["Prerequisites"]
    if f["prereqs"] not in PREREQS:
        fatal("%s: unknown prerequisites %s" % (mff, d["Prerequisites"]))
    # "Access".
    if d["Access"] == "read-only":
        f["writable"] = False
    elif d["Access"] == "read/write":
        f["writable"] = True
    else:
        fatal("%s: unknown access %s" % (mff, d["Access"]))
    # Optional pre-OXM match support.
    f["OF1.0"] = d["OF1.0"]
    if not d["OF1.0"] in (None, "exact match", "CIDR mask"):
        fatal("%s: unknown OF1.0 match type %s" % (mff, d["OF1.0"]))
    f["OF1.1"] = d["OF1.1"]
    if not d["OF1.1"] in (None, "exact match", "bitwise mask"):
        fatal("%s: unknown OF1.1 match type %s" % (mff, d["OF1.1"]))
    # The "OXM" tuple holds the parsed OXM and NXM specs combined.
    f["OXM"] = parse_oxms(d["OXM"], "OXM", f["n_bytes"]) + parse_oxms(
        d["NXM"], "NXM", f["n_bytes"]
    )
    f["prefix"] = d["Prefix lookup member"]
    return f
def extract_ofp_fields(fn):
    """Parse the "enum mf_field_id" definition from the C header file named
    'fn' and return a list of field description dicts as produced by
    parse_field(), one per MFF_ constant (expanding register groups whose
    shared comment uses <N> placeholders).  Exits unsuccessfully if any
    errors were reported."""
    global file_name
    global input_file
    global line_number
    global line
    file_name = fn
    input_file = open(file_name)
    line_number = 0
    fields = []
    # Skip everything up to the start of the enum.
    while True:
        get_line()
        if re.match("enum.*mf_field_id", line):
            break
    while True:
        get_line()
        if (
            line.startswith("/*")
            or line.startswith(" *")
            or line.startswith("#")
            or not line
            or line.isspace()
        ):
            continue
        elif re.match(r"}", line) or re.match(r"\s+MFF_N_IDS", line):
            # End of the enum.
            break
        # Parse the comment preceding an MFF_ constant into 'comment',
        # one line to an array element.
        line = line.strip()
        if not line.startswith("/*"):
            fatal("unexpected syntax between fields")
        line = line[1:]
        comment = []
        end = False
        while not end:
            line = line.strip()
            if line.startswith("*/"):
                get_line()
                break
            if not line.startswith("*"):
                fatal("unexpected syntax within field")
            # Strip the leading "*" and the single space that normally
            # follows it; any further indentation marks a continuation
            # line that gets folded into the previous comment line.
            line = line[1:]
            if line.startswith(" "):
                line = line[1:]
            if line.startswith(" ") and comment:
                continuation = True
                line = line.lstrip()
            else:
                continuation = False
            if line.endswith("*/"):
                line = line[:-2].rstrip()
                end = True
            else:
                end = False
            if continuation:
                comment[-1] += " " + line
            else:
                comment += [line]
            get_line()
        # Drop blank lines at each end of comment.
        while comment and not comment[0]:
            comment = comment[1:]
        while comment and not comment[-1]:
            comment = comment[:-1]
        # Parse the MFF_ constant(s).
        mffs = []
        while True:
            m = re.match(r"\s+(MFF_[A-Z0-9_]+),?\s?$", line)
            if not m:
                break
            mffs += [m.group(1)]
            get_line()
        if not mffs:
            fatal("unexpected syntax looking for MFF_ constants")
        if len(mffs) > 1 or "<N>" in comment[0]:
            # A register group (e.g. MFF_REG0, MFF_REG1, ...) shares one
            # comment; instantiate the comment once per member.
            for mff in mffs:
                # Extract trailing integer.
                m = re.match(".*[^0-9]([0-9]+)$", mff)
                if not m:
                    fatal("%s lacks numeric suffix in register group" % mff)
                n = m.group(1)
                # Search-and-replace <N> within the comment,
                # and drop lines that have <x> for x != n.
                instance = []
                for x in comment:
                    y = x.replace("<N>", n)
                    if re.search("<[0-9]+>", y):
                        if ("<%s>" % n) not in y:
                            continue
                        y = re.sub("<[0-9]+>", "", y)
                    instance += [y.strip()]
                fields += [parse_field(mff, instance)]
        else:
            fields += [parse_field(mffs[0], comment)]
        continue
    input_file.close()
    if n_errors:
        sys.exit(1)
    return fields