2009-07-08 13:19:16 -07:00
|
|
|
|
/*
|
2013-08-27 12:25:48 -07:00
|
|
|
|
* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
|
2009-07-08 13:19:16 -07:00
|
|
|
|
*
|
2009-06-15 15:11:30 -07:00
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
|
* You may obtain a copy of the License at:
|
2009-07-08 13:19:16 -07:00
|
|
|
|
*
|
2009-06-15 15:11:30 -07:00
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
*
|
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
|
* limitations under the License.
|
2009-07-08 13:19:16 -07:00
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#ifndef CLASSIFIER_H
|
|
|
|
|
#define CLASSIFIER_H 1
|
|
|
|
|
|
|
|
|
|
/* Flow classifier.
|
|
|
|
|
*
|
2013-09-25 15:07:21 -07:00
|
|
|
|
*
|
|
|
|
|
* What?
|
|
|
|
|
* =====
|
|
|
|
|
*
|
|
|
|
|
* A flow classifier holds any number of "rules", each of which specifies
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* values to match for some fields or subfields and a priority. Each OpenFlow
|
|
|
|
|
* table is implemented as a flow classifier.
|
2013-09-25 15:07:21 -07:00
|
|
|
|
*
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* The classifier has two primary design goals. The first is obvious: given a
|
|
|
|
|
* set of packet headers, as quickly as possible find the highest-priority rule
|
|
|
|
|
* that matches those headers. The following section describes the second
|
|
|
|
|
* goal.
|
2013-09-25 15:07:21 -07:00
|
|
|
|
*
|
|
|
|
|
*
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* "Un-wildcarding"
|
|
|
|
|
* ================
|
|
|
|
|
*
|
|
|
|
|
* A primary goal of the flow classifier is to produce, as a side effect of a
|
|
|
|
|
* packet lookup, a wildcard mask that indicates which bits of the packet
|
|
|
|
|
* headers were essential to the classification result. Ideally, a 1-bit in
|
|
|
|
|
* any position of this mask means that, if the corresponding bit in the packet
|
|
|
|
|
* header were flipped, then the classification result might change. A 0-bit
|
|
|
|
|
* means that changing the packet header bit would have no effect. Thus, the
|
|
|
|
|
* wildcarded bits are the ones that played no role in the classification
|
|
|
|
|
* decision.
|
|
|
|
|
*
|
|
|
|
|
* Such a wildcard mask is useful with datapaths that support installing flows
|
|
|
|
|
* that wildcard fields or subfields. If an OpenFlow lookup for a TCP flow
|
|
|
|
|
* does not actually look at the TCP source or destination ports, for example,
|
|
|
|
|
* then the switch may install into the datapath a flow that wildcards the port
|
|
|
|
|
* numbers, which in turn allows the datapath to handle packets that arrive for
|
|
|
|
|
* other TCP source or destination ports without additional help from
|
|
|
|
|
* ovs-vswitchd. This is useful for the Open vSwitch software and,
|
|
|
|
|
* potentially, for ASIC-based switches as well.
|
|
|
|
|
*
|
|
|
|
|
* Some properties of the wildcard mask:
|
|
|
|
|
*
|
|
|
|
|
* - "False 1-bits" are acceptable, that is, setting a bit in the wildcard
|
|
|
|
|
* mask to 1 will never cause a packet to be forwarded the wrong way.
|
|
|
|
|
* As a corollary, a wildcard mask composed of all 1-bits will always
|
|
|
|
|
* yield correct (but often needlessly inefficient) behavior.
|
|
|
|
|
*
|
|
|
|
|
* - "False 0-bits" can cause problems, so they must be avoided. In the
|
|
|
|
|
* extreme case, a mask of all 0-bits is only correct if the classifier
|
|
|
|
|
* contains only a single flow that matches all packets.
|
|
|
|
|
*
|
|
|
|
|
* - 0-bits are desirable because they allow the datapath to act more
|
|
|
|
|
* autonomously, relying less on ovs-vswitchd to process flow setups,
|
|
|
|
|
* thereby improving performance.
|
|
|
|
|
*
|
|
|
|
|
* - We don't know a good way to generate wildcard masks with the maximum
|
|
|
|
|
* (correct) number of 0-bits. We use various approximations, described
|
|
|
|
|
* in later sections.
|
|
|
|
|
*
|
|
|
|
|
* - Wildcard masks for lookups in a given classifier yield a
|
|
|
|
|
* non-overlapping set of rules. More specifically:
|
|
|
|
|
*
|
|
|
|
|
* Consider an classifier C1 filled with an arbitrary collection of rules
|
|
|
|
|
* and an empty classifier C2. Now take a set of packet headers H and
|
|
|
|
|
* look it up in C1, yielding a highest-priority matching rule R1 and
|
|
|
|
|
* wildcard mask M. Form a new classifier rule R2 out of packet headers
|
|
|
|
|
* H and mask M, and add R2 to C2 with a fixed priority. If one were to
|
|
|
|
|
* do this for every possible set of packet headers H, then this
|
|
|
|
|
* process would not attempt to add any overlapping rules to C2, that is,
|
|
|
|
|
* any packet lookup using the rules generated by this process matches at
|
|
|
|
|
* most one rule in C2.
|
|
|
|
|
*
|
|
|
|
|
* During the lookup process, the classifier starts out with a wildcard mask
|
|
|
|
|
* that is all 0-bits, that is, fully wildcarded. As lookup proceeds, each
|
|
|
|
|
* step tends to add constraints to the wildcard mask, that is, change
|
|
|
|
|
* wildcarded 0-bits into exact-match 1-bits. We call this "un-wildcarding".
|
|
|
|
|
* A lookup step that examines a particular field must un-wildcard that field.
|
|
|
|
|
* In general, un-wildcarding is necessary for correctness but undesirable for
|
|
|
|
|
* performance.
|
|
|
|
|
*
|
|
|
|
|
*
|
|
|
|
|
* Basic Classifier Design
|
|
|
|
|
* =======================
|
2013-09-25 15:07:21 -07:00
|
|
|
|
*
|
|
|
|
|
* Suppose that all the rules in a classifier had the same form. For example,
|
|
|
|
|
* suppose that they all matched on the source and destination Ethernet address
|
|
|
|
|
* and wildcarded all the other fields. Then the obvious way to implement a
|
|
|
|
|
* classifier would be a hash table on the source and destination Ethernet
|
|
|
|
|
* addresses. If new classification rules came along with a different form,
|
|
|
|
|
* you could add a second hash table that hashed on the fields matched in those
|
|
|
|
|
* rules. With two hash tables, you look up a given flow in each hash table.
|
|
|
|
|
* If there are no matches, the classifier didn't contain a match; if you find
|
|
|
|
|
* a match in one of them, that's the result; if you find a match in both of
|
|
|
|
|
* them, then the result is the rule with the higher priority.
|
|
|
|
|
*
|
|
|
|
|
* This is how the classifier works. In a "struct classifier", each form of
|
|
|
|
|
* "struct cls_rule" present (based on its ->match.mask) goes into a separate
|
2013-10-29 16:39:52 -07:00
|
|
|
|
* "struct cls_subtable". A lookup does a hash lookup in every "struct
|
|
|
|
|
* cls_subtable" in the classifier and tracks the highest-priority match that
|
|
|
|
|
* it finds. The subtables are kept in a descending priority order according
|
|
|
|
|
* to the highest priority rule in each subtable, which allows lookup to skip
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* over subtables that can't possibly have a higher-priority match than already
|
|
|
|
|
* found. Eliminating lookups through priority ordering aids both classifier
|
|
|
|
|
* primary design goals: skipping lookups saves time and avoids un-wildcarding
|
|
|
|
|
* fields that those lookups would have examined.
|
2013-09-25 15:07:21 -07:00
|
|
|
|
*
|
|
|
|
|
* One detail: a classifier can contain multiple rules that are identical other
|
|
|
|
|
* than their priority. When this happens, only the highest priority rule out
|
|
|
|
|
* of a group of otherwise identical rules is stored directly in the "struct
|
2013-10-29 16:39:52 -07:00
|
|
|
|
* cls_subtable", with the other almost-identical rules chained off a linked
|
|
|
|
|
* list inside that highest-priority rule.
|
2013-09-25 15:07:21 -07:00
|
|
|
|
*
|
|
|
|
|
*
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* Staged Lookup (Wildcard Optimization)
|
|
|
|
|
* =====================================
|
2013-11-19 17:31:29 -08:00
|
|
|
|
*
|
|
|
|
|
* Subtable lookup is performed in ranges defined for struct flow, starting
|
|
|
|
|
* from metadata (registers, in_port, etc.), then L2 header, L3, and finally
|
|
|
|
|
* L4 ports. Whenever it is found that there are no matches in the current
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* subtable, the rest of the subtable can be skipped.
|
|
|
|
|
*
|
|
|
|
|
* Staged lookup does not reduce lookup time, and it may increase it, because
|
|
|
|
|
* it changes a single hash table lookup into multiple hash table lookups.
|
|
|
|
|
* It reduces un-wildcarding significantly in important use cases.
|
2013-11-19 17:31:29 -08:00
|
|
|
|
*
|
|
|
|
|
*
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* Prefix Tracking (Wildcard Optimization)
|
|
|
|
|
* =======================================
|
|
|
|
|
*
|
|
|
|
|
* Classifier uses prefix trees ("tries") for tracking the used
|
|
|
|
|
* address space, enabling skipping classifier tables containing
|
|
|
|
|
* longer masks than necessary for the given address. This reduces
|
|
|
|
|
* un-wildcarding for datapath flows in parts of the address space
|
|
|
|
|
* without host routes, but consulting extra data structures (the
|
|
|
|
|
* tries) may slightly increase lookup time.
|
|
|
|
|
*
|
|
|
|
|
* Trie lookup is interwoven with staged lookup, so that a trie is
|
|
|
|
|
* searched only when the configured trie field becomes relevant for
|
|
|
|
|
* the lookup. The trie lookup results are retained so that each trie
|
|
|
|
|
* is checked at most once for each classifier lookup.
|
|
|
|
|
*
|
|
|
|
|
* This implementation tracks the number of rules at each address
|
|
|
|
|
* prefix for the whole classifier. More aggressive table skipping
|
|
|
|
|
* would be possible by maintaining lists of tables that have prefixes
|
|
|
|
|
* at the lengths encountered on tree traversal, or by maintaining
|
|
|
|
|
* separate tries for subsets of rules separated by metadata fields.
|
|
|
|
|
*
|
|
|
|
|
* Prefix tracking is configured via OVSDB "Flow_Table" table,
|
|
|
|
|
* "fieldspec" column. "fieldspec" is a string map where a "prefix"
|
|
|
|
|
* key tells which fields should be used for prefix tracking. The
|
|
|
|
|
* value of the "prefix" key is a comma separated list of field names.
|
|
|
|
|
*
|
|
|
|
|
* There is a maximum number of fields that can be enabled for any one
|
|
|
|
|
* flow table. Currently this limit is 3.
|
|
|
|
|
*
|
|
|
|
|
*
|
|
|
|
|
* Partitioning (Lookup Time and Wildcard Optimization)
|
|
|
|
|
* ====================================================
|
2013-09-25 15:07:21 -07:00
|
|
|
|
*
|
|
|
|
|
* Suppose that a given classifier is being used to handle multiple stages in a
|
|
|
|
|
* pipeline using "resubmit", with metadata (that is, the OpenFlow 1.1+ field
|
|
|
|
|
* named "metadata") distinguishing between the different stages. For example,
|
|
|
|
|
* metadata value 1 might identify ingress rules, metadata value 2 might
|
|
|
|
|
* identify ACLs, and metadata value 3 might identify egress rules. Such a
|
|
|
|
|
* classifier is essentially partitioned into multiple sub-classifiers on the
|
|
|
|
|
* basis of the metadata value.
|
|
|
|
|
*
|
|
|
|
|
* The classifier has a special optimization to speed up matching in this
|
|
|
|
|
* scenario:
|
|
|
|
|
*
|
2013-10-29 16:39:52 -07:00
|
|
|
|
* - Each cls_subtable that matches on metadata gets a tag derived from the
|
|
|
|
|
* subtable's mask, so that it is likely that each subtable has a unique
|
|
|
|
|
* tag. (Duplicate tags have a performance cost but do not affect
|
2013-09-25 15:07:21 -07:00
|
|
|
|
* correctness.)
|
|
|
|
|
*
|
|
|
|
|
* - For each metadata value matched by any cls_rule, the classifier
|
|
|
|
|
* constructs a "struct cls_partition" indexed by the metadata value.
|
|
|
|
|
* The cls_partition has a 'tags' member whose value is the bitwise-OR of
|
2013-10-29 16:39:52 -07:00
|
|
|
|
* the tags of each cls_subtable that contains any rule that matches on
|
|
|
|
|
* the cls_partition's metadata value. In other words, struct
|
|
|
|
|
* cls_partition associates metadata values with subtables that need to
|
|
|
|
|
* be checked with flows with that specific metadata value.
|
2013-09-25 15:07:21 -07:00
|
|
|
|
*
|
|
|
|
|
* Thus, a flow lookup can start by looking up the partition associated with
|
2013-10-29 16:39:52 -07:00
|
|
|
|
* the flow's metadata, and then skip over any cls_subtable whose 'tag' does
|
|
|
|
|
* not intersect the partition's 'tags'. (The flow must also be looked up in
|
|
|
|
|
* any cls_subtable that doesn't match on metadata. We handle that by giving
|
|
|
|
|
* any such cls_subtable TAG_ALL as its 'tags' so that it matches any tag.)
|
2013-09-25 15:07:21 -07:00
|
|
|
|
*
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* Partitioning saves lookup time by reducing the number of subtable lookups.
|
|
|
|
|
* Each eliminated subtable lookup also reduces the amount of un-wildcarding.
|
|
|
|
|
*
|
2013-07-11 14:19:11 -07:00
|
|
|
|
*
|
|
|
|
|
* Thread-safety
|
|
|
|
|
* =============
|
|
|
|
|
*
|
2013-09-25 15:38:57 -07:00
|
|
|
|
* The classifier may safely be accessed by many reader threads concurrently or
|
|
|
|
|
* by a single writer. */
|
2009-07-08 13:19:16 -07:00
|
|
|
|
|
2014-07-11 02:29:07 -07:00
|
|
|
|
#include "cmap.h"
|
2014-01-13 11:21:12 -08:00
|
|
|
|
#include "fat-rwlock.h"
|
2012-08-07 15:28:18 -07:00
|
|
|
|
#include "match.h"
|
2013-12-11 11:07:01 -08:00
|
|
|
|
#include "meta-flow.h"
|
2009-07-08 13:19:16 -07:00
|
|
|
|
|
2011-08-04 16:18:59 -07:00
|
|
|
|
#ifdef __cplusplus
|
|
|
|
|
extern "C" {
|
|
|
|
|
#endif
|
|
|
|
|
|
2013-09-12 20:45:15 -07:00
|
|
|
|
/* Needed only for the lock annotation in struct classifier. */
|
|
|
|
|
extern struct ovs_mutex ofproto_mutex;
|
2013-12-11 11:07:01 -08:00
|
|
|
|
|
2014-04-29 15:50:38 -07:00
|
|
|
|
/* Classifier internal data structures. */
|
|
|
|
|
struct cls_classifier;
|
|
|
|
|
struct cls_subtable;
|
|
|
|
|
struct cls_partition;
|
2014-04-29 15:50:38 -07:00
|
|
|
|
struct cls_match;
|
|
|
|
|
|
|
|
|
|
enum {
|
|
|
|
|
CLS_MAX_TRIES = 3 /* Maximum number of prefix trees per classifier. */
|
|
|
|
|
};
|
2013-11-19 17:31:29 -08:00
|
|
|
|
|
2009-07-08 13:19:16 -07:00
|
|
|
|
/* A flow classifier. */
|
|
|
|
|
struct classifier {
|
2014-01-13 11:21:12 -08:00
|
|
|
|
struct fat_rwlock rwlock OVS_ACQ_AFTER(ofproto_mutex);
|
2014-04-29 15:50:38 -07:00
|
|
|
|
struct cls_classifier *cls;
|
2009-07-08 13:19:16 -07:00
|
|
|
|
};
|
|
|
|
|
|
2014-04-29 15:50:38 -07:00
|
|
|
|
/* A rule to be inserted to the classifier. */
|
2009-07-08 13:19:16 -07:00
|
|
|
|
struct cls_rule {
|
2014-04-29 15:50:38 -07:00
|
|
|
|
struct minimatch match; /* Matching rule. */
|
|
|
|
|
unsigned int priority; /* Larger numbers are higher priorities. */
|
|
|
|
|
struct cls_match *cls_match; /* NULL if rule is not in a classifier. */
|
2013-09-25 15:07:21 -07:00
|
|
|
|
};
|
|
|
|
|
|
2012-09-04 12:43:53 -07:00
|
|
|
|
void cls_rule_init(struct cls_rule *, const struct match *,
|
|
|
|
|
unsigned int priority);
|
|
|
|
|
void cls_rule_init_from_minimatch(struct cls_rule *, const struct minimatch *,
|
|
|
|
|
unsigned int priority);
|
2012-08-20 11:29:43 -07:00
|
|
|
|
void cls_rule_clone(struct cls_rule *, const struct cls_rule *);
|
2013-08-27 12:25:48 -07:00
|
|
|
|
void cls_rule_move(struct cls_rule *dst, struct cls_rule *src);
|
2012-08-20 11:29:43 -07:00
|
|
|
|
void cls_rule_destroy(struct cls_rule *);
|
2010-10-26 16:41:44 -07:00
|
|
|
|
|
2010-11-08 16:35:34 -08:00
|
|
|
|
bool cls_rule_equal(const struct cls_rule *, const struct cls_rule *);
|
2011-05-26 16:24:38 -07:00
|
|
|
|
uint32_t cls_rule_hash(const struct cls_rule *, uint32_t basis);
|
2010-11-08 16:35:34 -08:00
|
|
|
|
|
2010-11-23 12:31:50 -08:00
|
|
|
|
void cls_rule_format(const struct cls_rule *, struct ds *);
|
2012-08-07 15:28:18 -07:00
|
|
|
|
|
2012-07-20 14:46:15 -07:00
|
|
|
|
bool cls_rule_is_catchall(const struct cls_rule *);
|
|
|
|
|
|
2012-08-07 15:28:18 -07:00
|
|
|
|
bool cls_rule_is_loose_match(const struct cls_rule *rule,
|
2012-09-04 12:43:53 -07:00
|
|
|
|
const struct minimatch *criteria);
|
2009-07-08 13:19:16 -07:00
|
|
|
|
|
2013-11-19 17:31:29 -08:00
|
|
|
|
void classifier_init(struct classifier *cls, const uint8_t *flow_segments);
|
2009-07-08 13:19:16 -07:00
|
|
|
|
void classifier_destroy(struct classifier *);
|
2013-12-11 11:07:01 -08:00
|
|
|
|
void classifier_set_prefix_fields(struct classifier *cls,
|
|
|
|
|
const enum mf_field_id *trie_fields,
|
|
|
|
|
unsigned int n_trie_fields)
|
|
|
|
|
OVS_REQ_WRLOCK(cls->rwlock);
|
|
|
|
|
|
2014-07-11 02:29:07 -07:00
|
|
|
|
bool classifier_is_empty(const struct classifier *cls);
|
2013-07-11 14:19:11 -07:00
|
|
|
|
int classifier_count(const struct classifier *cls)
|
|
|
|
|
OVS_REQ_RDLOCK(cls->rwlock);
|
|
|
|
|
void classifier_insert(struct classifier *cls, struct cls_rule *)
|
|
|
|
|
OVS_REQ_WRLOCK(cls->rwlock);
|
|
|
|
|
struct cls_rule *classifier_replace(struct classifier *cls, struct cls_rule *)
|
|
|
|
|
OVS_REQ_WRLOCK(cls->rwlock);
|
|
|
|
|
void classifier_remove(struct classifier *cls, struct cls_rule *)
|
|
|
|
|
OVS_REQ_WRLOCK(cls->rwlock);
|
|
|
|
|
struct cls_rule *classifier_lookup(const struct classifier *cls,
|
2013-05-09 19:15:54 -07:00
|
|
|
|
const struct flow *,
|
2013-07-11 14:19:11 -07:00
|
|
|
|
struct flow_wildcards *)
|
|
|
|
|
OVS_REQ_RDLOCK(cls->rwlock);
|
2014-06-23 18:40:47 -07:00
|
|
|
|
void classifier_lookup_miniflow_batch(const struct classifier *cls,
|
|
|
|
|
const struct miniflow **flows,
|
|
|
|
|
struct cls_rule **rules, size_t len)
|
2014-04-18 08:26:56 -07:00
|
|
|
|
OVS_REQ_RDLOCK(cls->rwlock);
|
2013-07-11 14:19:11 -07:00
|
|
|
|
bool classifier_rule_overlaps(const struct classifier *cls,
|
|
|
|
|
const struct cls_rule *)
|
|
|
|
|
OVS_REQ_RDLOCK(cls->rwlock);
|
2009-07-08 13:19:16 -07:00
|
|
|
|
|
2013-07-11 14:19:11 -07:00
|
|
|
|
struct cls_rule *classifier_find_rule_exactly(const struct classifier *cls,
|
|
|
|
|
const struct cls_rule *)
|
|
|
|
|
OVS_REQ_RDLOCK(cls->rwlock);
|
|
|
|
|
struct cls_rule *classifier_find_match_exactly(const struct classifier *cls,
|
2012-08-07 15:28:18 -07:00
|
|
|
|
const struct match *,
|
2013-07-11 14:19:11 -07:00
|
|
|
|
unsigned int priority)
|
|
|
|
|
OVS_REQ_RDLOCK(cls->rwlock);
|
2010-10-28 16:18:20 -07:00
|
|
|
|
|
|
|
|
|
/* Iteration. */
|
|
|
|
|
|
|
|
|
|
struct cls_cursor {
|
2014-07-11 02:29:07 -07:00
|
|
|
|
const struct classifier *cls;
|
2013-10-29 16:39:52 -07:00
|
|
|
|
const struct cls_subtable *subtable;
|
2010-10-28 16:18:20 -07:00
|
|
|
|
const struct cls_rule *target;
|
2014-07-11 02:29:07 -07:00
|
|
|
|
struct cmap_cursor subtables;
|
|
|
|
|
struct cmap_cursor rules;
|
2014-07-11 02:29:07 -07:00
|
|
|
|
bool safe;
|
2010-10-28 16:18:20 -07:00
|
|
|
|
};
|
|
|
|
|
|
2014-07-11 02:29:07 -07:00
|
|
|
|
/* Iteration requires mutual exclusion of the writers. We do this by taking
|
|
|
|
|
* the classifier read lock for the duration of the iteration, except for the
|
|
|
|
|
* 'SAFE' variant, where we release the lock for the body of the loop. */
|
|
|
|
|
struct cls_cursor cls_cursor_init(const struct classifier *cls,
|
|
|
|
|
const struct cls_rule *target,
|
|
|
|
|
void **pnode, const void *offset, bool safe);
|
|
|
|
|
|
|
|
|
|
struct cls_rule *cls_cursor_next(struct cls_cursor *cursor,
|
|
|
|
|
const struct cls_rule *);
|
|
|
|
|
|
|
|
|
|
#define CLS_CURSOR_START(RULE, MEMBER, CLS, TARGET) \
|
|
|
|
|
cls_cursor_init(CLS, (TARGET), (void **)&(RULE), \
|
|
|
|
|
OBJECT_CONTAINING(NULL, RULE, MEMBER), false)
|
2010-10-28 16:18:20 -07:00
|
|
|
|
|
2014-07-11 02:29:07 -07:00
|
|
|
|
#define CLS_CURSOR_START_SAFE(RULE, MEMBER, CLS, TARGET) \
|
|
|
|
|
cls_cursor_init(CLS, (TARGET), (void **)&(RULE), \
|
|
|
|
|
OBJECT_CONTAINING(NULL, RULE, MEMBER), true)
|
|
|
|
|
|
|
|
|
|
#define CLS_FOR_EACH(RULE, MEMBER, CLS) \
|
|
|
|
|
for (struct cls_cursor cursor__ = CLS_CURSOR_START(RULE, MEMBER, CLS, \
|
|
|
|
|
NULL); \
|
2013-07-22 09:19:56 -07:00
|
|
|
|
RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER); \
|
2014-07-11 02:29:07 -07:00
|
|
|
|
ASSIGN_CONTAINER(RULE, cls_cursor_next(&cursor__, &(RULE)->MEMBER), \
|
2010-11-17 14:25:33 -08:00
|
|
|
|
MEMBER))
|
2010-10-28 16:18:20 -07:00
|
|
|
|
|
2014-07-11 02:29:07 -07:00
|
|
|
|
#define CLS_FOR_EACH_TARGET(RULE, MEMBER, CLS, TARGET) \
|
|
|
|
|
for (struct cls_cursor cursor__ = CLS_CURSOR_START(RULE, MEMBER, CLS, \
|
|
|
|
|
TARGET); \
|
|
|
|
|
RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER); \
|
|
|
|
|
ASSIGN_CONTAINER(RULE, cls_cursor_next(&cursor__, &(RULE)->MEMBER), \
|
|
|
|
|
MEMBER))
|
|
|
|
|
|
|
|
|
|
/* This form allows classifier_remove() to be called within the loop. */
|
|
|
|
|
#define CLS_FOR_EACH_SAFE(RULE, NEXT, MEMBER, CLS) \
|
|
|
|
|
for (struct cls_cursor cursor__ = CLS_CURSOR_START_SAFE(RULE, MEMBER, \
|
|
|
|
|
CLS, NULL); \
|
2013-07-22 09:19:56 -07:00
|
|
|
|
(RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER) \
|
2014-07-11 02:29:07 -07:00
|
|
|
|
? ASSIGN_CONTAINER(NEXT, cls_cursor_next(&cursor__, \
|
|
|
|
|
&(RULE)->MEMBER), \
|
|
|
|
|
MEMBER), true \
|
|
|
|
|
: false); \
|
2010-10-28 16:18:20 -07:00
|
|
|
|
(RULE) = (NEXT))
|
2009-07-08 13:19:16 -07:00
|
|
|
|
|
2014-07-11 02:29:07 -07:00
|
|
|
|
/* This form allows classifier_remove() to be called within the loop. */
|
|
|
|
|
#define CLS_FOR_EACH_TARGET_SAFE(RULE, NEXT, MEMBER, CLS, TARGET) \
|
|
|
|
|
for (struct cls_cursor cursor__ = CLS_CURSOR_START_SAFE(RULE, MEMBER, \
|
|
|
|
|
CLS, TARGET); \
|
|
|
|
|
(RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER) \
|
|
|
|
|
? ASSIGN_CONTAINER(NEXT, cls_cursor_next(&cursor__, \
|
|
|
|
|
&(RULE)->MEMBER), \
|
|
|
|
|
MEMBER), true \
|
|
|
|
|
: false); \
|
|
|
|
|
(RULE) = (NEXT))
|
|
|
|
|
|
|
|
|
|
|
2011-08-04 16:18:59 -07:00
|
|
|
|
#ifdef __cplusplus
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2009-07-08 13:19:16 -07:00
|
|
|
|
#endif /* classifier.h */
|