2
0
mirror of https://github.com/openvswitch/ovs synced 2025-09-02 15:25:22 +00:00

classifier: Constify fields.

Some struct cls_match and cls_subtable fields were already documented
as being const.  Make them const and use CONST_CAST where appropriate
to initialize them.

This will help catch future errors modifying those fields after
initialization.

Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Acked-by: Ben Pfaff <blp@nicira.com>
This commit is contained in:
Jarno Rajahalme
2014-10-27 10:57:28 -07:00
parent 98abae4aa6
commit f80028fe28
2 changed files with 26 additions and 23 deletions

View File

@@ -33,22 +33,22 @@ struct cls_subtable {
/* The fields are only used by writers. */ /* The fields are only used by writers. */
int n_rules OVS_GUARDED; /* Number of rules, including int n_rules OVS_GUARDED; /* Number of rules, including
* duplicates. */ * duplicates. */
int max_priority OVS_GUARDED; /* Max priority of any rule in subtable. */ int max_priority OVS_GUARDED; /* Max priority of any rule in subtable. */
unsigned int max_count OVS_GUARDED; /* Count of max_priority rules. */ unsigned int max_count OVS_GUARDED; /* Count of max_priority rules. */
/* These fields are accessed by readers who care about wildcarding. */ /* These fields are accessed by readers who care about wildcarding. */
tag_type tag; /* Tag generated from mask for partitioning (const). */ const tag_type tag; /* Tag generated from mask for partitioning. */
uint8_t n_indices; /* How many indices to use (const). */ const uint8_t n_indices; /* How many indices to use. */
uint8_t index_ofs[CLS_MAX_INDICES]; /* u32 segment boundaries (const). */ const uint8_t index_ofs[CLS_MAX_INDICES]; /* u32 segment boundaries. */
unsigned int trie_plen[CLS_MAX_TRIES]; /* Trie prefix length in 'mask' unsigned int trie_plen[CLS_MAX_TRIES]; /* Trie prefix length in 'mask'
* (runtime configurable). */ * (runtime configurable). */
int ports_mask_len; /* (const) */ const int ports_mask_len;
struct cmap indices[CLS_MAX_INDICES]; /* Staged lookup indices. */ struct cmap indices[CLS_MAX_INDICES]; /* Staged lookup indices. */
rcu_trie_ptr ports_trie; /* NULL if none. */ rcu_trie_ptr ports_trie; /* NULL if none. */
/* These fields are accessed by all readers. */ /* These fields are accessed by all readers. */
struct cmap rules; /* Contains "struct cls_rule"s. */ struct cmap rules; /* Contains 'cls_match'es. */
struct minimask mask; /* Wildcards for fields (const). */ const struct minimask mask; /* Wildcards for fields. */
/* 'mask' must be the last field. */ /* 'mask' must be the last field. */
}; };
@@ -64,20 +64,20 @@ struct cls_partition {
/* Internal representation of a rule in a "struct cls_subtable". */ /* Internal representation of a rule in a "struct cls_subtable". */
struct cls_match { struct cls_match {
/* Accessed only by writers and iterators. */ /* Accessed by everybody. */
struct rculist list OVS_GUARDED; /* Identical, lower-priority rules. */ struct rculist list OVS_GUARDED; /* Identical, lower-priority rules. */
/* Accessed only by writers. */ /* Accessed only by writers. */
struct cls_partition *partition OVS_GUARDED; struct cls_partition *partition OVS_GUARDED;
/* Accessed by readers interested in wildcarding. */ /* Accessed by readers interested in wildcarding. */
int priority; /* Larger numbers are higher priorities. */ const int priority; /* Larger numbers are higher priorities. */
struct cmap_node index_nodes[CLS_MAX_INDICES]; /* Within subtable's struct cmap_node index_nodes[CLS_MAX_INDICES]; /* Within subtable's
* 'indices'. */ * 'indices'. */
/* Accessed by all readers. */ /* Accessed by all readers. */
struct cmap_node cmap_node; /* Within struct cls_subtable 'rules'. */ struct cmap_node cmap_node; /* Within struct cls_subtable 'rules'. */
struct cls_rule *cls_rule; const struct cls_rule *cls_rule;
struct miniflow flow; /* Matching rule. Mask is in the subtable. */ const struct miniflow flow; /* Matching rule. Mask is in the subtable. */
/* 'flow' must be the last field. */ /* 'flow' must be the last field. */
}; };

View File

@@ -44,9 +44,10 @@ cls_match_alloc(struct cls_rule *rule)
= xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
+ MINIFLOW_VALUES_SIZE(count)); + MINIFLOW_VALUES_SIZE(count));
cls_match->cls_rule = rule; *CONST_CAST(const struct cls_rule **, &cls_match->cls_rule) = rule;
miniflow_clone_inline(&cls_match->flow, &rule->match.flow, count); *CONST_CAST(int *, &cls_match->priority) = rule->priority;
cls_match->priority = rule->priority; miniflow_clone_inline(CONST_CAST(struct miniflow *, &cls_match->flow),
&rule->match.flow, count);
rule->cls_match = cls_match; rule->cls_match = cls_match;
return cls_match; return cls_match;
@@ -1027,7 +1028,8 @@ insert_subtable(struct classifier *cls, const struct minimask *mask)
subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
+ MINIFLOW_VALUES_SIZE(count)); + MINIFLOW_VALUES_SIZE(count));
cmap_init(&subtable->rules); cmap_init(&subtable->rules);
miniflow_clone_inline(&subtable->mask.masks, &mask->masks, count); miniflow_clone_inline(CONST_CAST(struct miniflow *, &subtable->mask.masks),
&mask->masks, count);
/* Init indices for segmented lookup, if any. */ /* Init indices for segmented lookup, if any. */
flow_wildcards_init_catchall(&new); flow_wildcards_init_catchall(&new);
@@ -1039,7 +1041,8 @@ insert_subtable(struct classifier *cls, const struct minimask *mask)
/* Add an index if it adds mask bits. */ /* Add an index if it adds mask bits. */
if (!flow_wildcards_equal(&new, &old)) { if (!flow_wildcards_equal(&new, &old)) {
cmap_init(&subtable->indices[index]); cmap_init(&subtable->indices[index]);
subtable->index_ofs[index] = cls->flow_segments[i]; *CONST_CAST(uint8_t *, &subtable->index_ofs[index])
= cls->flow_segments[i];
index++; index++;
old = new; old = new;
} }
@@ -1051,15 +1054,16 @@ insert_subtable(struct classifier *cls, const struct minimask *mask)
flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U32S); flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U32S);
if (flow_wildcards_equal(&new, &old)) { if (flow_wildcards_equal(&new, &old)) {
--index; --index;
subtable->index_ofs[index] = 0; *CONST_CAST(uint8_t *, &subtable->index_ofs[index]) = 0;
cmap_destroy(&subtable->indices[index]); cmap_destroy(&subtable->indices[index]);
} }
} }
subtable->n_indices = index; *CONST_CAST(uint8_t *, &subtable->n_indices) = index;
subtable->tag = (minimask_get_metadata_mask(mask) == OVS_BE64_MAX *CONST_CAST(tag_type *, &subtable->tag) =
? tag_create_deterministic(hash) (minimask_get_metadata_mask(mask) == OVS_BE64_MAX
: TAG_ALL); ? tag_create_deterministic(hash)
: TAG_ALL);
for (i = 0; i < cls->n_tries; i++) { for (i = 0; i < cls->n_tries; i++) {
subtable->trie_plen[i] = minimask_get_prefix_len(mask, subtable->trie_plen[i] = minimask_get_prefix_len(mask,
@@ -1068,7 +1072,7 @@ insert_subtable(struct classifier *cls, const struct minimask *mask)
/* Ports trie. */ /* Ports trie. */
ovsrcu_set_hidden(&subtable->ports_trie, NULL); ovsrcu_set_hidden(&subtable->ports_trie, NULL);
subtable->ports_mask_len *CONST_CAST(int *, &subtable->ports_mask_len)
= 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src))); = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));
cmap_insert(&cls->subtables_map, &subtable->cmap_node, hash); cmap_insert(&cls->subtables_map, &subtable->cmap_node, hash);
@@ -1090,7 +1094,6 @@ destroy_subtable(struct classifier *cls, struct cls_subtable *subtable)
} }
cmap_remove(&cls->subtables_map, &subtable->cmap_node, cmap_remove(&cls->subtables_map, &subtable->cmap_node,
minimask_hash(&subtable->mask, 0)); minimask_hash(&subtable->mask, 0));
minimask_destroy(&subtable->mask);
cmap_destroy(&subtable->rules); cmap_destroy(&subtable->rules);
ovsrcu_postpone(free, subtable); ovsrcu_postpone(free, subtable);
} }