2
0
mirror of https://github.com/openvswitch/ovs synced 2025-08-31 22:35:15 +00:00

cmap, classifier: Avoid unsafe aliasing in iterators.

CMAP_FOR_EACH and CLS_FOR_EACH and their variants tried to use void ** as
a "pointer to any kind of pointer".  That is a violation of the aliasing
rules in ISO C which technically yields undefined behavior.  With GCC 4.1,
it causes both warnings and actual misbehavior.  One option would be to add
-fno-strict-aliasing to the compiler flags, but that would only help with
GCC; who knows whether this can be worked around with other compilers.

Instead, this commit rewrites the iterators to avoid disallowed pointer
aliasing.

VMware-BZ: #1287651
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
This commit is contained in:
Ben Pfaff
2014-07-21 21:00:04 -07:00
parent 480cda3a4a
commit 78c8df129c
9 changed files with 112 additions and 158 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -476,8 +476,8 @@ classifier_destroy(struct classifier *cls)
OVS_EXCLUDED(cls->mutex)
{
if (cls) {
struct cls_partition *partition, *next_partition;
struct cls_subtable *subtable, *next_subtable;
struct cls_partition *partition;
struct cls_subtable *subtable;
int i;
ovs_mutex_lock(&cls->mutex);
@@ -485,14 +485,12 @@ classifier_destroy(struct classifier *cls)
trie_destroy(&cls->tries[i].root);
}
CMAP_FOR_EACH_SAFE (subtable, next_subtable, cmap_node,
&cls->subtables_map) {
CMAP_FOR_EACH_SAFE (subtable, cmap_node, &cls->subtables_map) {
destroy_subtable(cls, subtable);
}
cmap_destroy(&cls->subtables_map);
CMAP_FOR_EACH_SAFE (partition, next_partition, cmap_node,
&cls->partitions) {
CMAP_FOR_EACH_SAFE (partition, cmap_node, &cls->partitions) {
ovsrcu_postpone(free, partition);
}
cmap_destroy(&cls->partitions);
@@ -1219,18 +1217,18 @@ search_subtable(const struct cls_subtable *subtable,
* such that cls_rule_is_loose_match(rule, target) returns true.
*
* Ignores target->priority. */
struct cls_cursor cls_cursor_init(const struct classifier *cls,
struct cls_cursor cls_cursor_start(const struct classifier *cls,
const struct cls_rule *target,
void **pnode, const void *offset, bool safe)
bool safe)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
struct cls_cursor cursor;
struct cls_subtable *subtable;
struct cls_rule *cls_rule = NULL;
cursor.safe = safe;
cursor.cls = cls;
cursor.target = target && !cls_rule_is_catchall(target) ? target : NULL;
cursor.rule = NULL;
/* Find first rule. */
ovs_mutex_lock(&cursor.cls->mutex);
@@ -1240,14 +1238,13 @@ struct cls_cursor cls_cursor_init(const struct classifier *cls,
if (rule) {
cursor.subtable = subtable;
cls_rule = rule->cls_rule;
cursor.rule = rule->cls_rule;
break;
}
}
*pnode = (char *)cls_rule + (ptrdiff_t)offset;
/* Leave locked if requested and have a rule. */
if (safe || !cls_rule) {
if (safe || !cursor.rule) {
ovs_mutex_unlock(&cursor.cls->mutex);
}
return cursor;
@@ -1258,18 +1255,19 @@ cls_cursor_next_unlock(struct cls_cursor *cursor, struct cls_rule *rule)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
/* Release the mutex if no rule, or 'safe' mode. */
cursor->rule = rule;
if (!rule || cursor->safe) {
ovs_mutex_unlock(&cursor->cls->mutex);
}
}
/* Returns the next matching cls_rule in 'cursor''s iteration, or a null
* pointer if there are no more matches. */
struct cls_rule *
cls_cursor_next(struct cls_cursor *cursor, const struct cls_rule *rule_)
/* Sets 'cursor->rule' to the next matching cls_rule in 'cursor''s iteration,
* or to null if all matching rules have been visited. */
void
cls_cursor_advance(struct cls_cursor *cursor)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
struct cls_match *rule = CONST_CAST(struct cls_match *, rule_->cls_match);
struct cls_match *rule = cursor->rule->cls_match;
const struct cls_subtable *subtable;
struct cls_match *next;
@@ -1281,7 +1279,7 @@ cls_cursor_next(struct cls_cursor *cursor, const struct cls_rule *rule_)
next = next_rule_in_list__(rule);
if (next->priority < rule->priority) {
cls_cursor_next_unlock(cursor, next->cls_rule);
return next->cls_rule;
return;
}
/* 'next' is the head of the list, that is, the rule that is included in
@@ -1291,7 +1289,7 @@ cls_cursor_next(struct cls_cursor *cursor, const struct cls_rule *rule_)
CMAP_CURSOR_FOR_EACH_CONTINUE (rule, cmap_node, &cursor->rules) {
if (rule_matches(rule, cursor->target)) {
cls_cursor_next_unlock(cursor, rule->cls_rule);
return rule->cls_rule;
return;
}
}
@@ -1301,12 +1299,12 @@ cls_cursor_next(struct cls_cursor *cursor, const struct cls_rule *rule_)
if (rule) {
cursor->subtable = subtable;
cls_cursor_next_unlock(cursor, rule->cls_rule);
return rule->cls_rule;
return;
}
}
ovs_mutex_unlock(&cursor->cls->mutex);
return NULL;
cursor->rule = NULL;
}
static struct cls_subtable *

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -316,63 +316,40 @@ struct cls_cursor {
const struct cls_rule *target;
struct cmap_cursor subtables;
struct cmap_cursor rules;
struct cls_rule *rule;
bool safe;
};
/* Iteration requires mutual exclusion of writers. We do this by taking
* a mutex for the duration of the iteration, except for the
* 'SAFE' variant, where we release the mutex for the body of the loop. */
struct cls_cursor cls_cursor_init(const struct classifier *cls,
struct cls_cursor cls_cursor_start(const struct classifier *cls,
const struct cls_rule *target,
void **pnode, const void *offset, bool safe);
bool safe);
struct cls_rule *cls_cursor_next(struct cls_cursor *cursor,
const struct cls_rule *);
#define CLS_CURSOR_START(RULE, MEMBER, CLS, TARGET) \
cls_cursor_init(CLS, (TARGET), (void **)&(RULE), \
OBJECT_CONTAINING(NULL, RULE, MEMBER), false)
#define CLS_CURSOR_START_SAFE(RULE, MEMBER, CLS, TARGET) \
cls_cursor_init(CLS, (TARGET), (void **)&(RULE), \
OBJECT_CONTAINING(NULL, RULE, MEMBER), true)
void cls_cursor_advance(struct cls_cursor *);
#define CLS_FOR_EACH(RULE, MEMBER, CLS) \
for (struct cls_cursor cursor__ = CLS_CURSOR_START(RULE, MEMBER, CLS, \
NULL); \
RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER); \
ASSIGN_CONTAINER(RULE, cls_cursor_next(&cursor__, &(RULE)->MEMBER), \
MEMBER))
CLS_FOR_EACH_TARGET(RULE, MEMBER, CLS, NULL)
#define CLS_FOR_EACH_TARGET(RULE, MEMBER, CLS, TARGET) \
for (struct cls_cursor cursor__ = CLS_CURSOR_START(RULE, MEMBER, CLS, \
TARGET); \
RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER); \
ASSIGN_CONTAINER(RULE, cls_cursor_next(&cursor__, &(RULE)->MEMBER), \
MEMBER))
/* This form allows classifier_remove() to be called within the loop. */
#define CLS_FOR_EACH_SAFE(RULE, NEXT, MEMBER, CLS) \
for (struct cls_cursor cursor__ = CLS_CURSOR_START_SAFE(RULE, MEMBER, \
CLS, NULL); \
(RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER) \
? ASSIGN_CONTAINER(NEXT, cls_cursor_next(&cursor__, \
&(RULE)->MEMBER), \
MEMBER), true \
for (struct cls_cursor cursor__ = cls_cursor_start(CLS, TARGET, false); \
(cursor__.rule \
? (ASSIGN_CONTAINER(RULE, cursor__.rule, MEMBER), \
true) \
: false); \
(RULE) = (NEXT))
cls_cursor_advance(&cursor__))
/* This form allows classifier_remove() to be called within the loop. */
#define CLS_FOR_EACH_TARGET_SAFE(RULE, NEXT, MEMBER, CLS, TARGET) \
for (struct cls_cursor cursor__ = CLS_CURSOR_START_SAFE(RULE, MEMBER, \
CLS, TARGET); \
(RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER) \
? ASSIGN_CONTAINER(NEXT, cls_cursor_next(&cursor__, \
&(RULE)->MEMBER), \
MEMBER), true \
/* These forms allow classifier_remove() to be called within the loop. */
#define CLS_FOR_EACH_SAFE(RULE, MEMBER, CLS) \
CLS_FOR_EACH_TARGET_SAFE(RULE, MEMBER, CLS, NULL)
#define CLS_FOR_EACH_TARGET_SAFE(RULE, MEMBER, CLS, TARGET) \
for (struct cls_cursor cursor__ = cls_cursor_start(CLS, TARGET, true); \
(cursor__.rule \
? (ASSIGN_CONTAINER(RULE, cursor__.rule, MEMBER), \
cls_cursor_advance(&cursor__), \
true) \
: false); \
(RULE) = (NEXT))
) \
#ifdef __cplusplus
}

View File

@@ -784,31 +784,29 @@ cmap_rehash(struct cmap *cmap, uint32_t mask)
return new;
}
/* Initializes 'cursor' for iterating through 'cmap'.
*
* Use via CMAP_FOR_EACH. */
void
cmap_cursor_init(struct cmap_cursor *cursor, const struct cmap *cmap)
struct cmap_cursor
cmap_cursor_start(const struct cmap *cmap)
{
cursor->impl = cmap_get_impl(cmap);
cursor->bucket_idx = 0;
cursor->entry_idx = 0;
struct cmap_cursor cursor;
cursor.impl = cmap_get_impl(cmap);
cursor.bucket_idx = 0;
cursor.entry_idx = 0;
cursor.node = NULL;
cmap_cursor_advance(&cursor);
return cursor;
}
/* Returns the next node for 'cursor' to visit, following 'node', or NULL if
* the last node has been visited.
*
* Use via CMAP_FOR_EACH. */
struct cmap_node *
cmap_cursor_next(struct cmap_cursor *cursor, const struct cmap_node *node)
void
cmap_cursor_advance(struct cmap_cursor *cursor)
{
const struct cmap_impl *impl = cursor->impl;
if (node) {
struct cmap_node *next = cmap_node_next(node);
if (next) {
return next;
if (cursor->node) {
cursor->node = cmap_node_next(cursor->node);
if (cursor->node) {
return;
}
}
@@ -816,18 +814,15 @@ cmap_cursor_next(struct cmap_cursor *cursor, const struct cmap_node *node)
const struct cmap_bucket *b = &impl->buckets[cursor->bucket_idx];
while (cursor->entry_idx < CMAP_K) {
struct cmap_node *node = cmap_node_next(&b->nodes[cursor->entry_idx++]);
if (node) {
return node;
cursor->node = cmap_node_next(&b->nodes[cursor->entry_idx++]);
if (cursor->node) {
return;
}
}
cursor->bucket_idx++;
cursor->entry_idx = 0;
}
return NULL;
}
/* Returns the next node in 'cmap' in hash order, or NULL if no nodes remain in

View File

@@ -169,69 +169,53 @@ struct cmap_node *cmap_find_protected(const struct cmap *, uint32_t hash);
*/
#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP) \
for ((cmap_cursor_init(CURSOR, CMAP), \
ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, NULL), MEMBER)); \
NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
MEMBER))
#define CMAP_CURSOR_FOR_EACH_SAFE(NODE, NEXT, MEMBER, CURSOR, CMAP) \
for ((cmap_cursor_init(CURSOR, CMAP), \
ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, NULL), MEMBER)); \
(NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER) \
? ASSIGN_CONTAINER(NEXT, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
MEMBER), true \
for (*(CURSOR) = cmap_cursor_start(CMAP); \
((CURSOR)->node \
? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true) \
: false); \
(NODE) = (NEXT))
cmap_cursor_advance(CURSOR))
#define CMAP_CURSOR_FOR_EACH_SAFE(NODE, MEMBER, CURSOR, CMAP) \
for (*(CURSOR) = cmap_cursor_start(CMAP); \
((CURSOR)->node \
? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), \
cmap_cursor_advance(CURSOR), \
true) \
: false); \
)
#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
for (ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
MEMBER); \
NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
MEMBER))
for (cmap_cursor_advance(CURSOR); \
((CURSOR)->node \
? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true) \
: false); \
cmap_cursor_advance(CURSOR))
struct cmap_cursor {
const struct cmap_impl *impl;
uint32_t bucket_idx;
int entry_idx;
struct cmap_node *node;
};
void cmap_cursor_init(struct cmap_cursor *, const struct cmap *);
struct cmap_node *cmap_cursor_next(struct cmap_cursor *,
const struct cmap_node *);
static inline struct cmap_cursor cmap_cursor_start(const struct cmap *cmap,
void **pnode,
const void *offset)
{
struct cmap_cursor cursor;
cmap_cursor_init(&cursor, cmap);
*pnode = (char *)cmap_cursor_next(&cursor, NULL) + (ptrdiff_t)offset;
return cursor;
}
#define CMAP_CURSOR_START(NODE, MEMBER, CMAP) \
cmap_cursor_start(CMAP, (void **)&(NODE), \
OBJECT_CONTAINING(NULL, NODE, MEMBER))
struct cmap_cursor cmap_cursor_start(const struct cmap *);
void cmap_cursor_advance(struct cmap_cursor *);
#define CMAP_FOR_EACH(NODE, MEMBER, CMAP) \
for (struct cmap_cursor cursor__ = CMAP_CURSOR_START(NODE, MEMBER, CMAP); \
NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
ASSIGN_CONTAINER(NODE, cmap_cursor_next(&cursor__, &(NODE)->MEMBER), \
MEMBER))
#define CMAP_FOR_EACH_SAFE(NODE, NEXT, MEMBER, CMAP) \
for (struct cmap_cursor cursor__ = CMAP_CURSOR_START(NODE, MEMBER, CMAP); \
(NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER) \
? ASSIGN_CONTAINER(NEXT, \
cmap_cursor_next(&cursor__, &(NODE)->MEMBER), \
MEMBER), true \
for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
(cursor__.node \
? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), true) \
: false); \
(NODE) = (NEXT))
cmap_cursor_advance(&cursor__))
#define CMAP_FOR_EACH_SAFE(NODE, MEMBER, CMAP) \
for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
(cursor__.node \
? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), \
cmap_cursor_advance(&cursor__), \
true) \
: false); \
)
static inline struct cmap_node *cmap_first(const struct cmap *);

View File

@@ -962,10 +962,10 @@ dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
static void
dp_netdev_flow_flush(struct dp_netdev *dp)
{
struct dp_netdev_flow *netdev_flow, *next;
struct dp_netdev_flow *netdev_flow;
ovs_mutex_lock(&dp->flow_mutex);
CMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
CMAP_FOR_EACH_SAFE (netdev_flow, node, &dp->flow_table) {
dp_netdev_remove_flow(dp, netdev_flow);
}
ovs_mutex_unlock(&dp->flow_mutex);

View File

@@ -1280,8 +1280,8 @@ static void
destruct(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct rule_dpif *rule, *next_rule;
struct ofproto_packet_in *pin, *next_pin;
struct rule_dpif *rule;
struct oftable *table;
struct list pins;
@@ -1297,7 +1297,7 @@ destruct(struct ofproto *ofproto_)
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
CLS_FOR_EACH_SAFE (rule, next_rule, up.cr, &table->cls) {
CLS_FOR_EACH_SAFE (rule, up.cr, &table->cls) {
ofproto_rule_delete(&ofproto->up, &rule->up);
}
}

View File

@@ -1283,13 +1283,13 @@ ofproto_flush__(struct ofproto *ofproto)
ovs_mutex_lock(&ofproto_mutex);
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
struct rule *rule, *next_rule;
struct rule *rule;
if (table->flags & OFTABLE_HIDDEN) {
continue;
}
CLS_FOR_EACH_SAFE (rule, next_rule, cr, &table->cls) {
CLS_FOR_EACH_SAFE (rule, cr, &table->cls) {
ofproto_rule_delete__(rule, OFPRR_DELETE);
}
}

View File

@@ -452,9 +452,9 @@ compare_classifiers(struct classifier *cls, struct tcls *tcls)
static void
destroy_classifier(struct classifier *cls)
{
struct test_rule *rule, *next_rule;
struct test_rule *rule;
CLS_FOR_EACH_SAFE (rule, next_rule, cls_rule, cls) {
CLS_FOR_EACH_SAFE (rule, cls_rule, cls) {
classifier_remove(cls, &rule->cls_rule);
free_rule(rule);
}
@@ -1069,12 +1069,12 @@ test_many_rules_in_n_tables(int n_tables)
}
while (!classifier_is_empty(&cls)) {
struct test_rule *rule, *next_rule;
struct test_rule *target;
struct test_rule *rule;
target = clone_rule(tcls.rules[random_range(tcls.n_rules)]);
CLS_FOR_EACH_TARGET_SAFE (rule, next_rule, cls_rule, &cls,
CLS_FOR_EACH_TARGET_SAFE (rule, cls_rule, &cls,
&target->cls_rule) {
classifier_remove(&cls, &rule->cls_rule);
free_rule(rule);

View File

@@ -2316,9 +2316,9 @@ fte_free(struct fte *fte)
static void
fte_free_all(struct classifier *cls)
{
struct fte *fte, *next;
struct fte *fte;
CLS_FOR_EACH_SAFE (fte, next, rule, cls) {
CLS_FOR_EACH_SAFE (fte, rule, cls) {
classifier_remove(cls, &fte->rule);
fte_free(fte);
}