Mirror of https://github.com/openvswitch/ovs — synced 2025-09-02 15:25:22 +00:00

cmap: Merge CMAP_FOR_EACH_SAFE into CMAP_FOR_EACH.

There isn't any significant downside to making cmap iteration "safe" all
the time, so this drops the _SAFE variant.

Similar changes to CMAP_CURSOR_FOR_EACH and CMAP_CURSOR_FOR_EACH_CONTINUE.

Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
This commit is contained in:
Ben Pfaff
2014-07-29 09:02:23 -07:00
parent 022ad2b9ce
commit 6bc3bb829c
3 changed files with 23 additions and 38 deletions

View File

@@ -485,12 +485,12 @@ classifier_destroy(struct classifier *cls)
         trie_destroy(&cls->tries[i].root);
     }
-    CMAP_FOR_EACH_SAFE (subtable, cmap_node, &cls->subtables_map) {
+    CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
         destroy_subtable(cls, subtable);
     }
     cmap_destroy(&cls->subtables_map);
-    CMAP_FOR_EACH_SAFE (partition, cmap_node, &cls->partitions) {
+    CMAP_FOR_EACH (partition, cmap_node, &cls->partitions) {
         ovsrcu_postpone(free, partition);
     }
     cmap_destroy(&cls->partitions);

View File

@@ -163,33 +163,29 @@ struct cmap_node *cmap_find_protected(const struct cmap *, uint32_t hash);
  *         ...operate on my_node...
  *     }
  *
- * CMAP_FOR_EACH_SAFE variant is useful only in deallocation code already
- * executing at postponed time, when it is known that the RCU grace period
- * has already expired.
+ * CMAP_FOR_EACH is "safe" in the sense of HMAP_FOR_EACH_SAFE.  That is, it is
+ * safe to free the current node before going on to the next iteration.  Most
+ * of the time, though, this doesn't matter for a cmap because node
+ * deallocation has to be postponed until the next grace period.  This means
+ * that this guarantee is useful only in deallocation code already executing at
+ * postponed time, when it is known that the RCU grace period has already
+ * expired.
  */
-#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP)    \
-    for (*(CURSOR) = cmap_cursor_start(CMAP);               \
-         ((CURSOR)->node                                    \
-          ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true) \
-          : false);                                         \
-         cmap_cursor_advance(CURSOR))
-
-#define CMAP_CURSOR_FOR_EACH_SAFE(NODE, MEMBER, CURSOR, CMAP) \
-    for (*(CURSOR) = cmap_cursor_start(CMAP);               \
-         ((CURSOR)->node                                    \
-          ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), \
-             cmap_cursor_advance(CURSOR),                   \
-             true)                                          \
-          : false);                                         \
-        )
+#define CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER)        \
+    ((CURSOR)->node                                         \
+     ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER),     \
+        cmap_cursor_advance(CURSOR),                        \
+        true)                                               \
+     : false)
+
+#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP)    \
+    for (*(CURSOR) = cmap_cursor_start(CMAP);               \
+         CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER);      \
+        )
 
 #define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
-    for (cmap_cursor_advance(CURSOR);                       \
-         ((CURSOR)->node                                    \
-          ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true) \
-          : false);                                         \
-         cmap_cursor_advance(CURSOR))
+    while (CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER))
 
 struct cmap_cursor {
     const struct cmap_impl *impl;
const struct cmap_impl *impl; const struct cmap_impl *impl;
@@ -203,18 +199,7 @@ void cmap_cursor_advance(struct cmap_cursor *);
 #define CMAP_FOR_EACH(NODE, MEMBER, CMAP)                       \
     for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
-         (cursor__.node                                         \
-          ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), true) \
-          : false);                                             \
-         cmap_cursor_advance(&cursor__))
-
-#define CMAP_FOR_EACH_SAFE(NODE, MEMBER, CMAP)                  \
-    for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
-         (cursor__.node                                         \
-          ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER),     \
-             cmap_cursor_advance(&cursor__),                    \
-             true)                                              \
-          : false);                                             \
+         CMAP_CURSOR_FOR_EACH__(NODE, &cursor__, MEMBER);       \
         )
 
 static inline struct cmap_node *cmap_first(const struct cmap *);

View File

@@ -925,7 +925,7 @@ dp_netdev_flow_flush(struct dp_netdev *dp)
     struct dp_netdev_flow *netdev_flow;
 
     ovs_mutex_lock(&dp->flow_mutex);
-    CMAP_FOR_EACH_SAFE (netdev_flow, node, &dp->flow_table) {
+    CMAP_FOR_EACH (netdev_flow, node, &dp->flow_table) {
         dp_netdev_remove_flow(dp, netdev_flow);
     }
     ovs_mutex_unlock(&dp->flow_mutex);