Mirror of https://github.com/openvswitch/ovs
hmap: use short version of safe loops if possible.
Using the SHORT version of the *_SAFE loops makes the code cleaner and less error-prone, so use the SHORT version and remove the extra "next" variable wherever possible for hmap and all of its derived types.

To keep both the long and short versions available without renaming the macro for every client, overload the existing name and select the appropriate version depending on the number of arguments.

Acked-by: Dumitru Ceara <dceara@redhat.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
This commit is contained in:
Parent: 860e69a8c3
Commit: 9e56549c2b
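The mechanism the commit message describes is selecting a macro variant by the number of arguments the caller supplies. The sketch below illustrates the general preprocessor trick; it is not the definition used by OVS (the real OVERLOAD_SAFE_MACRO in the OVS headers is parameterized on the long form's argument count, the `4` and `3` visible in the hunks below, and may differ in detail).

```c
/* Illustrative only: dispatch between a "long" 4-argument form and a
 * "short" 3-argument form sharing one macro name. */

/* Always expands to its 5th argument. */
#define SELECT_5TH(a1, a2, a3, a4, chosen, ...) chosen

/* With 4 caller arguments the 5th argument below is LONG; with 3 it is
 * SHORT.  The trailing empty argument keeps the variadic part non-empty. */
#define FOR_EACH_SAFE_DISPATCH(LONG, SHORT, ...) \
    SELECT_5TH(__VA_ARGS__, LONG, SHORT, )(__VA_ARGS__)

/* Example wiring, mirroring the pattern used throughout the patch. */
#define MY_FOR_EACH_SAFE_LONG(NODE, NEXT, MEMBER, MAP)  /* ...long body... */
#define MY_FOR_EACH_SAFE_SHORT(NODE, MEMBER, MAP)       /* ...short body... */
#define MY_FOR_EACH_SAFE(...) \
    FOR_EACH_SAFE_DISPATCH(MY_FOR_EACH_SAFE_LONG, \
                           MY_FOR_EACH_SAFE_SHORT, __VA_ARGS__)
```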
@@ -178,10 +178,10 @@ bool hmap_contains(const struct hmap *, const struct hmap_node *);

 /* Safe when NODE may be freed (not needed when NODE may be removed from the
  * hash map but its members remain accessible and intact). */
-#define HMAP_FOR_EACH_SAFE(NODE, NEXT, MEMBER, HMAP) \
-    HMAP_FOR_EACH_SAFE_INIT (NODE, NEXT, MEMBER, HMAP, (void) NEXT)
+#define HMAP_FOR_EACH_SAFE_LONG(NODE, NEXT, MEMBER, HMAP) \
+    HMAP_FOR_EACH_SAFE_LONG_INIT (NODE, NEXT, MEMBER, HMAP, (void) NEXT)

-#define HMAP_FOR_EACH_SAFE_INIT(NODE, NEXT, MEMBER, HMAP, ...) \
+#define HMAP_FOR_EACH_SAFE_LONG_INIT(NODE, NEXT, MEMBER, HMAP, ...) \
     for (INIT_MULTIVAR_SAFE_LONG_EXP(NODE, NEXT, MEMBER, hmap_first(HMAP), \
                                      struct hmap_node, __VA_ARGS__); \
          CONDITION_MULTIVAR_SAFE_LONG(NODE, NEXT, MEMBER, \
@@ -190,6 +190,24 @@ bool hmap_contains(const struct hmap *, const struct hmap_node *);
                                       ITER_VAR(NEXT) != NULL); \
          UPDATE_MULTIVAR_SAFE_LONG(NODE, NEXT))

+/* Short versions of HMAP_FOR_EACH_SAFE. */
+#define HMAP_FOR_EACH_SAFE_SHORT(NODE, MEMBER, HMAP) \
+    HMAP_FOR_EACH_SAFE_SHORT_INIT (NODE, MEMBER, HMAP, (void) 0)
+
+#define HMAP_FOR_EACH_SAFE_SHORT_INIT(NODE, MEMBER, HMAP, ...) \
+    for (INIT_MULTIVAR_SAFE_SHORT_EXP(NODE, MEMBER, hmap_first(HMAP), \
+                                      struct hmap_node, __VA_ARGS__); \
+         CONDITION_MULTIVAR_SAFE_SHORT(NODE, MEMBER, \
+                                       ITER_VAR(NODE) != NULL, \
+             ITER_NEXT_VAR(NODE) = hmap_next(HMAP, ITER_VAR(NODE))); \
+         UPDATE_MULTIVAR_SAFE_SHORT(NODE))
+
+#define HMAP_FOR_EACH_SAFE(...) \
+    OVERLOAD_SAFE_MACRO(HMAP_FOR_EACH_SAFE_LONG, \
+                        HMAP_FOR_EACH_SAFE_SHORT, \
+                        4, __VA_ARGS__)
+

 /* Continues an iteration from just after NODE. */
 #define HMAP_FOR_EACH_CONTINUE(NODE, MEMBER, HMAP) \
     HMAP_FOR_EACH_CONTINUE_INIT(NODE, MEMBER, HMAP, (void) 0)
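Taken together, the hunk above keeps the existing HMAP_FOR_EACH_SAFE name working for both calling conventions: with the extra NEXT variable it expands to the LONG variant, and without it to the new SHORT variant, which stashes the successor pointer internally before the loop body runs. A minimal, hypothetical caller (struct foo and destroy_all() are illustrative stand-ins, not part of the patch) might look like this:

```c
#include <stdlib.h>
#include "openvswitch/hmap.h"   /* Assumed include path for the hmap API. */

/* Hypothetical element type linked into an hmap. */
struct foo {
    struct hmap_node node;      /* Inside 'foos'. */
    int value;
};

/* Frees every element.  The short form needs no caller-declared "next"
 * variable because the macro saves the successor before the body runs. */
static void
destroy_all(struct hmap *foos)
{
    struct foo *f;

    HMAP_FOR_EACH_SAFE (f, node, foos) {
        hmap_remove(foos, &f->node);
        free(f);
    }
    hmap_destroy(foos);
}
```

The older spelling, `struct foo *f, *next; HMAP_FOR_EACH_SAFE (f, next, node, foos) { ... }`, still compiles and now resolves to HMAP_FOR_EACH_SAFE_LONG.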
@@ -41,13 +41,24 @@ struct shash {
         BUILD_ASSERT_TYPE(SHASH_NODE, struct shash_node *), \
         BUILD_ASSERT_TYPE(SHASH, struct shash *))

-#define SHASH_FOR_EACH_SAFE(SHASH_NODE, NEXT, SHASH) \
-    HMAP_FOR_EACH_SAFE_INIT ( \
+#define SHASH_FOR_EACH_SAFE_SHORT(SHASH_NODE, SHASH) \
+    HMAP_FOR_EACH_SAFE_SHORT_INIT ( \
+        SHASH_NODE, node, &(SHASH)->map, \
+        BUILD_ASSERT_TYPE(SHASH_NODE, struct shash_node *), \
+        BUILD_ASSERT_TYPE(SHASH, struct shash *))
+
+#define SHASH_FOR_EACH_SAFE_LONG(SHASH_NODE, NEXT, SHASH) \
+    HMAP_FOR_EACH_SAFE_LONG_INIT ( \
+        SHASH_NODE, NEXT, node, &(SHASH)->map, \
+        BUILD_ASSERT_TYPE(SHASH_NODE, struct shash_node *), \
+        BUILD_ASSERT_TYPE(NEXT, struct shash_node *), \
+        BUILD_ASSERT_TYPE(SHASH, struct shash *))
+
+#define SHASH_FOR_EACH_SAFE(...) \
+    OVERLOAD_SAFE_MACRO(SHASH_FOR_EACH_SAFE_LONG, \
+                        SHASH_FOR_EACH_SAFE_SHORT, \
+                        3, __VA_ARGS__)

 void shash_init(struct shash *);
 void shash_destroy(struct shash *);
 void shash_destroy_free_data(struct shash *);
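Each derived type wraps the hmap macros the same way: the SHORT wrapper drops the NEXT parameter, the LONG wrapper keeps it, and the overloaded name passes 3 to the selector because the long form of these wrappers takes three arguments. The BUILD_ASSERT_TYPE arguments are compile-time type checks only. A hypothetical caller of the short shash form (it mirrors shash_clear() further down in this patch) could be:

```c
#include <stdlib.h>
#include "shash.h"   /* Assumed include path for the shash API. */

/* Illustrative sketch: empty a string-keyed hash, freeing the nodes but
 * leaving the caller responsible for each node's 'data'. */
static void
clear_all(struct shash *sh)
{
    struct shash_node *node;

    SHASH_FOR_EACH_SAFE (node, sh) {
        hmap_remove(&sh->map, &node->node);
        free(node->name);
        free(node);
    }
}
```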
@@ -416,7 +416,7 @@ cfm_run(struct cfm *cfm) OVS_EXCLUDED(mutex)
     ovs_mutex_lock(&mutex);
     if (timer_expired(&cfm->fault_timer)) {
         long long int interval = cfm_fault_interval(cfm);
-        struct remote_mp *rmp, *rmp_next;
+        struct remote_mp *rmp;
         enum cfm_fault_reason old_cfm_fault = cfm->fault;
         uint64_t old_flap_count = cfm->flap_count;
         int old_health = cfm->health;
@@ -475,7 +475,7 @@ cfm_run(struct cfm *cfm) OVS_EXCLUDED(mutex)
             cfm->rx_packets = rx_packets;
         }

-        HMAP_FOR_EACH_SAFE (rmp, rmp_next, node, &cfm->remote_mps) {
+        HMAP_FOR_EACH_SAFE (rmp, node, &cfm->remote_mps) {
            if (!rmp->recv) {
                VLOG_INFO("%s: Received no CCM from RMP %"PRIu64" in the last"
                          " %lldms", cfm->name, rmp->mpid,

@@ -916,9 +916,9 @@ free_conjunctive_matches(struct hmap *matches,
                          struct conjunctive_match *cm_stubs, size_t n_cm_stubs)
 {
     if (hmap_count(matches) > n_cm_stubs) {
-        struct conjunctive_match *cm, *next;
+        struct conjunctive_match *cm;

-        HMAP_FOR_EACH_SAFE (cm, next, hmap_node, matches) {
+        HMAP_FOR_EACH_SAFE (cm, hmap_node, matches) {
             if (!(cm >= cm_stubs && cm < &cm_stubs[n_cm_stubs])) {
                 free(cm);
             }

@@ -189,8 +189,8 @@ dns_resolve_destroy(void)
         ub_ctx_delete(ub_ctx__);
         ub_ctx__ = NULL;

-        struct resolve_request *req, *next;
-        HMAP_FOR_EACH_SAFE (req, next, hmap_node, &all_reqs__) {
+        struct resolve_request *req;
+        HMAP_FOR_EACH_SAFE (req, hmap_node, &all_reqs__) {
             ub_resolve_free(req->ub_result);
             free(req->addr);
             free(req->name);

@@ -1930,13 +1930,13 @@ static void
 dp_netdev_free(struct dp_netdev *dp)
     OVS_REQUIRES(dp_netdev_mutex)
 {
-    struct dp_netdev_port *port, *next;
+    struct dp_netdev_port *port;
     struct tx_bond *bond;

     shash_find_and_delete(&dp_netdevs, dp->name);

     ovs_rwlock_wrlock(&dp->port_rwlock);
-    HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
+    HMAP_FOR_EACH_SAFE (port, node, &dp->ports) {
         do_del_port(dp, port);
     }
     ovs_rwlock_unlock(&dp->port_rwlock);
@@ -6341,11 +6341,11 @@ pmd_remove_stale_ports(struct dp_netdev *dp,
     OVS_EXCLUDED(pmd->port_mutex)
     OVS_REQ_RDLOCK(dp->port_rwlock)
 {
-    struct rxq_poll *poll, *poll_next;
-    struct tx_port *tx, *tx_next;
+    struct rxq_poll *poll;
+    struct tx_port *tx;

     ovs_mutex_lock(&pmd->port_mutex);
-    HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
+    HMAP_FOR_EACH_SAFE (poll, node, &pmd->poll_list) {
         struct dp_netdev_port *port = poll->rxq->port;

         if (port->need_reconfigure
@@ -6353,7 +6353,7 @@ pmd_remove_stale_ports(struct dp_netdev *dp,
             dp_netdev_del_rxq_from_pmd(pmd, poll);
         }
     }
-    HMAP_FOR_EACH_SAFE (tx, tx_next, node, &pmd->tx_ports) {
+    HMAP_FOR_EACH_SAFE (tx, node, &pmd->tx_ports) {
         struct dp_netdev_port *port = tx->port;

         if (port->need_reconfigure
@@ -6429,8 +6429,7 @@ reconfigure_datapath(struct dp_netdev *dp)
     /* We only reconfigure the ports that we determined above, because they're
      * not being used by any pmd thread at the moment. If a port fails to
      * reconfigure we remove it from the datapath. */
-    struct dp_netdev_port *next_port;
-    HMAP_FOR_EACH_SAFE (port, next_port, node, &dp->ports) {
+    HMAP_FOR_EACH_SAFE (port, node, &dp->ports) {
         int err;

         if (!port->need_reconfigure) {
@@ -6486,10 +6485,10 @@ reconfigure_datapath(struct dp_netdev *dp)
     }

     CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
-        struct rxq_poll *poll, *poll_next;
+        struct rxq_poll *poll;

         ovs_mutex_lock(&pmd->port_mutex);
-        HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) {
+        HMAP_FOR_EACH_SAFE (poll, node, &pmd->poll_list) {
             if (poll->rxq->pmd != pmd) {
                 dp_netdev_del_rxq_from_pmd(pmd, poll);

@@ -123,9 +123,9 @@ hmapx_add_assert(struct hmapx *map, void *data)
 void
 hmapx_clear(struct hmapx *map)
 {
-    struct hmapx_node *node, *next;
+    struct hmapx_node *node;

-    HMAPX_FOR_EACH_SAFE (node, next, map) {
+    HMAPX_FOR_EACH_SAFE (node, map) {
         hmapx_delete(map, node);
     }
 }
lib/hmapx.h: 14 lines changed
@@ -67,10 +67,20 @@ bool hmapx_equals(const struct hmapx *, const struct hmapx *);

 /* Safe when NODE may be freed (not needed when NODE may be removed from the
  * hash map but its members remain accessible and intact). */
-#define HMAPX_FOR_EACH_SAFE(NODE, NEXT, HMAPX) \
-    HMAP_FOR_EACH_SAFE_INIT(NODE, NEXT, hmap_node, &(HMAPX)->map, \
+#define HMAPX_FOR_EACH_SAFE_SHORT(NODE, HMAPX) \
+    HMAP_FOR_EACH_SAFE_SHORT_INIT (NODE, hmap_node, &(HMAPX)->map, \
+        BUILD_ASSERT_TYPE(NODE, struct hmapx_node *), \
+        BUILD_ASSERT_TYPE(HMAPX, struct hmapx *))
+
+#define HMAPX_FOR_EACH_SAFE_LONG(NODE, NEXT, HMAPX) \
+    HMAP_FOR_EACH_SAFE_LONG_INIT (NODE, NEXT, hmap_node, &(HMAPX)->map, \
+        BUILD_ASSERT_TYPE(NODE, struct hmapx_node *), \
+        BUILD_ASSERT_TYPE(NEXT, struct hmapx_node *), \
+        BUILD_ASSERT_TYPE(HMAPX, struct hmapx *))
+
+#define HMAPX_FOR_EACH_SAFE(...) \
+    OVERLOAD_SAFE_MACRO(HMAPX_FOR_EACH_SAFE_LONG, \
+                        HMAPX_FOR_EACH_SAFE_SHORT, \
+                        3, __VA_ARGS__)

 #endif /* hmapx.h */
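Because an hmapx node always embeds its hmap_node under the same member name, the HMAPX wrappers take no MEMBER argument at all. As a hedged illustration (this helper is hypothetical and not part of the patch), the short form turns a "free the stored pointers and empty the set" loop into a one-variable affair:

```c
#include <stdlib.h>
#include "hmapx.h"   /* Assumed include path for the hmapx API. */

/* Hypothetical helper: free every pointer stored in the set, then remove
 * the nodes themselves via hmapx_delete(), as hmapx_clear() above does. */
static void
hmapx_clear_free_data(struct hmapx *map)
{
    struct hmapx_node *node;

    HMAPX_FOR_EACH_SAFE (node, map) {
        free(node->data);
        hmapx_delete(map, node);
    }
}
```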
@@ -397,9 +397,9 @@ json_destroy__(struct json *json)
 static void
 json_destroy_object(struct shash *object)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;

-    SHASH_FOR_EACH_SAFE (node, next, object) {
+    SHASH_FOR_EACH_SAFE (node, object) {
         struct json *value = node->data;

         json_destroy(value);

@@ -280,10 +280,10 @@ void
 lacp_unref(struct lacp *lacp) OVS_EXCLUDED(mutex)
 {
     if (lacp && ovs_refcount_unref_relaxed(&lacp->ref_cnt) == 1) {
-        struct member *member, *next;
+        struct member *member;

         lacp_lock();
-        HMAP_FOR_EACH_SAFE (member, next, node, &lacp->members) {
+        HMAP_FOR_EACH_SAFE (member, node, &lacp->members) {
             member_destroy(member);
         }

@@ -244,10 +244,10 @@ void
 mac_learning_unref(struct mac_learning *ml)
 {
     if (ml && ovs_refcount_unref(&ml->ref_cnt) == 1) {
-        struct mac_entry *e, *next;
+        struct mac_entry *e;

         ovs_rwlock_wrlock(&ml->rwlock);
-        HMAP_FOR_EACH_SAFE (e, next, hmap_node, &ml->table) {
+        HMAP_FOR_EACH_SAFE (e, hmap_node, &ml->table) {
             mac_learning_expire(ml, e);
         }
         hmap_destroy(&ml->table);

@@ -90,9 +90,9 @@ void
 namemap_destroy(struct namemap *map)
 {
     if (map) {
-        struct namemap_node *node, *next;
+        struct namemap_node *node;

-        HMAP_FOR_EACH_SAFE (node, next, name_node, &map->by_name) {
+        HMAP_FOR_EACH_SAFE (node, name_node, &map->by_name) {
             hmap_remove(&map->by_name, &node->name_node);
             hmap_remove(&map->by_number, &node->number_node);
             free(node->name);

@@ -4690,11 +4690,11 @@ trtcm_policer_qos_construct(const struct smap *details,
 static void
 trtcm_policer_qos_destruct(struct qos_conf *conf)
 {
-    struct trtcm_policer_queue *queue, *next_queue;
+    struct trtcm_policer_queue *queue;
     struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                  qos_conf);

-    HMAP_FOR_EACH_SAFE (queue, next_queue, hmap_node, &policer->queues) {
+    HMAP_FOR_EACH_SAFE (queue, hmap_node, &policer->queues) {
         hmap_remove(&policer->queues, &queue->hmap_node);
         free(queue);
     }
@@ -5331,11 +5331,11 @@ static void
 hfsc_tc_destroy(struct tc *tc)
 {
     struct hfsc *hfsc;
-    struct hfsc_class *hc, *next;
+    struct hfsc_class *hc;

     hfsc = CONTAINER_OF(tc, struct hfsc, tc);

-    HMAP_FOR_EACH_SAFE (hc, next, tc_queue.hmap_node, &hfsc->tc.queues) {
+    HMAP_FOR_EACH_SAFE (hc, tc_queue.hmap_node, &hfsc->tc.queues) {
         hmap_remove(&hfsc->tc.queues, &hc->tc_queue.hmap_node);
         free(hc);
     }

@@ -417,11 +417,11 @@ delete_chains_from_netdev(struct netdev *netdev, struct tcf_id *id)
 static int
 netdev_tc_flow_flush(struct netdev *netdev)
 {
-    struct ufid_tc_data *data, *next;
+    struct ufid_tc_data *data;
     int err;

     ovs_mutex_lock(&ufid_lock);
-    HMAP_FOR_EACH_SAFE (data, next, tc_to_ufid_node, &tc_to_ufid) {
+    HMAP_FOR_EACH_SAFE (data, tc_to_ufid_node, &tc_to_ufid) {
         if (data->netdev != netdev) {
             continue;
         }

@@ -1123,8 +1123,8 @@ ofpmp_partial_error(struct hmap *assembler, struct ofpmp_partial *p,
 void
 ofpmp_assembler_clear(struct hmap *assembler)
 {
-    struct ofpmp_partial *p, *next;
-    HMAP_FOR_EACH_SAFE (p, next, hmap_node, assembler) {
+    struct ofpmp_partial *p;
+    HMAP_FOR_EACH_SAFE (p, hmap_node, assembler) {
         ofpmp_partial_destroy(assembler, p);
     }
 }

@@ -900,8 +900,8 @@ ovsdb_cs_db_get_table(struct ovsdb_cs_db *db, const char *table)
 static void
 ovsdb_cs_db_destroy_tables(struct ovsdb_cs_db *db)
 {
-    struct ovsdb_cs_db_table *table, *next;
-    HMAP_FOR_EACH_SAFE (table, next, hmap_node, &db->tables) {
+    struct ovsdb_cs_db_table *table;
+    HMAP_FOR_EACH_SAFE (table, hmap_node, &db->tables) {
         json_destroy(table->ack_cond);
         json_destroy(table->req_cond);
         json_destroy(table->new_cond);
@@ -1794,8 +1794,8 @@ ovsdb_cs_update_server_row(struct server_row *row,
 static void
 ovsdb_cs_clear_server_rows(struct ovsdb_cs *cs)
 {
-    struct server_row *row, *next;
-    HMAP_FOR_EACH_SAFE (row, next, hmap_node, &cs->server_rows) {
+    struct server_row *row;
+    HMAP_FOR_EACH_SAFE (row, hmap_node, &cs->server_rows) {
         ovsdb_cs_delete_server_row(cs, row);
     }
 }
@@ -2129,9 +2129,9 @@ void
 ovsdb_cs_free_schema(struct shash *schema)
 {
     if (schema) {
-        struct shash_node *node, *next;
+        struct shash_node *node;

-        SHASH_FOR_EACH_SAFE (node, next, schema) {
+        SHASH_FOR_EACH_SAFE (node, schema) {
             struct sset *sset = node->data;
             sset_destroy(sset);
             free(sset);

@@ -389,13 +389,13 @@ ovsdb_idl_clear(struct ovsdb_idl *db)
      */
     for (size_t i = 0; i < db->class_->n_tables; i++) {
         struct ovsdb_idl_table *table = &db->tables[i];
-        struct ovsdb_idl_row *row, *next_row;
+        struct ovsdb_idl_row *row;

         if (hmap_is_empty(&table->rows)) {
             continue;
         }

-        HMAP_FOR_EACH_SAFE (row, next_row, hmap_node, &table->rows) {
+        HMAP_FOR_EACH_SAFE (row, hmap_node, &table->rows) {
             struct ovsdb_idl_arc *arc;

             if (!ovsdb_idl_row_is_orphan(row)) {
@@ -1041,8 +1041,8 @@ ovsdb_idl_condition_destroy(struct ovsdb_idl_condition *cond)
 void
 ovsdb_idl_condition_clear(struct ovsdb_idl_condition *cond)
 {
-    struct ovsdb_idl_clause *clause, *next;
-    HMAP_FOR_EACH_SAFE (clause, next, hmap_node, &cond->clauses) {
+    struct ovsdb_idl_clause *clause;
+    HMAP_FOR_EACH_SAFE (clause, hmap_node, &cond->clauses) {
         hmap_remove(&cond->clauses, &clause->hmap_node);
         ovsdb_idl_clause_destroy(clause);
     }
@@ -2729,7 +2729,7 @@ ovsdb_idl_txn_increment(struct ovsdb_idl_txn *txn,
 void
 ovsdb_idl_txn_destroy(struct ovsdb_idl_txn *txn)
 {
-    struct ovsdb_idl_txn_insert *insert, *next;
+    struct ovsdb_idl_txn_insert *insert;

     if (txn->status == TXN_INCOMPLETE) {
         ovsdb_cs_forget_transaction(txn->idl->cs, txn->request_id);
@@ -2739,7 +2739,7 @@ ovsdb_idl_txn_destroy(struct ovsdb_idl_txn *txn)
     ovsdb_idl_txn_abort(txn);
     ds_destroy(&txn->comment);
     free(txn->error);
-    HMAP_FOR_EACH_SAFE (insert, next, hmap_node, &txn->inserted_rows) {
+    HMAP_FOR_EACH_SAFE (insert, hmap_node, &txn->inserted_rows) {
         free(insert);
     }
     hmap_destroy(&txn->inserted_rows);
@@ -2824,7 +2824,7 @@ substitute_uuids(struct json *json, const struct ovsdb_idl_txn *txn)
 static void
 ovsdb_idl_txn_disassemble(struct ovsdb_idl_txn *txn)
 {
-    struct ovsdb_idl_row *row, *next;
+    struct ovsdb_idl_row *row;

     /* This must happen early. Otherwise, ovsdb_idl_row_parse() will call an
      * ovsdb_idl_column's 'parse' function, which will call
@@ -2832,7 +2832,7 @@ ovsdb_idl_txn_disassemble(struct ovsdb_idl_txn *txn)
      * transaction and fail to update the graph. */
     txn->idl->txn = NULL;

-    HMAP_FOR_EACH_SAFE (row, next, txn_node, &txn->txn_rows) {
+    HMAP_FOR_EACH_SAFE (row, txn_node, &txn->txn_rows) {
         enum { INSERTED, MODIFIED, DELETED } op
             = (!row->new_datum ? DELETED
                : !row->old_datum ? INSERTED

@@ -91,8 +91,8 @@ map_op_list_create(void)
 void
 map_op_list_destroy(struct map_op_list *list, const struct ovsdb_type *type)
 {
-    struct map_op *map_op, *next;
-    HMAP_FOR_EACH_SAFE (map_op, next, node, &list->hmap) {
+    struct map_op *map_op;
+    HMAP_FOR_EACH_SAFE (map_op, node, &list->hmap) {
         map_op_destroy(map_op, type);
     }
     hmap_destroy(&list->hmap);

@@ -90,8 +90,8 @@ set_op_list_create(void)
 void
 set_op_list_destroy(struct set_op_list *list, const struct ovsdb_type *type)
 {
-    struct set_op *set_op, *next;
-    HMAP_FOR_EACH_SAFE (set_op, next, node, &list->hmap) {
+    struct set_op *set_op;
+    HMAP_FOR_EACH_SAFE (set_op, node, &list->hmap) {
         set_op_destroy(set_op, type);
     }
     hmap_destroy(&list->hmap);

@@ -344,9 +344,9 @@ tcp_reader_open(void)
 void
 tcp_reader_close(struct tcp_reader *r)
 {
-    struct tcp_stream *stream, *next_stream;
+    struct tcp_stream *stream;

-    HMAP_FOR_EACH_SAFE (stream, next_stream, hmap_node, &r->streams) {
+    HMAP_FOR_EACH_SAFE (stream, hmap_node, &r->streams) {
         tcp_stream_destroy(r, stream);
     }
     hmap_destroy(&r->streams);

@@ -178,14 +178,14 @@ perf_counters_clear(void)
 void
 perf_counters_destroy(void)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;

     if (fd__ != -1) {
         ioctl(fd__, PERF_EVENT_IOC_DISABLE, 0);
         close(fd__);
     }

-    SHASH_FOR_EACH_SAFE (node, next, &perf_counters) {
+    SHASH_FOR_EACH_SAFE (node, &perf_counters) {
         shash_delete(&perf_counters, node);
     }

@@ -298,9 +298,9 @@ log_wakeup(const char *where, const struct pollfd *pollfd, int timeout)
 static void
 free_poll_nodes(struct poll_loop *loop)
 {
-    struct poll_node *node, *next;
+    struct poll_node *node;

-    HMAP_FOR_EACH_SAFE (node, next, hmap_node, &loop->poll_nodes) {
+    HMAP_FOR_EACH_SAFE (node, hmap_node, &loop->poll_nodes) {
         hmap_remove(&loop->poll_nodes, &node->hmap_node);
 #ifdef _WIN32
         if (node->wevent && node->pollfd.fd) {

@@ -319,9 +319,9 @@ static void
 seq_wake_waiters(struct seq *seq)
     OVS_REQUIRES(seq_mutex)
 {
-    struct seq_waiter *waiter, *next_waiter;
+    struct seq_waiter *waiter;

-    HMAP_FOR_EACH_SAFE (waiter, next_waiter, hmap_node, &seq->waiters) {
+    HMAP_FOR_EACH_SAFE (waiter, hmap_node, &seq->waiters) {
         latch_set(&waiter->thread->latch);
         seq_waiter_destroy(waiter);
     }

@@ -68,9 +68,9 @@ shash_moved(struct shash *sh)
 void
 shash_clear(struct shash *sh)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;

-    SHASH_FOR_EACH_SAFE (node, next, sh) {
+    SHASH_FOR_EACH_SAFE (node, sh) {
         hmap_remove(&sh->map, &node->node);
         free(node->name);
         free(node);
@@ -81,9 +81,9 @@ shash_clear(struct shash *sh)
 void
 shash_clear_free_data(struct shash *sh)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;

-    SHASH_FOR_EACH_SAFE (node, next, sh) {
+    SHASH_FOR_EACH_SAFE (node, sh) {
         hmap_remove(&sh->map, &node->node);
         free(node->data);
         free(node->name);

@@ -63,9 +63,9 @@ simap_moved(struct simap *simap)
 void
 simap_clear(struct simap *simap)
 {
-    struct simap_node *node, *next;
+    struct simap_node *node;

-    SIMAP_FOR_EACH_SAFE (node, next, simap) {
+    SIMAP_FOR_EACH_SAFE (node, simap) {
         hmap_remove(&simap->map, &node->node);
         free(node->name);
         free(node);
lib/simap.h: 14 lines changed
@@ -41,12 +41,22 @@ struct simap_node {
         BUILD_ASSERT_TYPE(SIMAP_NODE, struct simap_node *), \
         BUILD_ASSERT_TYPE(SIMAP, struct simap *))

-#define SIMAP_FOR_EACH_SAFE(SIMAP_NODE, NEXT, SIMAP) \
-    HMAP_FOR_EACH_SAFE_INIT (SIMAP_NODE, NEXT, node, &(SIMAP)->map, \
+#define SIMAP_FOR_EACH_SAFE_SHORT(SIMAP_NODE, SIMAP) \
+    HMAP_FOR_EACH_SAFE_SHORT_INIT (SIMAP_NODE, node, &(SIMAP)->map, \
+        BUILD_ASSERT_TYPE(SIMAP_NODE, struct simap_node *), \
+        BUILD_ASSERT_TYPE(SIMAP, struct simap *))
+
+#define SIMAP_FOR_EACH_SAFE_LONG(SIMAP_NODE, NEXT, SIMAP) \
+    HMAP_FOR_EACH_SAFE_LONG_INIT (SIMAP_NODE, NEXT, node, &(SIMAP)->map, \
+        BUILD_ASSERT_TYPE(SIMAP_NODE, struct simap_node *), \
+        BUILD_ASSERT_TYPE(NEXT, struct simap_node *), \
+        BUILD_ASSERT_TYPE(SIMAP, struct simap *))
+
+#define SIMAP_FOR_EACH_SAFE(...) \
+    OVERLOAD_SAFE_MACRO(SIMAP_FOR_EACH_SAFE_LONG, \
+                        SIMAP_FOR_EACH_SAFE_SHORT, \
+                        3, __VA_ARGS__)

 void simap_init(struct simap *);
 void simap_destroy(struct simap *);
 void simap_swap(struct simap *, struct simap *);
@@ -185,9 +185,9 @@ smap_steal(struct smap *smap, struct smap_node *node,
 void
 smap_clear(struct smap *smap)
 {
-    struct smap_node *node, *next;
+    struct smap_node *node;

-    SMAP_FOR_EACH_SAFE (node, next, smap) {
+    SMAP_FOR_EACH_SAFE (node, smap) {
         smap_remove_node(smap, node);
     }
 }
lib/smap.h: 15 lines changed
@@ -45,13 +45,24 @@ struct smap_node {
         BUILD_ASSERT_TYPE(SMAP_NODE, struct smap_node *), \
         BUILD_ASSERT_TYPE(SMAP, struct smap *))

-#define SMAP_FOR_EACH_SAFE(SMAP_NODE, NEXT, SMAP) \
-    HMAP_FOR_EACH_SAFE_INIT ( \
+#define SMAP_FOR_EACH_SAFE_SHORT(SMAP_NODE, SMAP) \
+    HMAP_FOR_EACH_SAFE_SHORT_INIT ( \
+        SMAP_NODE, node, &(SMAP)->map, \
+        BUILD_ASSERT_TYPE(SMAP_NODE, struct smap_node *), \
+        BUILD_ASSERT_TYPE(SMAP, struct smap *))
+
+#define SMAP_FOR_EACH_SAFE_LONG(SMAP_NODE, NEXT, SMAP) \
+    HMAP_FOR_EACH_SAFE_LONG_INIT ( \
+        SMAP_NODE, NEXT, node, &(SMAP)->map, \
+        BUILD_ASSERT_TYPE(SMAP_NODE, struct smap_node *), \
+        BUILD_ASSERT_TYPE(NEXT, struct smap_node *), \
+        BUILD_ASSERT_TYPE(SMAP, struct smap *))
+
+#define SMAP_FOR_EACH_SAFE(...) \
+    OVERLOAD_SAFE_MACRO(SMAP_FOR_EACH_SAFE_LONG, \
+                        SMAP_FOR_EACH_SAFE_SHORT, \
+                        3, __VA_ARGS__)

 /* Initializer for an immutable struct smap 'SMAP' that contains one or two
  * key-value pairs, e.g.
  *
@@ -464,7 +464,7 @@ stopwatch_thread(void *ign OVS_UNUSED)
 static void
 stopwatch_exit(void)
 {
-    struct shash_node *node, *node_next;
+    struct shash_node *node;
     struct stopwatch_packet *pkt = stopwatch_packet_create(OP_SHUTDOWN);
     stopwatch_packet_write(pkt);
     xpthread_join(stopwatch_thread_id, NULL);
@@ -473,7 +473,7 @@ stopwatch_exit(void)
      * other competing thread. We are now the sole owners
      * of all data in the file.
      */
-    SHASH_FOR_EACH_SAFE (node, node_next, &stopwatches) {
+    SHASH_FOR_EACH_SAFE (node, &stopwatches) {
         struct stopwatch *sw = node->data;
         shash_delete(&stopwatches, node);
         free(sw);

@@ -338,7 +338,7 @@ static void
 update_recirc_rules__(struct bond *bond)
 {
     struct match match;
-    struct bond_pr_rule_op *pr_op, *next_op;
+    struct bond_pr_rule_op *pr_op;
     uint64_t ofpacts_stub[128 / 8];
     struct ofpbuf ofpacts;
     int i;
@@ -372,7 +372,7 @@ update_recirc_rules__(struct bond *bond)

     ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);

-    HMAP_FOR_EACH_SAFE(pr_op, next_op, hmap_node, &bond->pr_rule_ops) {
+    HMAP_FOR_EACH_SAFE (pr_op, hmap_node, &bond->pr_rule_ops) {
         int error;
         switch (pr_op->op) {
         case ADD:

@@ -310,8 +310,8 @@ connmgr_destroy(struct connmgr *mgr)
         return;
     }

-    struct ofservice *ofservice, *next_ofservice;
-    HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, hmap_node, &mgr->services) {
+    struct ofservice *ofservice;
+    HMAP_FOR_EACH_SAFE (ofservice, hmap_node, &mgr->services) {
         ofservice_destroy(ofservice);
     }
     hmap_destroy(&mgr->services);
@@ -592,8 +592,8 @@ connmgr_set_controllers(struct connmgr *mgr, struct shash *controllers)

     /* Delete services that are no longer configured.
      * Update configuration of all now-existing services. */
-    struct ofservice *ofservice, *next_ofservice;
-    HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, hmap_node, &mgr->services) {
+    struct ofservice *ofservice;
+    HMAP_FOR_EACH_SAFE (ofservice, hmap_node, &mgr->services) {
         const char *target = ofservice->target;
         struct ofproto_controller *c = shash_find_data(controllers, target);
         if (!c) {
@@ -1137,9 +1137,9 @@ ofconn_remove_bundle(struct ofconn *ofconn, struct ofp_bundle *bundle)
 static void
 bundle_remove_all(struct ofconn *ofconn)
 {
-    struct ofp_bundle *b, *next;
+    struct ofp_bundle *b;

-    HMAP_FOR_EACH_SAFE (b, next, node, &ofconn->bundles) {
+    HMAP_FOR_EACH_SAFE (b, node, &ofconn->bundles) {
         ofp_bundle_remove__(ofconn, b);
     }
 }
@@ -1149,8 +1149,8 @@ bundle_remove_expired(struct ofconn *ofconn, long long int now)
 {
     long long int limit = now - bundle_idle_timeout;

-    struct ofp_bundle *b, *next;
-    HMAP_FOR_EACH_SAFE (b, next, node, &ofconn->bundles) {
+    struct ofp_bundle *b;
+    HMAP_FOR_EACH_SAFE (b, node, &ofconn->bundles) {
         if (b->used <= limit) {
             ofconn_send_error(ofconn, b->msg, OFPERR_OFPBFC_TIMEOUT);
             ofp_bundle_remove__(ofconn, b);
@@ -1247,8 +1247,8 @@ ofconn_destroy(struct ofconn *ofconn)

     free(ofconn->async_cfg);

-    struct ofmonitor *monitor, *next_monitor;
-    HMAP_FOR_EACH_SAFE (monitor, next_monitor, ofconn_node,
+    struct ofmonitor *monitor;
+    HMAP_FOR_EACH_SAFE (monitor, ofconn_node,
                         &ofconn->monitors) {
         ofmonitor_destroy(monitor);
     }

@@ -377,7 +377,7 @@ in_band_run(struct in_band *ib)
     uint64_t ofpacts_stub[128 / 8];
     struct ofpbuf ofpacts;

-    struct in_band_rule *rule, *next;
+    struct in_band_rule *rule;

     ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);

@@ -391,7 +391,7 @@ in_band_run(struct in_band *ib)

     update_rules(ib);

-    HMAP_FOR_EACH_SAFE (rule, next, hmap_node, &ib->rules) {
+    HMAP_FOR_EACH_SAFE (rule, hmap_node, &ib->rules) {
         switch (rule->op) {
         case ADD:
             ofproto_add_flow(ib->ofproto, &rule->match, rule->priority,

@@ -299,7 +299,7 @@ static void
 netflow_run__(struct netflow *nf) OVS_REQUIRES(mutex)
 {
     long long int now = time_msec();
-    struct netflow_flow *nf_flow, *next;
+    struct netflow_flow *nf_flow;

     if (nf->packet.size) {
         collectors_send(nf->collectors, nf->packet.data, nf->packet.size);
@@ -312,7 +312,7 @@ netflow_run__(struct netflow *nf) OVS_REQUIRES(mutex)

     nf->next_timeout = now + 1000;

-    HMAP_FOR_EACH_SAFE (nf_flow, next, hmap_node, &nf->flows) {
+    HMAP_FOR_EACH_SAFE (nf_flow, hmap_node, &nf->flows) {
         if (now > nf_flow->last_expired + nf->active_timeout) {
             bool idle = nf_flow->used < nf_flow->last_expired;
             netflow_expire__(nf, nf_flow);
@@ -416,8 +416,8 @@ netflow_unref(struct netflow *nf)
         collectors_destroy(nf->collectors);
         ofpbuf_uninit(&nf->packet);

-        struct netflow_flow *nf_flow, *next;
-        HMAP_FOR_EACH_SAFE (nf_flow, next, hmap_node, &nf->flows) {
+        struct netflow_flow *nf_flow;
+        HMAP_FOR_EACH_SAFE (nf_flow, hmap_node, &nf->flows) {
             hmap_remove(&nf->flows, &nf_flow->hmap_node);
             free(nf_flow);
         }

@@ -1078,7 +1078,7 @@ dpif_ipfix_set_options(
 {
     int i;
     struct ofproto_ipfix_flow_exporter_options *options;
-    struct dpif_ipfix_flow_exporter_map_node *node, *next;
+    struct dpif_ipfix_flow_exporter_map_node *node;

     ovs_mutex_lock(&mutex);
     dpif_ipfix_bridge_exporter_set_options(&di->bridge_exporter,
@@ -1103,7 +1103,7 @@ dpif_ipfix_set_options(
     }

     /* Remove dropped flow exporters, if any needs to be removed. */
-    HMAP_FOR_EACH_SAFE (node, next, node, &di->flow_exporter_map) {
+    HMAP_FOR_EACH_SAFE (node, node, &di->flow_exporter_map) {
         /* This is slow but doesn't take any extra memory, and
          * this table is not supposed to contain many rows anyway. */
         options = (struct ofproto_ipfix_flow_exporter_options *)
@@ -1215,7 +1215,7 @@ static void
 dpif_ipfix_clear(struct dpif_ipfix *di) OVS_REQUIRES(mutex)
 {
     struct dpif_ipfix_flow_exporter_map_node *exp_node;
-    struct dpif_ipfix_port *dip, *next;
+    struct dpif_ipfix_port *dip;

     dpif_ipfix_bridge_exporter_clear(&di->bridge_exporter);

@@ -1224,7 +1224,7 @@ dpif_ipfix_clear(struct dpif_ipfix *di) OVS_REQUIRES(mutex)
         free(exp_node);
     }

-    HMAP_FOR_EACH_SAFE (dip, next, hmap_node, &di->ports) {
+    HMAP_FOR_EACH_SAFE (dip, hmap_node, &di->ports) {
         dpif_ipfix_del_port__(di, dip);
     }
 }

@@ -591,10 +591,10 @@ void
 dpif_sflow_unref(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
 {
     if (ds && ovs_refcount_unref_relaxed(&ds->ref_cnt) == 1) {
-        struct dpif_sflow_port *dsp, *next;
+        struct dpif_sflow_port *dsp;

         dpif_sflow_clear(ds);
-        HMAP_FOR_EACH_SAFE (dsp, next, hmap_node, &ds->ports) {
+        HMAP_FOR_EACH_SAFE (dsp, hmap_node, &ds->ports) {
             dpif_sflow_del_port__(ds, dsp);
         }
         hmap_destroy(&ds->ports);
@@ -1222,13 +1222,13 @@ xlate_txn_start(void)
 static void
 xlate_xcfg_free(struct xlate_cfg *xcfg)
 {
-    struct xbridge *xbridge, *next_xbridge;
+    struct xbridge *xbridge;

     if (!xcfg) {
         return;
     }

-    HMAP_FOR_EACH_SAFE (xbridge, next_xbridge, hmap_node, &xcfg->xbridges) {
+    HMAP_FOR_EACH_SAFE (xbridge, hmap_node, &xcfg->xbridges) {
         xlate_xbridge_remove(xcfg, xbridge);
     }

@@ -1283,13 +1283,13 @@ static void
 xlate_xbridge_remove(struct xlate_cfg *xcfg, struct xbridge *xbridge)
 {
     struct xbundle *xbundle;
-    struct xport *xport, *next_xport;
+    struct xport *xport;

     if (!xbridge) {
         return;
     }

-    HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
+    HMAP_FOR_EACH_SAFE (xport, ofp_node, &xbridge->xports) {
         xlate_xport_remove(xcfg, xport);
     }

@@ -1659,7 +1659,7 @@ static int
 construct(struct ofproto *ofproto_)
 {
     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-    struct shash_node *node, *next;
+    struct shash_node *node;
     int error;

     /* Tunnel module can get used right after the udpif threads are running. */
@@ -1697,7 +1697,7 @@ construct(struct ofproto *ofproto_)
     ofproto->ams_seqno = seq_read(ofproto->ams_seq);


-    SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
+    SHASH_FOR_EACH_SAFE (node, &init_ofp_ports) {
         struct iface_hint *iface_hint = node->data;

         if (!strcmp(iface_hint->br_name, ofproto->up.name)) {

@@ -1754,7 +1754,7 @@ void
 ofproto_destroy(struct ofproto *p, bool del)
     OVS_EXCLUDED(ofproto_mutex)
 {
-    struct ofport *ofport, *next_ofport;
+    struct ofport *ofport;
     struct ofport_usage *usage;

     if (!p) {
@@ -1762,7 +1762,7 @@ ofproto_destroy(struct ofproto *p, bool del)
     }

     ofproto_flush__(p, del);
-    HMAP_FOR_EACH_SAFE (ofport, next_ofport, hmap_node, &p->ports) {
+    HMAP_FOR_EACH_SAFE (ofport, hmap_node, &p->ports) {
         ofport_destroy(ofport, del);
     }

@@ -2826,7 +2826,7 @@ init_ports(struct ofproto *p)
 {
     struct ofproto_port_dump dump;
     struct ofproto_port ofproto_port;
-    struct shash_node *node, *next;
+    struct shash_node *node;

     OFPROTO_PORT_FOR_EACH (&ofproto_port, &dump, p) {
         const char *name = ofproto_port.name;
@@ -2857,7 +2857,7 @@ init_ports(struct ofproto *p)
         }
     }

-    SHASH_FOR_EACH_SAFE(node, next, &init_ofp_ports) {
+    SHASH_FOR_EACH_SAFE (node, &init_ofp_ports) {
         struct iface_hint *iface_hint = node->data;

         if (!strcmp(iface_hint->br_name, p->name)) {
@@ -6852,9 +6852,9 @@ static void
 meter_delete_all(struct ofproto *ofproto)
     OVS_REQUIRES(ofproto_mutex)
 {
-    struct meter *meter, *next;
+    struct meter *meter;

-    HMAP_FOR_EACH_SAFE (meter, next, node, &ofproto->meters) {
+    HMAP_FOR_EACH_SAFE (meter, node, &ofproto->meters) {
         hmap_remove(&ofproto->meters, &meter->node);
         meter_destroy(ofproto, meter);
     }
@@ -9199,8 +9199,8 @@ oftable_configure_eviction(struct oftable *table, unsigned int eviction,

     /* Destroy existing eviction groups, then destroy and recreate data
      * structures to recover memory. */
-    struct eviction_group *evg, *next;
-    HMAP_FOR_EACH_SAFE (evg, next, id_node, &table->eviction_groups_by_id) {
+    struct eviction_group *evg;
+    HMAP_FOR_EACH_SAFE (evg, id_node, &table->eviction_groups_by_id) {
         eviction_group_destroy(table, evg);
     }
     hmap_destroy(&table->eviction_groups_by_id);
@@ -220,13 +220,13 @@ ovsdb_condition_optimize(struct ovsdb_condition *cnd)
 static void
 ovsdb_condition_optimize_destroy(struct ovsdb_condition *cnd)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;

-    SHASH_FOR_EACH_SAFE (node, next, &cnd->o_columns) {
+    SHASH_FOR_EACH_SAFE (node, &cnd->o_columns) {
         struct ovsdb_o_column *o_column = node->data;
-        struct ovsdb_o_clause *c, *c_next;
+        struct ovsdb_o_clause *c;

-        HMAP_FOR_EACH_SAFE(c, c_next, hmap_node, &o_column->o_clauses) {
+        HMAP_FOR_EACH_SAFE (c, hmap_node, &o_column->o_clauses) {
             hmap_remove(&o_column->o_clauses, &c->hmap_node);
             free(c);
         }

@@ -197,9 +197,9 @@ ovsdb_jsonrpc_server_remove_db(struct ovsdb_jsonrpc_server *svr,
 void
 ovsdb_jsonrpc_server_destroy(struct ovsdb_jsonrpc_server *svr)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;

-    SHASH_FOR_EACH_SAFE (node, next, &svr->remotes) {
+    SHASH_FOR_EACH_SAFE (node, &svr->remotes) {
         ovsdb_jsonrpc_server_del_remote(node);
     }
     shash_destroy(&svr->remotes);
@@ -227,9 +227,9 @@ void
 ovsdb_jsonrpc_server_set_remotes(struct ovsdb_jsonrpc_server *svr,
                                  const struct shash *new_remotes)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;

-    SHASH_FOR_EACH_SAFE (node, next, &svr->remotes) {
+    SHASH_FOR_EACH_SAFE (node, &svr->remotes) {
         struct ovsdb_jsonrpc_remote *remote = node->data;
         struct ovsdb_jsonrpc_options *options
             = shash_find_data(new_remotes, node->name);
@@ -909,9 +909,9 @@ error:
 static void
 ovsdb_jsonrpc_session_unlock_all(struct ovsdb_jsonrpc_session *s)
 {
-    struct ovsdb_lock_waiter *waiter, *next;
+    struct ovsdb_lock_waiter *waiter;

-    HMAP_FOR_EACH_SAFE (waiter, next, session_node, &s->up.waiters) {
+    HMAP_FOR_EACH_SAFE (waiter, session_node, &s->up.waiters) {
         ovsdb_jsonrpc_session_unlock__(waiter);
     }
 }
@@ -1198,8 +1198,8 @@ static void
 ovsdb_jsonrpc_trigger_remove__(struct ovsdb_jsonrpc_session *s,
                                struct ovsdb *db)
 {
-    struct ovsdb_jsonrpc_trigger *t, *next;
-    HMAP_FOR_EACH_SAFE (t, next, hmap_node, &s->triggers) {
+    struct ovsdb_jsonrpc_trigger *t;
+    HMAP_FOR_EACH_SAFE (t, hmap_node, &s->triggers) {
         if (!db || t->trigger.db == db) {
             ovsdb_jsonrpc_trigger_complete(t);
         }
@@ -1688,8 +1688,8 @@ ovsdb_jsonrpc_monitor_preremove_db(struct ovsdb_jsonrpc_session *s,
 {
     ovs_assert(db);

-    struct ovsdb_jsonrpc_monitor *m, *next;
-    HMAP_FOR_EACH_SAFE (m, next, node, &s->monitors) {
+    struct ovsdb_jsonrpc_monitor *m;
+    HMAP_FOR_EACH_SAFE (m, node, &s->monitors) {
         if (m->db == db) {
             ovsdb_jsonrpc_monitor_destroy(m, true);
         }
@@ -1700,9 +1700,9 @@ ovsdb_jsonrpc_monitor_preremove_db(struct ovsdb_jsonrpc_session *s,
 static void
 ovsdb_jsonrpc_monitor_remove_all(struct ovsdb_jsonrpc_session *s)
 {
-    struct ovsdb_jsonrpc_monitor *m, *next;
+    struct ovsdb_jsonrpc_monitor *m;

-    HMAP_FOR_EACH_SAFE (m, next, node, &s->monitors) {
+    HMAP_FOR_EACH_SAFE (m, node, &s->monitors) {
         ovsdb_jsonrpc_monitor_destroy(m, false);
     }
 }

@@ -644,8 +644,8 @@ ovsdb_monitor_change_set_destroy(struct ovsdb_monitor_change_set *mcs)
         ovs_list_remove(&mcst->list_in_change_set);
         ovs_list_remove(&mcst->list_in_mt);

-        struct ovsdb_monitor_row *row, *next;
-        HMAP_FOR_EACH_SAFE (row, next, hmap_node, &mcst->rows) {
+        struct ovsdb_monitor_row *row;
+        HMAP_FOR_EACH_SAFE (row, hmap_node, &mcst->rows) {
             hmap_remove(&mcst->rows, &row->hmap_node);
             ovsdb_monitor_row_destroy(mcst->mt, row, mcst->n_columns);
         }
@@ -700,13 +700,13 @@ void
 ovsdb_monitor_session_condition_destroy(
                            struct ovsdb_monitor_session_condition *condition)
 {
-    struct shash_node *node, *next;
+    struct shash_node *node;

     if (!condition) {
         return;
     }

-    SHASH_FOR_EACH_SAFE (node, next, &condition->tables) {
+    SHASH_FOR_EACH_SAFE (node, &condition->tables) {
         struct ovsdb_monitor_table_condition *mtc = node->data;

         ovsdb_condition_destroy(&mtc->new_condition);
@@ -1122,11 +1122,11 @@ ovsdb_monitor_compose_update(
     json = NULL;
     struct ovsdb_monitor_change_set_for_table *mcst;
     LIST_FOR_EACH (mcst, list_in_change_set, &mcs->change_set_for_tables) {
-        struct ovsdb_monitor_row *row, *next;
+        struct ovsdb_monitor_row *row;
         struct json *table_json = NULL;
         struct ovsdb_monitor_table *mt = mcst->mt;

-        HMAP_FOR_EACH_SAFE (row, next, hmap_node, &mcst->rows) {
+        HMAP_FOR_EACH_SAFE (row, hmap_node, &mcst->rows) {
             struct json *row_json;
             row_json = (*row_update)(mt, condition, OVSDB_MONITOR_ROW, row,
                                      initial, changed, mcst->n_columns);
@@ -229,8 +229,7 @@ main_loop(struct server_config *config,

         ovsdb_relay_run();

-        struct shash_node *next;
-        SHASH_FOR_EACH_SAFE (node, next, all_dbs) {
+        SHASH_FOR_EACH_SAFE (node, all_dbs) {
             struct db *db = node->data;
             ovsdb_txn_history_run(db->db);
             ovsdb_storage_run(db->db->storage);
@@ -322,7 +321,7 @@ main(int argc, char *argv[])
     FILE *config_tmpfile;
     struct server_config server_config;
     struct shash all_dbs;
-    struct shash_node *node, *next;
+    struct shash_node *node;
     int replication_probe_interval = REPLICATION_DEFAULT_PROBE_INTERVAL;

     ovs_cmdl_proctitle_init(argc, argv);
@@ -492,7 +491,7 @@ main(int argc, char *argv[])
     main_loop(&server_config, jsonrpc, &all_dbs, unixctl, &remotes,
               run_process, &exiting, &is_backup);

-    SHASH_FOR_EACH_SAFE(node, next, &all_dbs) {
+    SHASH_FOR_EACH_SAFE (node, &all_dbs) {
         struct db *db = node->data;
         close_db(&server_config, db, NULL);
         shash_delete(&all_dbs, node);
@@ -1245,8 +1244,8 @@ update_server_status(struct shash *all_dbs)

     /* Update rows for databases that still exist.
      * Delete rows for databases that no longer exist. */
-    const struct ovsdb_row *row, *next_row;
-    HMAP_FOR_EACH_SAFE (row, next_row, hmap_node, &database_table->rows) {
+    const struct ovsdb_row *row;
+    HMAP_FOR_EACH_SAFE (row, hmap_node, &database_table->rows) {
         const char *name;
         ovsdb_util_read_string_column(row, "name", &name);
         struct db *db = shash_find_data(all_dbs, name);

@@ -1579,15 +1579,14 @@ do_check_cluster(struct ovs_cmdl_context *ctx)
     }
     free(c.servers);

-    struct commit *next_commit;
-    HMAP_FOR_EACH_SAFE (commit, next_commit, hmap_node, &c.commits) {
+    HMAP_FOR_EACH_SAFE (commit, hmap_node, &c.commits) {
         hmap_remove(&c.commits, &commit->hmap_node);
         free(commit);
     }
     hmap_destroy(&c.commits);

-    struct leader *leader, *next_leader;
-    HMAP_FOR_EACH_SAFE (leader, next_leader, hmap_node, &c.leaders) {
+    struct leader *leader;
+    HMAP_FOR_EACH_SAFE (leader, hmap_node, &c.leaders) {
         hmap_remove(&c.leaders, &leader->hmap_node);
         free(leader);
     }

@@ -40,9 +40,9 @@ ovsdb_query(struct ovsdb_table *table, const struct ovsdb_condition *cnd,
         }
     } else {
         /* Linear scan. */
-        const struct ovsdb_row *row, *next;
+        const struct ovsdb_row *row;

-        HMAP_FOR_EACH_SAFE (row, next, hmap_node, &table->rows) {
+        HMAP_FOR_EACH_SAFE (row, hmap_node, &table->rows) {
             if (ovsdb_condition_match_every_clause(row, cnd) &&
                 !output_row(row, aux)) {
                 break;

@@ -150,8 +150,8 @@ raft_server_destroy(struct raft_server *s)
 void
 raft_servers_destroy(struct hmap *servers)
 {
-    struct raft_server *s, *next;
-    HMAP_FOR_EACH_SAFE (s, next, hmap_node, servers) {
+    struct raft_server *s;
+    HMAP_FOR_EACH_SAFE (s, hmap_node, servers) {
         hmap_remove(servers, &s->hmap_node);
         raft_server_destroy(s);
     }
ovsdb/raft.c: 18 lines changed
@@ -700,8 +700,8 @@ static void
 raft_set_servers(struct raft *raft, const struct hmap *new_servers,
                  enum vlog_level level)
 {
-    struct raft_server *s, *next;
-    HMAP_FOR_EACH_SAFE (s, next, hmap_node, &raft->servers) {
+    struct raft_server *s;
+    HMAP_FOR_EACH_SAFE (s, hmap_node, &raft->servers) {
         if (!raft_server_find(new_servers, &s->sid)) {
             ovs_assert(s != raft->remove_server);

@@ -711,7 +711,7 @@ raft_set_servers(struct raft *raft, const struct hmap *new_servers,
         }
     }

-    HMAP_FOR_EACH_SAFE (s, next, hmap_node, new_servers) {
+    HMAP_FOR_EACH_SAFE (s, hmap_node, new_servers) {
         if (!raft_find_server(raft, &s->sid)) {
             VLOG(level, "server %s added to configuration", s->nickname);

@@ -2062,8 +2062,8 @@ raft_run(struct raft *raft)
      * commands becomes new leader: the pending commands can still complete
      * if the crashed leader has replicated the transactions to majority of
      * followers before it crashed. */
-    struct raft_command *cmd, *next_cmd;
-    HMAP_FOR_EACH_SAFE (cmd, next_cmd, hmap_node, &raft->commands) {
+    struct raft_command *cmd;
+    HMAP_FOR_EACH_SAFE (cmd, hmap_node, &raft->commands) {
         if (cmd->timestamp
             && now - cmd->timestamp > raft->election_timer * 2) {
             raft_command_complete(raft, cmd, RAFT_CMD_TIMEOUT);
@@ -2266,8 +2266,8 @@ raft_command_initiate(struct raft *raft,
 static void
 log_all_commands(struct raft *raft)
 {
-    struct raft_command *cmd, *next;
-    HMAP_FOR_EACH_SAFE (cmd, next, hmap_node, &raft->commands) {
+    struct raft_command *cmd;
+    HMAP_FOR_EACH_SAFE (cmd, hmap_node, &raft->commands) {
         VLOG_DBG("raft command eid: "UUID_FMT, UUID_ARGS(&cmd->eid));
     }
 }
@@ -2421,8 +2421,8 @@ raft_command_complete(struct raft *raft,
 static void
 raft_complete_all_commands(struct raft *raft, enum raft_command_status status)
 {
-    struct raft_command *cmd, *next;
-    HMAP_FOR_EACH_SAFE (cmd, next, hmap_node, &raft->commands) {
+    struct raft_command *cmd;
+    HMAP_FOR_EACH_SAFE (cmd, hmap_node, &raft->commands) {
         raft_command_complete(raft, cmd, status);
     }
 }

@@ -280,9 +280,9 @@ ovsdb_relay_clear(struct ovsdb *db)

     SHASH_FOR_EACH (table_node, &db->tables) {
         struct ovsdb_table *table = table_node->data;
-        struct ovsdb_row *row, *next;
+        struct ovsdb_row *row;

-        HMAP_FOR_EACH_SAFE (row, next, hmap_node, &table->rows) {
+        HMAP_FOR_EACH_SAFE (row, hmap_node, &table->rows) {
             ovsdb_txn_row_delete(txn, row);
         }
     }

@@ -549,8 +549,8 @@ reset_database(struct ovsdb *db)
         /* Delete all rows if the table is not excluded. */
         if (!excluded_tables_find(db->schema->name, table_node->name)) {
             struct ovsdb_table *table = table_node->data;
-            struct ovsdb_row *row, *next;
-            HMAP_FOR_EACH_SAFE (row, next, hmap_node, &table->rows) {
+            struct ovsdb_row *row;
+            HMAP_FOR_EACH_SAFE (row, hmap_node, &table->rows) {
                 ovsdb_txn_row_delete(txn, row);
             }
         }
@@ -769,9 +769,9 @@ replication_dbs_destroy(void)
         return;
     }

-    struct shash_node *node, *next;
+    struct shash_node *node;

-    SHASH_FOR_EACH_SAFE (node, next, replication_dbs) {
+    SHASH_FOR_EACH_SAFE (node, replication_dbs) {
         hmap_remove(&replication_dbs->map, &node->node);
         struct replication_db *rdb = node->data;
         if (rdb->active_db_schema) {

@@ -309,10 +309,10 @@ void
 ovsdb_table_destroy(struct ovsdb_table *table)
 {
     if (table) {
-        struct ovsdb_row *row, *next;
+        struct ovsdb_row *row;
         size_t i;

-        HMAP_FOR_EACH_SAFE (row, next, hmap_node, &table->rows) {
+        HMAP_FOR_EACH_SAFE (row, hmap_node, &table->rows) {
             ovsdb_row_destroy(row);
         }
         hmap_destroy(&table->rows);

@@ -167,9 +167,9 @@ ovsdb_txn_forward_cancel(struct ovsdb *db, struct ovsdb_txn_forward *txn_fwd)
 void
 ovsdb_txn_forward_cancel_all(struct ovsdb *db, bool sent_only)
 {
-    struct ovsdb_txn_forward *t, *next;
+    struct ovsdb_txn_forward *t;

-    HMAP_FOR_EACH_SAFE (t, next, sent_node, &db->txn_forward_sent) {
+    HMAP_FOR_EACH_SAFE (t, sent_node, &db->txn_forward_sent) {
         ovsdb_txn_forward_cancel(db, t);
     }

@@ -1096,8 +1096,8 @@ ovsdb_txn_destroy_cloned(struct ovsdb_txn *txn)
     ovs_assert(!txn->db);
     struct ovsdb_txn_table *t;
     LIST_FOR_EACH_SAFE (t, node, &txn->txn_tables) {
-        struct ovsdb_txn_row *r, *next_txn_row;
-        HMAP_FOR_EACH_SAFE (r, next_txn_row, hmap_node, &t->txn_rows) {
+        struct ovsdb_txn_row *r;
+        HMAP_FOR_EACH_SAFE (r, hmap_node, &t->txn_rows) {
             if (r->old) {
                 ovsdb_row_destroy(r->old);
             }
@@ -1560,9 +1560,9 @@ for_each_txn_row(struct ovsdb_txn *txn,
         }

         while (t->n_processed < hmap_count(&t->txn_rows)) {
-            struct ovsdb_txn_row *r, *next_txn_row;
+            struct ovsdb_txn_row *r;

-            HMAP_FOR_EACH_SAFE (r, next_txn_row, hmap_node, &t->txn_rows) {
+            HMAP_FOR_EACH_SAFE (r, hmap_node, &t->txn_rows) {
                 if (r->serial != serial) {
                     struct ovsdb_error *error;

@@ -584,7 +584,7 @@ benchmark_hmap(void)
 {
     struct helement *elements;
     struct hmap hmap;
-    struct helement *e, *next;
+    struct helement *e;
     struct timeval start;
     pthread_t *threads;
     struct hmap_aux aux;
@@ -622,7 +622,7 @@ benchmark_hmap(void)

     /* Destruction. */
     xgettimeofday(&start);
-    HMAP_FOR_EACH_SAFE (e, next, node, &hmap) {
+    HMAP_FOR_EACH_SAFE (e, node, &hmap) {
         hmap_remove(&hmap, &e->node);
     }
     hmap_destroy(&hmap);
@@ -269,6 +269,38 @@ test_hmap_for_each_safe(hash_func *hash)
     assert(next == NULL);
     assert(e == NULL);

+    for (i = 0; i < n; i++) {
+        if (pattern & (1ul << i)) {
+            n_remaining++;
+        }
+    }
+    assert(n == n_remaining);
+    hmap_destroy(&hmap);
+
+    /* Test short version (without next variable). */
+    make_hmap(&hmap, elements, values, n, hash);
+
+    i = 0;
+    n_remaining = n;
+    HMAP_FOR_EACH_SAFE (e, node, &hmap) {
+        assert(i < n);
+        if (pattern & (1ul << e->value)) {
+            size_t j;
+            hmap_remove(&hmap, &e->node);
+            for (j = 0; ; j++) {
+                assert(j < n_remaining);
+                if (values[j] == e->value) {
+                    values[j] = values[--n_remaining];
+                    break;
+                }
+            }
+        }
+        check_hmap(&hmap, values, n_remaining, hash);
+        i++;
+    }
+    assert(i == n);
+    assert(e == NULL);
+
     for (i = 0; i < n; i++) {
         if (pattern & (1ul << i)) {
             n_remaining++;
@@ -1525,11 +1525,11 @@ del_port(struct vsctl_context *vsctl_ctx, struct vsctl_port *port)
 static void
 del_bridge(struct vsctl_context *vsctl_ctx, struct vsctl_bridge *br)
 {
-    struct vsctl_bridge *child, *next_child;
+    struct vsctl_bridge *child;
     struct vsctl_port *port;
     const struct ovsrec_flow_sample_collector_set *fscset, *next_fscset;

-    HMAP_FOR_EACH_SAFE (child, next_child, children_node, &br->children) {
+    HMAP_FOR_EACH_SAFE (child, children_node, &br->children) {
         del_bridge(vsctl_ctx, child);
     }

@@ -543,13 +543,13 @@ bridge_exit(bool delete_datapath)
     if_notifier_destroy(ifnotifier);
     seq_destroy(ifaces_changed);

-    struct datapath *dp, *next;
-    HMAP_FOR_EACH_SAFE (dp, next, node, &all_datapaths) {
+    struct datapath *dp;
+    HMAP_FOR_EACH_SAFE (dp, node, &all_datapaths) {
         datapath_destroy(dp);
     }

-    struct bridge *br, *next_br;
-    HMAP_FOR_EACH_SAFE (br, next_br, node, &all_bridges) {
+    struct bridge *br;
+    HMAP_FOR_EACH_SAFE (br, node, &all_bridges) {
         bridge_destroy(br, delete_datapath);
     }

@@ -716,8 +716,8 @@ static void
 datapath_destroy(struct datapath *dp)
 {
     if (dp) {
-        struct ct_zone *ct_zone, *next;
-        HMAP_FOR_EACH_SAFE (ct_zone, next, node, &dp->ct_zones) {
+        struct ct_zone *ct_zone;
+        HMAP_FOR_EACH_SAFE (ct_zone, node, &dp->ct_zones) {
             ofproto_ct_del_zone_timeout_policy(dp->type, ct_zone->zone_id);
             ct_zone_remove_and_destroy(dp, ct_zone);
         }
@@ -733,7 +733,7 @@ datapath_destroy(struct datapath *dp)
 static void
 ct_zones_reconfigure(struct datapath *dp, struct ovsrec_datapath *dp_cfg)
 {
-    struct ct_zone *ct_zone, *next;
+    struct ct_zone *ct_zone;

     /* Add new 'ct_zone's or update existing 'ct_zone's based on the database
      * state. */
@@ -760,7 +760,7 @@ ct_zones_reconfigure(struct datapath *dp, struct ovsrec_datapath *dp_cfg)
     }

     /* Purge 'ct_zone's no longer found in the database. */
-    HMAP_FOR_EACH_SAFE (ct_zone, next, node, &dp->ct_zones) {
+    HMAP_FOR_EACH_SAFE (ct_zone, node, &dp->ct_zones) {
         if (ct_zone->last_used != idl_seqno) {
             ofproto_ct_del_zone_timeout_policy(dp->type, ct_zone->zone_id);
             ct_zone_remove_and_destroy(dp, ct_zone);
@@ -788,7 +788,7 @@ dp_capability_reconfigure(struct datapath *dp,
 static void
 datapath_reconfigure(const struct ovsrec_open_vswitch *cfg)
 {
-    struct datapath *dp, *next;
+    struct datapath *dp;

     /* Add new 'datapath's or update existing ones. */
     for (size_t i = 0; i < cfg->n_datapaths; i++) {
@@ -805,7 +805,7 @@ datapath_reconfigure(const struct ovsrec_open_vswitch *cfg)
     }

     /* Purge deleted 'datapath's. */
-    HMAP_FOR_EACH_SAFE (dp, next, node, &all_datapaths) {
+    HMAP_FOR_EACH_SAFE (dp, node, &all_datapaths) {
         if (dp->last_used != idl_seqno) {
             datapath_destroy(dp);
         }
@@ -816,7 +816,7 @@ static void
 bridge_reconfigure(const struct ovsrec_open_vswitch *ovs_cfg)
 {
     struct sockaddr_in *managers;
-    struct bridge *br, *next;
+    struct bridge *br;
     int sflow_bridge_number;
     size_t n_managers;

@@ -875,7 +875,7 @@ bridge_reconfigure(const struct ovsrec_open_vswitch *ovs_cfg)
      * - Create ofprotos that are missing.
      *
      * - Add ports that are missing. */
-    HMAP_FOR_EACH_SAFE (br, next, node, &all_bridges) {
+    HMAP_FOR_EACH_SAFE (br, node, &all_bridges) {
         if (!br->ofproto) {
             int error;

@@ -1020,7 +1020,7 @@ bridge_delete_or_reconfigure_ports(struct bridge *br)
     struct ofproto_port_dump dump;

     struct sset ofproto_ports;
-    struct port *port, *port_next;
+    struct port *port;

     /* List of "ofp_port"s to delete. We make a list instead of deleting them
      * right away because ofproto implementations aren't necessarily able to
@@ -1132,7 +1132,7 @@ bridge_delete_or_reconfigure_ports(struct bridge *br)
      * device destroyed via "tunctl -d", a physical Ethernet device
      * whose module was just unloaded via "rmmod", or a virtual NIC for a
      * VM whose VM was just terminated. */
-    HMAP_FOR_EACH_SAFE (port, port_next, hmap_node, &br->ports) {
+    HMAP_FOR_EACH_SAFE (port, hmap_node, &br->ports) {
         struct iface *iface;

         LIST_FOR_EACH_SAFE (iface, port_elem, &port->ifaces) {
@@ -1967,7 +1967,7 @@ port_is_bond_fake_iface(const struct port *port)
 static void
 add_del_bridges(const struct ovsrec_open_vswitch *cfg)
 {
-    struct bridge *br, *next;
+    struct bridge *br;
     struct shash_node *node;
     struct shash new_br;
     size_t i;
@@ -1993,7 +1993,7 @@ add_del_bridges(const struct ovsrec_open_vswitch *cfg)

     /* Get rid of deleted bridges or those whose types have changed.
      * Update 'cfg' of bridges that still exist. */
-    HMAP_FOR_EACH_SAFE (br, next, node, &all_bridges) {
+    HMAP_FOR_EACH_SAFE (br, node, &all_bridges) {
         br->cfg = shash_find_data(&new_br, br->name);
         if (!br->cfg || strcmp(br->type, ofproto_normalize_type(
                                    br->cfg->datapath_type))) {
@@ -3266,13 +3266,13 @@ bridge_run(void)

     if (ovsdb_idl_is_lock_contended(idl)) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-        struct bridge *br, *next_br;
+        struct bridge *br;

         VLOG_ERR_RL(&rl, "another ovs-vswitchd process is running, "
                     "disabling this process (pid %ld) until it goes away",
                     (long int) getpid());

-        HMAP_FOR_EACH_SAFE (br, next_br, node, &all_bridges) {
+        HMAP_FOR_EACH_SAFE (br, node, &all_bridges) {
             bridge_destroy(br, false);
         }
         /* Since we will not be running system_stats_run() in this process
@@ -3594,13 +3594,13 @@ static void
 bridge_destroy(struct bridge *br, bool del)
 {
     if (br) {
-        struct mirror *mirror, *next_mirror;
-        struct port *port, *next_port;
+        struct mirror *mirror;
+        struct port *port;

-        HMAP_FOR_EACH_SAFE (port, next_port, hmap_node, &br->ports) {
+        HMAP_FOR_EACH_SAFE (port, hmap_node, &br->ports) {
             port_destroy(port);
         }
-        HMAP_FOR_EACH_SAFE (mirror, next_mirror, hmap_node, &br->mirrors) {
+        HMAP_FOR_EACH_SAFE (mirror, hmap_node, &br->mirrors) {
             mirror_destroy(mirror);
         }

@@ -3746,11 +3746,11 @@ static void
 bridge_del_ports(struct bridge *br, const struct shash *wanted_ports)
 {
     struct shash_node *port_node;
-    struct port *port, *next;
+    struct port *port;

     /* Get rid of deleted ports.
      * Get rid of deleted interfaces on ports that still exist. */
-    HMAP_FOR_EACH_SAFE (port, next, hmap_node, &br->ports) {
+    HMAP_FOR_EACH_SAFE (port, hmap_node, &br->ports) {
         port->cfg = shash_find_data(wanted_ports, port->name);
         if (!port->cfg) {
             port_destroy(port);
@@ -4211,7 +4211,7 @@ bridge_configure_aa(struct bridge *br)
     const struct ovsdb_datum *mc;
     struct ovsrec_autoattach *auto_attach = br->cfg->auto_attach;
     struct aa_settings aa_s;
-    struct aa_mapping *m, *next;
+    struct aa_mapping *m;
     size_t i;

     if (!auto_attach) {
@@ -4227,7 +4227,7 @@ bridge_configure_aa(struct bridge *br)
     mc = ovsrec_autoattach_get_mappings(auto_attach,
                                         OVSDB_TYPE_INTEGER,
                                         OVSDB_TYPE_INTEGER);
-    HMAP_FOR_EACH_SAFE (m, next, hmap_node, &br->mappings) {
+    HMAP_FOR_EACH_SAFE (m, hmap_node, &br->mappings) {
         union ovsdb_atom atom;

         atom.integer = m->isid;
@@ -5013,12 +5013,12 @@ bridge_configure_mirrors(struct bridge *br)
 {
     const struct ovsdb_datum *mc;
     unsigned long *flood_vlans;
-    struct mirror *m, *next;
+    struct mirror *m;
     size_t i;

     /* Get rid of deleted mirrors. */
     mc = ovsrec_bridge_get_mirrors(br->cfg, OVSDB_TYPE_UUID);
-    HMAP_FOR_EACH_SAFE (m, next, hmap_node, &br->mirrors) {
+    HMAP_FOR_EACH_SAFE (m, hmap_node, &br->mirrors) {
         union ovsdb_atom atom;

         atom.uuid = m->uuid;

@@ -801,12 +801,12 @@ vtep_ctl_context_invalidate_cache(struct ctl_context *ctx)

     SHASH_FOR_EACH (node, &vtepctl_ctx->lswitches) {
         struct vtep_ctl_lswitch *ls = node->data;
-        struct shash_node *node2, *next_node2;
+        struct shash_node *node2;

         shash_destroy(&ls->ucast_local);
         shash_destroy(&ls->ucast_remote);

-        SHASH_FOR_EACH_SAFE (node2, next_node2, &ls->mcast_local) {
+        SHASH_FOR_EACH_SAFE (node2, &ls->mcast_local) {
             struct vtep_ctl_mcast_mac *mcast_mac = node2->data;
             struct vtep_ctl_ploc *ploc;

@@ -818,7 +818,7 @@ vtep_ctl_context_invalidate_cache(struct ctl_context *ctx)
         }
         shash_destroy(&ls->mcast_local);

-        SHASH_FOR_EACH_SAFE (node2, next_node2, &ls->mcast_remote) {
+        SHASH_FOR_EACH_SAFE (node2, &ls->mcast_remote) {
             struct vtep_ctl_mcast_mac *mcast_mac = node2->data;
             struct vtep_ctl_ploc *ploc;