netdev: Fix potential deadlock.

Until now, netdev_class_mutex and route_table_mutex could be taken in
either order:

    * netdev_run() takes netdev_class_mutex, then netdev_vport_run() calls
      route_table_run(), which takes route_table_mutex.

    * route_table_init() takes route_table_mutex and then eventually calls
      netdev_open(), which takes netdev_class_mutex.
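
With no agreed order between the two mutexes, two threads can each end up
holding one while waiting for the other, the classic lock-order inversion.  A
minimal sketch of the hazard (the functions below are simplified stand-ins for
the call chains above, not actual OVS code):

    #include "ovs-thread.h"

    static struct ovs_mutex class_mutex = OVS_MUTEX_INITIALIZER;
    static struct ovs_mutex route_mutex = OVS_MUTEX_INITIALIZER;

    static void
    run_path(void)                       /* Stand-in for netdev_run(). */
    {
        ovs_mutex_lock(&class_mutex);
        ovs_mutex_lock(&route_mutex);    /* Waits forever if another thread
                                          * sits between the locks in
                                          * init_path(). */
        ovs_mutex_unlock(&route_mutex);
        ovs_mutex_unlock(&class_mutex);
    }

    static void
    init_path(void)                      /* Stand-in for route_table_init(). */
    {
        ovs_mutex_lock(&route_mutex);
        ovs_mutex_lock(&class_mutex);    /* Waits forever if another thread
                                          * sits between the locks in
                                          * run_path(). */
        ovs_mutex_unlock(&class_mutex);
        ovs_mutex_unlock(&route_mutex);
    }

Imposing a single global lock order would also avoid the deadlock; the commit
instead removes netdev_class_mutex from these paths entirely, as described
below.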

This commit fixes the problem by converting the netdev_classes hmap,
protected by netdev_class_mutex, into a cmap protected on the read
side by RCU.  Only a very small amount of code actually writes to the
cmap in question, so it's a lot easier to understand the locking rules
at that point.  In particular, there's no need to take netdev_class_mutex
from either netdev_run() or netdev_open(), so neither of the code paths
above determines a lock ordering any longer.
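
In outline, the new arrangement looks like the condensed sketch below (not the
patch itself; error handling and the per-class reference counting are
omitted): readers traverse the cmap with no lock at all, while the rare
writers still serialize among themselves and defer frees until an RCU grace
period has passed.

    #include "cmap.h"
    #include "hash.h"
    #include "netdev-provider.h"
    #include "ovs-rcu.h"

    static struct cmap netdev_classes = CMAP_INITIALIZER;

    struct netdev_registered_class {
        struct cmap_node cmap_node;       /* In 'netdev_classes', by type. */
        const struct netdev_class *class;
    };

    /* Read side: any thread may walk the map without taking a mutex. */
    static void
    run_all_providers(void)
    {
        struct netdev_registered_class *rc;

        CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
            if (rc->class->run) {
                rc->class->run();
            }
        }
    }

    /* Write side: callers serialize on netdev_class_mutex, as in the patch. */
    static void
    remove_provider(struct netdev_registered_class *rc)
    {
        cmap_remove(&netdev_classes, &rc->cmap_node,
                    hash_string(rc->class->type, 0));
        ovsrcu_postpone(free, rc);   /* Freed once concurrent readers finish. */
    }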

Reported-by: William Tu <u9012063@gmail.com>
Reported-at: http://openvswitch.org/pipermail/discuss/2016-February/020216.html
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Ryan Moats <rmoats@us.ibm.com>
Tested-by: William Tu <u9012063@gmail.com>
Author: Ben Pfaff
Date:   2016-04-22 17:03:22 -07:00
Parent: b70e697679
Commit: 0692257923

@@ -31,6 +31,7 @@
#include <sys/types.h>
#endif
#include "cmap.h"
#include "coverage.h"
#include "dpif.h"
#include "dp-packet.h"
@@ -75,24 +76,20 @@ static struct ovs_mutex netdev_mutex = OVS_MUTEX_INITIALIZER;
static struct shash netdev_shash OVS_GUARDED_BY(netdev_mutex)
= SHASH_INITIALIZER(&netdev_shash);
/* Protects 'netdev_classes' against insertions or deletions.
*
* This is a recursive mutex to allow recursive acquisition when calling into
* providers. For example, netdev_run() calls into provider 'run' functions,
* which might reasonably want to call one of the netdev functions that takes
* netdev_class_mutex. */
static struct ovs_mutex netdev_class_mutex OVS_ACQ_BEFORE(netdev_mutex);
/* Mutual exclusion of */
static struct ovs_mutex netdev_class_mutex OVS_ACQ_BEFORE(netdev_mutex)
= OVS_MUTEX_INITIALIZER;
/* Contains 'struct netdev_registered_class'es. */
static struct hmap netdev_classes OVS_GUARDED_BY(netdev_class_mutex)
= HMAP_INITIALIZER(&netdev_classes);
static struct cmap netdev_classes = CMAP_INITIALIZER;
struct netdev_registered_class {
/* In 'netdev_classes', by class->type. */
struct hmap_node hmap_node OVS_GUARDED_BY(netdev_class_mutex);
const struct netdev_class *class OVS_GUARDED_BY(netdev_class_mutex);
/* Number of 'struct netdev's of this class. */
int ref_cnt OVS_GUARDED_BY(netdev_class_mutex);
struct cmap_node cmap_node; /* In 'netdev_classes', by class->type. */
const struct netdev_class *class;
/* Number of references: one for the class itself and one for every
* instance of the class. */
struct ovs_refcount refcnt;
};
/* This is set pretty low because we probably won't learn anything from the
@@ -126,28 +123,15 @@ netdev_is_pmd(const struct netdev *netdev)
return netdev->netdev_class->is_pmd;
}
static void
netdev_class_mutex_initialize(void)
OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
if (ovsthread_once_start(&once)) {
ovs_mutex_init_recursive(&netdev_class_mutex);
ovsthread_once_done(&once);
}
}
static void
netdev_initialize(void)
OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
OVS_EXCLUDED(netdev_mutex)
{
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
if (ovsthread_once_start(&once)) {
netdev_class_mutex_initialize();
fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);
netdev_vport_patch_register();
#ifdef __linux__
@@ -165,6 +149,7 @@ netdev_initialize(void)
netdev_register_provider(&netdev_internal_class);
netdev_vport_tunnel_register();
#endif
netdev_dpdk_register();
ovsthread_once_done(&once);
}
}
@@ -175,18 +160,16 @@ netdev_initialize(void)
* main poll loop. */
void
netdev_run(void)
OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
OVS_EXCLUDED(netdev_mutex)
{
struct netdev_registered_class *rc;
netdev_initialize();
ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
struct netdev_registered_class *rc;
CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
if (rc->class->run) {
rc->class->run();
}
}
ovs_mutex_unlock(&netdev_class_mutex);
}
/* Arranges for poll_block() to wake up when netdev_run() needs to be called.
@@ -195,26 +178,23 @@ netdev_run(void)
* main poll loop. */
void
netdev_wait(void)
OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
OVS_EXCLUDED(netdev_mutex)
{
struct netdev_registered_class *rc;
netdev_initialize();
ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
struct netdev_registered_class *rc;
CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
if (rc->class->wait) {
rc->class->wait();
}
}
ovs_mutex_unlock(&netdev_class_mutex);
}
static struct netdev_registered_class *
netdev_lookup_class(const char *type)
OVS_REQ_RDLOCK(netdev_class_mutex)
{
struct netdev_registered_class *rc;
HMAP_FOR_EACH_WITH_HASH (rc, hmap_node, hash_string(type, 0),
CMAP_FOR_EACH_WITH_HASH (rc, cmap_node, hash_string(type, 0),
&netdev_classes) {
if (!strcmp(type, rc->class->type)) {
return rc;
@@ -231,7 +211,6 @@ netdev_register_provider(const struct netdev_class *new_class)
{
int error;
netdev_class_mutex_initialize();
ovs_mutex_lock(&netdev_class_mutex);
if (netdev_lookup_class(new_class->type)) {
VLOG_WARN("attempted to register duplicate netdev provider: %s",
@@ -243,10 +222,10 @@ netdev_register_provider(const struct netdev_class *new_class)
struct netdev_registered_class *rc;
rc = xmalloc(sizeof *rc);
hmap_insert(&netdev_classes, &rc->hmap_node,
cmap_insert(&netdev_classes, &rc->cmap_node,
hash_string(new_class->type, 0));
rc->class = new_class;
rc->ref_cnt = 0;
ovs_refcount_init(&rc->refcnt);
} else {
VLOG_ERR("failed to initialize %s network device class: %s",
new_class->type, ovs_strerror(error));
@@ -257,9 +236,12 @@ netdev_register_provider(const struct netdev_class *new_class)
return error;
}
/* Unregisters a netdev provider. 'type' must have been previously
* registered and not currently be in use by any netdevs. After unregistration
* new netdevs of that type cannot be opened using netdev_open(). */
/* Unregisters a netdev provider. 'type' must have been previously registered
* and not currently be in use by any netdevs. After unregistration new
* netdevs of that type cannot be opened using netdev_open(). (However, the
* provider may still be accessible from other threads until the next RCU grace
* period, so the caller must not free or re-register the same netdev_class
* until that has passed.) */
int
netdev_unregister_provider(const char *type)
OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
@@ -275,16 +257,16 @@ netdev_unregister_provider(const char *type)
VLOG_WARN("attempted to unregister a netdev provider that is not "
"registered: %s", type);
error = EAFNOSUPPORT;
} else {
if (!rc->ref_cnt) {
hmap_remove(&netdev_classes, &rc->hmap_node);
free(rc);
error = 0;
} else {
VLOG_WARN("attempted to unregister in use netdev provider: %s",
type);
error = EBUSY;
}
} else if (ovs_refcount_unref(&rc->refcnt) != 1) {
ovs_refcount_ref(&rc->refcnt);
VLOG_WARN("attempted to unregister in use netdev provider: %s",
type);
error = EBUSY;
} else {
cmap_remove(&netdev_classes, &rc->cmap_node,
hash_string(rc->class->type, 0));
ovsrcu_postpone(free, rc);
error = 0;
}
ovs_mutex_unlock(&netdev_class_mutex);
@@ -297,16 +279,13 @@ void
netdev_enumerate_types(struct sset *types)
OVS_EXCLUDED(netdev_mutex)
{
struct netdev_registered_class *rc;
netdev_initialize();
sset_clear(types);
ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
struct netdev_registered_class *rc;
CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
sset_add(types, rc->class->type);
}
ovs_mutex_unlock(&netdev_class_mutex);
}
/* Check that the network device name is not the same as any of the registered
@@ -318,19 +297,15 @@ bool
netdev_is_reserved_name(const char *name)
OVS_EXCLUDED(netdev_mutex)
{
struct netdev_registered_class *rc;
netdev_initialize();
ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
struct netdev_registered_class *rc;
CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
ovs_mutex_unlock(&netdev_class_mutex);
return true;
}
}
ovs_mutex_unlock(&netdev_class_mutex);
if (!strncmp(name, "ovs-", 4)) {
struct sset types;
@@ -366,14 +341,13 @@ netdev_open(const char *name, const char *type, struct netdev **netdevp)
netdev_initialize();
ovs_mutex_lock(&netdev_class_mutex);
ovs_mutex_lock(&netdev_mutex);
netdev = shash_find_data(&netdev_shash, name);
if (!netdev) {
struct netdev_registered_class *rc;
rc = netdev_lookup_class(type && type[0] ? type : "system");
if (rc) {
if (rc && ovs_refcount_try_ref_rcu(&rc->refcnt)) {
netdev = rc->class->alloc();
if (netdev) {
memset(netdev, 0, sizeof *netdev);
@@ -391,9 +365,9 @@ netdev_open(const char *name, const char *type, struct netdev **netdevp)
error = rc->class->construct(netdev);
if (!error) {
rc->ref_cnt++;
netdev_change_seq_changed(netdev);
} else {
ovs_refcount_unref(&rc->refcnt);
free(netdev->name);
ovs_assert(ovs_list_is_empty(&netdev->saved_flags_list));
shash_delete(&netdev_shash, netdev->node);
@@ -418,7 +392,6 @@ netdev_open(const char *name, const char *type, struct netdev **netdevp)
*netdevp = NULL;
}
ovs_mutex_unlock(&netdev_mutex);
ovs_mutex_unlock(&netdev_class_mutex);
return error;
}
@@ -531,11 +504,8 @@ netdev_unref(struct netdev *dev)
dev->netdev_class->dealloc(dev);
ovs_mutex_unlock(&netdev_mutex);
ovs_mutex_lock(&netdev_class_mutex);
rc = netdev_lookup_class(class->type);
ovs_assert(rc->ref_cnt > 0);
rc->ref_cnt--;
ovs_mutex_unlock(&netdev_class_mutex);
ovs_refcount_unref(&rc->refcnt);
} else {
ovs_mutex_unlock(&netdev_mutex);
}
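
As the extended comment on netdev_unregister_provider() notes, unregistering a
class does not make it immediately unreachable: other threads may still be
using the registration until the next RCU grace period, so the caller must not
free or re-register the same netdev_class before then.  A caller that wants to
reuse a class might wait that period out along these lines (a hedged sketch;
'my_class' is a hypothetical provider, and ovsrcu_synchronize() is one way to
wait):

    #include "netdev-provider.h"
    #include "ovs-rcu.h"

    extern const struct netdev_class my_class;   /* Hypothetical provider. */

    static void
    reregister_my_class(void)
    {
        if (!netdev_unregister_provider(my_class.type)) {
            /* Readers may still be using the old registration; wait for the
             * RCU grace period before registering the same class again. */
            ovsrcu_synchronize();
            netdev_register_provider(&my_class);
        }
    }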