dpif-netdev: Fix race condition in pmd thread initialization.

The pmd threads and the main thread are synchronized using a condition
variable.  The main thread writes a new configuration, then it waits on
the condition variable.  A pmd thread reads the new configuration, then
it calls signal() on the condition variable.  To make sure that the pmd
threads and the main thread have a consistent view, each signal() should
be matched by a wait().
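
A minimal sketch of that handshake, using plain pthreads rather than
the OVS wrappers (cfg_mutex, cfg_cond and the helper functions are
illustrative names, not the actual dpif-netdev fields):

    #include <pthread.h>

    /* Illustrative stand-ins for the real configuration read/write. */
    void publish_new_configuration(void);
    void read_new_configuration(void);

    static pthread_mutex_t cfg_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cfg_cond  = PTHREAD_COND_INITIALIZER;

    /* Main thread: publish a new configuration, then block until a pmd
     * acknowledges it.  The view stays consistent only if this wait()
     * consumes a signal() sent after the new configuration was read. */
    static void
    request_reload(void)
    {
        pthread_mutex_lock(&cfg_mutex);
        publish_new_configuration();
        pthread_cond_wait(&cfg_cond, &cfg_mutex);
        pthread_mutex_unlock(&cfg_mutex);
    }

    /* Pmd thread: read the new configuration, then acknowledge it. */
    static void
    ack_reload(void)
    {
        pthread_mutex_lock(&cfg_mutex);
        read_new_configuration();
        pthread_cond_signal(&cfg_cond);
        pthread_mutex_unlock(&cfg_mutex);
    }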

Currently the first signal() doesn't have a corresponding wait().  If
the pmd thread takes a long time to start and its first signal() is
consumed by a later wait(), the threads will have an inconsistent view.
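
In terms of the sketch above, the racy startup looks like this (the
timeline is illustrative):

    pmd thread                         main thread
    ----------                         -----------
    starts up (slowly)
                                       request_reload():
                                         publish_new_configuration()
                                         pthread_cond_wait()    <-- wait #1
    ack_reload()   <-- startup signal,
                       no configuration
                       has been read
                                       wait #1 returns: the main thread
                                       assumes the new configuration was
                                       applied, but the pmd never read it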

This commit fixes the problem by removing the first signal() from the
pmd thread.
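
Schematically, after this change the pmd thread signals only in
response to a reload request, so every signal() pairs with exactly one
wait() (reload_requested() is an illustrative stand-in for the port_seq
sequence check in pmd_thread_main()):

    for (;;) {
        process_rx_queues();          /* illustrative: poll the rx queues */
        if (reload_requested()) {
            read_new_configuration();
            ack_reload();             /* paired with the main thread's wait() */
        }
    }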

This is hardly a problem on current master, because the main thread
calls the first wait() long after the creation of a pmd thread.  It
becomes a problem with the following commits.

Signed-off-by: Daniele Di Proietto <diproiettod@vmware.com>
Tested-by: Ilya Maximets <i.maximets@samsung.com>
Acked-by: Ilya Maximets <i.maximets@samsung.com>
Author: Daniele Di Proietto <diproiettod@vmware.com>
Date:   2016-04-05 18:02:14 -07:00
Parent: b68872d8bb
Commit: d42f9307a0

@@ -2661,21 +2661,22 @@ dpif_netdev_wait(struct dpif *dpif)
 
 static int
 pmd_load_queues(struct dp_netdev_pmd_thread *pmd, struct rxq_poll **ppoll_list)
-    OVS_REQUIRES(pmd->poll_mutex)
 {
     struct rxq_poll *poll_list = *ppoll_list;
     struct rxq_poll *poll;
     int i;
 
+    ovs_mutex_lock(&pmd->poll_mutex);
     poll_list = xrealloc(poll_list, pmd->poll_cnt * sizeof *poll_list);
 
     i = 0;
     LIST_FOR_EACH (poll, node, &pmd->poll_list) {
         poll_list[i++] = *poll;
     }
+    ovs_mutex_unlock(&pmd->poll_mutex);
 
     *ppoll_list = poll_list;
-    return pmd->poll_cnt;
+    return i;
 }
 
 static void *
@@ -2685,6 +2686,7 @@ pmd_thread_main(void *f_)
     unsigned int lc = 0;
     struct rxq_poll *poll_list;
     unsigned int port_seq = PMD_INITIAL_SEQ;
+    bool exiting;
     int poll_cnt;
     int i;
 
@@ -2694,13 +2696,10 @@ pmd_thread_main(void *f_)
     /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
     ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
     pmd_thread_setaffinity_cpu(pmd->core_id);
+    poll_cnt = pmd_load_queues(pmd, &poll_list);
 reload:
     emc_cache_init(&pmd->flow_cache);
 
-    ovs_mutex_lock(&pmd->poll_mutex);
-    poll_cnt = pmd_load_queues(pmd, &poll_list);
-    ovs_mutex_unlock(&pmd->poll_mutex);
-
     /* List port/core affinity */
     for (i = 0; i < poll_cnt; i++) {
         VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n",
@@ -2708,10 +2707,6 @@ reload:
                  netdev_rxq_get_queue_id(poll_list[i].rx));
     }
 
-    /* Signal here to make sure the pmd finishes
-     * reloading the updated configuration. */
-    dp_netdev_pmd_reload_done(pmd);
-
     for (;;) {
         for (i = 0; i < poll_cnt; i++) {
             dp_netdev_process_rxq_port(pmd, poll_list[i].port, poll_list[i].rx);
@@ -2734,14 +2729,18 @@ reload:
         }
     }
 
+    poll_cnt = pmd_load_queues(pmd, &poll_list);
+    exiting = latch_is_set(&pmd->exit_latch);
+    /* Signal here to make sure the pmd finishes
+     * reloading the updated configuration. */
+    dp_netdev_pmd_reload_done(pmd);
+
     emc_cache_uninit(&pmd->flow_cache);
 
-    if (!latch_is_set(&pmd->exit_latch)){
+    if (!exiting) {
         goto reload;
     }
 
-    dp_netdev_pmd_reload_done(pmd);
-
     free(poll_list);
     return NULL;
 }