mirror of
https://github.com/openvswitch/ovs
synced 2025-10-19 14:37:21 +00:00
dpif-netdev: Trigger parallel pmd reloads.
pmd reloads are currently serialised: each step calls reload_affected_pmds. Any pmd processing packets, waiting on a mutex, etc. will make other pmd threads wait for a delay that can be nondeterministic when syscalls add up. Switch to a little busy loop on the control thread using the existing per-pmd reload boolean. The memory order on this atomic is rel-acq to have an explicit synchronisation between the pmd threads and the control thread. Signed-off-by: David Marchand <david.marchand@redhat.com> Acked-by: Eelco Chaudron <echaudro@redhat.com> Acked-by: Ilya Maximets <i.maximets@samsung.com> Signed-off-by: Ian Stokes <ian.stokes@intel.com>
This commit is contained in:
committed by
Ian Stokes
parent
299c8d611e
commit
8f077b31e9
@@ -649,9 +649,6 @@ struct dp_netdev_pmd_thread {
|
|||||||
struct ovs_refcount ref_cnt; /* Every reference must be refcount'ed. */
|
struct ovs_refcount ref_cnt; /* Every reference must be refcount'ed. */
|
||||||
struct cmap_node node; /* In 'dp->poll_threads'. */
|
struct cmap_node node; /* In 'dp->poll_threads'. */
|
||||||
|
|
||||||
pthread_cond_t cond; /* For synchronizing pmd thread reload. */
|
|
||||||
struct ovs_mutex cond_mutex; /* Mutex for condition variable. */
|
|
||||||
|
|
||||||
/* Per thread exact-match cache. Note, the instance for cpu core
|
/* Per thread exact-match cache. Note, the instance for cpu core
|
||||||
* NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
|
* NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
|
||||||
* need to be protected by 'non_pmd_mutex'. Every other instance
|
* need to be protected by 'non_pmd_mutex'. Every other instance
|
||||||
@@ -1758,11 +1755,8 @@ dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
ovs_mutex_lock(&pmd->cond_mutex);
|
|
||||||
seq_change(pmd->reload_seq);
|
seq_change(pmd->reload_seq);
|
||||||
atomic_store_explicit(&pmd->reload, true, memory_order_release);
|
atomic_store_explicit(&pmd->reload, true, memory_order_release);
|
||||||
ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
|
|
||||||
ovs_mutex_unlock(&pmd->cond_mutex);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint32_t
|
static uint32_t
|
||||||
@@ -4655,6 +4649,19 @@ reload_affected_pmds(struct dp_netdev *dp)
|
|||||||
if (pmd->need_reload) {
|
if (pmd->need_reload) {
|
||||||
flow_mark_flush(pmd);
|
flow_mark_flush(pmd);
|
||||||
dp_netdev_reload_pmd__(pmd);
|
dp_netdev_reload_pmd__(pmd);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
|
||||||
|
if (pmd->need_reload) {
|
||||||
|
if (pmd->core_id != NON_PMD_CORE_ID) {
|
||||||
|
bool reload;
|
||||||
|
|
||||||
|
do {
|
||||||
|
atomic_read_explicit(&pmd->reload, &reload,
|
||||||
|
memory_order_acquire);
|
||||||
|
} while (reload);
|
||||||
|
}
|
||||||
pmd->need_reload = false;
|
pmd->need_reload = false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -5842,11 +5849,8 @@ dpif_netdev_enable_upcall(struct dpif *dpif)
|
|||||||
static void
|
static void
|
||||||
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
|
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
|
||||||
{
|
{
|
||||||
ovs_mutex_lock(&pmd->cond_mutex);
|
|
||||||
atomic_store_relaxed(&pmd->reload, false);
|
|
||||||
pmd->last_reload_seq = seq_read(pmd->reload_seq);
|
pmd->last_reload_seq = seq_read(pmd->reload_seq);
|
||||||
xpthread_cond_signal(&pmd->cond);
|
atomic_store_explicit(&pmd->reload, false, memory_order_release);
|
||||||
ovs_mutex_unlock(&pmd->cond_mutex);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns
|
/* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns
|
||||||
@@ -5931,8 +5935,6 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
|
|||||||
pmd->reload_seq = seq_create();
|
pmd->reload_seq = seq_create();
|
||||||
pmd->last_reload_seq = seq_read(pmd->reload_seq);
|
pmd->last_reload_seq = seq_read(pmd->reload_seq);
|
||||||
atomic_init(&pmd->reload, false);
|
atomic_init(&pmd->reload, false);
|
||||||
xpthread_cond_init(&pmd->cond, NULL);
|
|
||||||
ovs_mutex_init(&pmd->cond_mutex);
|
|
||||||
ovs_mutex_init(&pmd->flow_mutex);
|
ovs_mutex_init(&pmd->flow_mutex);
|
||||||
ovs_mutex_init(&pmd->port_mutex);
|
ovs_mutex_init(&pmd->port_mutex);
|
||||||
cmap_init(&pmd->flow_table);
|
cmap_init(&pmd->flow_table);
|
||||||
@@ -5975,8 +5977,6 @@ dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
|
|||||||
cmap_destroy(&pmd->flow_table);
|
cmap_destroy(&pmd->flow_table);
|
||||||
ovs_mutex_destroy(&pmd->flow_mutex);
|
ovs_mutex_destroy(&pmd->flow_mutex);
|
||||||
seq_destroy(pmd->reload_seq);
|
seq_destroy(pmd->reload_seq);
|
||||||
xpthread_cond_destroy(&pmd->cond);
|
|
||||||
ovs_mutex_destroy(&pmd->cond_mutex);
|
|
||||||
ovs_mutex_destroy(&pmd->port_mutex);
|
ovs_mutex_destroy(&pmd->port_mutex);
|
||||||
free(pmd);
|
free(pmd);
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user