mirror of
https://github.com/openvswitch/ovs
synced 2025-08-31 06:15:47 +00:00
dpif-netdev: fix race for queues between pmd threads.
Currently pmd threads select queues in pmd_load_queues() according to get_n_pmd_threads_on_numa(). This behavior leads to a race between pmds, because dp_netdev_set_pmds_on_numa() starts them one by one and the current number of threads changes incrementally. As a result we may have the following situation with 2 pmd threads: * dp_netdev_set_pmds_on_numa() * pmd12 thread started. Currently only 1 pmd thread exists. dpif_netdev(pmd12)|INFO|Core 1 processing port 'port_1' dpif_netdev(pmd12)|INFO|Core 1 processing port 'port_2' * pmd14 thread started. 2 pmd threads exist. dpif_netdev|INFO|Created 2 pmd threads on numa node 0 dpif_netdev(pmd14)|INFO|Core 2 processing port 'port_2' We have: core 1 --> port 1, port 2 core 2 --> port 2 Fix this by starting pmd threads only after all of them have been configured. Cc: Daniele Di Proietto <diproiettod at vmware.com> Cc: Dyasly Sergey <s.dyasly at samsung.com> Acked-by: Flavio Leitner <fbl@sysclose.org> Acked-by: Daniele Di Proietto <diproiettod@vmware.com> Signed-off-by: Ilya Maximets <i.maximets at samsung.com> Signed-off-by: Ethan Jackson <ethan@nicira.com>
This commit is contained in:
committed by
Ethan Jackson
parent
cfe17b43c1
commit
2aca813cf4
@@ -2961,6 +2961,7 @@ dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id)
|
||||
* pmd threads for the numa node. */
|
||||
if (!n_pmds) {
|
||||
int can_have, n_unpinned, i;
|
||||
struct dp_netdev_pmd_thread **pmds;
|
||||
|
||||
n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id);
|
||||
if (!n_unpinned) {
|
||||
@@ -2972,15 +2973,22 @@ dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id)
|
||||
/* If cpu mask is specified, uses all unpinned cores, otherwise
|
||||
* tries creating NR_PMD_THREADS pmd threads. */
|
||||
can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS);
|
||||
pmds = xzalloc(can_have * sizeof *pmds);
|
||||
for (i = 0; i < can_have; i++) {
|
||||
struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);
|
||||
unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);
|
||||
|
||||
dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id);
|
||||
pmds[i] = xzalloc(sizeof **pmds);
|
||||
dp_netdev_configure_pmd(pmds[i], dp, i, core_id, numa_id);
|
||||
}
|
||||
/* The pmd thread code needs to see all the others configured pmd
|
||||
* threads on the same numa node. That's why we call
|
||||
* 'dp_netdev_configure_pmd()' on all the threads and then we actually
|
||||
* start them. */
|
||||
for (i = 0; i < can_have; i++) {
|
||||
/* Each thread will distribute all devices rx-queues among
|
||||
* themselves. */
|
||||
pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
|
||||
pmds[i]->thread = ovs_thread_create("pmd", pmd_thread_main, pmds[i]);
|
||||
}
|
||||
free(pmds);
|
||||
VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id);
|
||||
}
|
||||
}
|
||||
|
Reference in New Issue
Block a user