2
0
mirror of https://github.com/openvswitch/ovs synced 2025-08-31 14:25:26 +00:00

dpif-netdev: Allow PMD auto load balance with cross-numa.

Previously, auto load balance did not trigger a reassignment when
there was any cross-NUMA polling, because an rxq could be polled from a
different NUMA node after reassignment, which could invalidate the estimates.

In the case where there is only one numa with pmds available, the
same numa will always poll before and after reassignment, so estimates
are valid. Allow PMD auto load balance to trigger a reassignment in
this case.

Acked-by: Eelco Chaudron <echaudro@redhat.com>
Acked-by: David Marchand <david.marchand@redhat.com>
Tested-by: Sunil Pai G <sunil.pai.g@intel.com>
Acked-by: Flavio Leitner <fbl@sysclose.org>
Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
This commit is contained in:
Kevin Traynor
2021-03-18 11:34:04 +00:00
committed by Ilya Maximets
parent edcfd7176f
commit ec68a877db
3 changed files with 22 additions and 4 deletions

View File

@@ -4887,6 +4887,12 @@ struct rr_numa {
bool idx_inc;
};
/* Returns the number of NUMA nodes currently tracked in 'rr'.
 * Each entry in the 'numas' hmap represents one NUMA node, so the
 * map's element count is exactly the node count. */
static size_t
rr_numa_list_count(struct rr_numa_list *rr)
{
    struct hmap *numa_map = &rr->numas;

    return hmap_count(numa_map);
}
static struct rr_numa *
rr_numa_list_lookup(struct rr_numa_list *rr, int numa_id)
{
@@ -5599,10 +5605,17 @@ get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list,
for (int i = 0; i < n_rxqs; i++) {
int numa_id = netdev_get_numa_id(rxqs[i]->port->netdev);
numa = rr_numa_list_lookup(&rr, numa_id);
/* If there is no available pmd on the local numa but there is only one
* numa for cross-numa polling, we can estimate the dry run. */
if (!numa && rr_numa_list_count(&rr) == 1) {
numa = rr_numa_list_next(&rr, NULL);
}
if (!numa) {
/* Abort if cross NUMA polling. */
VLOG_DBG("PMD auto lb dry run."
" Aborting due to cross-numa polling.");
VLOG_DBG("PMD auto lb dry run: "
"There's no available (non-isolated) PMD thread on NUMA "
"node %d for port '%s' and there are PMD threads on more "
"than one NUMA node available for cross-NUMA polling. "
"Aborting.", numa_id, netdev_rxq_get_name(rxqs[i]->rx));
goto cleanup;
}