2
0
mirror of https://github.com/openvswitch/ovs synced 2025-08-30 22:05:19 +00:00

dpif-netdev: Change pmd selection order.

Up to this point, rxqs are sorted by the processing cycles they
consumed and assigned to pmds in a round-robin manner.

Ian pointed out that on wrap around the most loaded pmd will be
the next one to be assigned an additional rxq and that it would be
better to reverse the pmd order when wraparound occurs.

In other words, change from assigning by rr to assigning in a forward
and reverse cycle through pmds.

Also, now that the algorithm has finalized, document an example.

Suggested-by: Ian Stokes <ian.stokes@intel.com>
Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
Signed-off-by: Darrell Ball <dlu998@gmail.com>
This commit is contained in:
Kevin Traynor
2017-08-25 00:51:18 -07:00
committed by Darrell Ball
parent 655856ef39
commit 79da1e411b
3 changed files with 43 additions and 2 deletions

View File

@@ -3287,6 +3287,7 @@ struct rr_numa {
int n_pmds;
int cur_index;
bool idx_inc;
};
static struct rr_numa *
@@ -3343,13 +3344,37 @@ rr_numa_list_populate(struct dp_netdev *dp, struct rr_numa_list *rr)
numa->n_pmds++;
numa->pmds = xrealloc(numa->pmds, numa->n_pmds * sizeof *numa->pmds);
numa->pmds[numa->n_pmds - 1] = pmd;
/* At least one pmd so initialise cur_index and idx_inc. */
numa->cur_index = 0;
numa->idx_inc = true;
}
}
/* Returns the next pmd from the numa node, sweeping forward and then
 * backward through the pmd list (ping-pong order) rather than wrapping
 * round-robin.  This avoids assigning an extra rxq to the most loaded
 * pmd immediately after a wraparound.
 *
 * Updates numa->cur_index and numa->idx_inc as a side effect; assumes
 * numa->n_pmds >= 1 (guaranteed by rr_numa_list_populate()). */
static struct dp_netdev_pmd_thread *
rr_numa_get_pmd(struct rr_numa *numa)
{
    int numa_idx = numa->cur_index;

    if (numa->idx_inc) {
        /* Incrementing through list of pmds. */
        if (numa->cur_index == numa->n_pmds - 1) {
            /* Reached the last pmd: reverse direction, reuse this index
             * next call so the endpoint pmd is not assigned twice in a
             * row unintentionally mid-sweep. */
            numa->idx_inc = false;
        } else {
            numa->cur_index++;
        }
    } else {
        /* Decrementing through list of pmds. */
        if (numa->cur_index == 0) {
            /* Reached the first pmd: reverse direction. */
            numa->idx_inc = true;
        } else {
            numa->cur_index--;
        }
    }
    return numa->pmds[numa_idx];
}
static void