Mirror of https://github.com/openvswitch/ovs, synced 2025-08-31 06:15:47 +00:00.
dpif-netdev: Catch reloads faster.
Looking at the reload flag only every 1024 loops can be a long time under load, since we might be handling 32 packets per rxq, per iteration, which means up to poll_cnt * 32 * 1024 packets. Look at the flag every loop, no major performance impact seen. Signed-off-by: David Marchand <david.marchand@redhat.com> Acked-by: Eelco Chaudron <echaudro@redhat.com> Acked-by: Ilya Maximets <i.maximets@samsung.com> Signed-off-by: Ian Stokes <ian.stokes@intel.com>
Commit: 68a0625b78 (parent: e2cafa8692), committed by Ian Stokes.
@@ -5480,7 +5480,6 @@ reload:
                 poll_block();
             }
         }
         lc = UINT_MAX;
     }

     pmd->intrvl_tsc_prev = 0;
@@ -5529,11 +5528,6 @@ reload:
                 emc_cache_slow_sweep(&((pmd->flow_cache).emc_cache));
             }

             atomic_read_explicit(&pmd->reload, &reload, memory_order_acquire);
             if (reload) {
                 break;
             }

             for (i = 0; i < poll_cnt; i++) {
                 uint64_t current_seq =
                          netdev_get_change_seq(poll_list[i].rxq->port->netdev);
@@ -5544,6 +5538,12 @@ reload:
                 }
             }
         }

         atomic_read_explicit(&pmd->reload, &reload, memory_order_acquire);
         if (OVS_UNLIKELY(reload)) {
             break;
         }

         pmd_perf_end_iteration(s, rx_packets, tx_packets,
                                pmd_perf_metrics_enabled(pmd));
     }
Reference in new issue · Block a user