dpif-netdev: Avoid hw_miss_packet_recover() for devices with no support.

The hw_miss_packet_recover() API results in performance degradation for
ports that are either not offload capable or do not support this specific
offload API.
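
For context, the "no support" signal here is the EOPNOTSUPP error code
returned by netdev_hw_miss_packet_recover(). The sketch below shows one
common way such a wrapper can report lack of support (a NULL provider
callback); the fake_* types are hypothetical and not the actual OVS
structures:

    /* Hypothetical fake_* types; not the actual OVS netdev structures. */
    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct fake_packet { int len; };

    struct fake_offload_ops {
        /* NULL when the device/provider does not implement the hook. */
        int (*hw_miss_packet_recover)(struct fake_packet *pkt);
    };

    static int
    recover_dispatch(const struct fake_offload_ops *ops,
                     struct fake_packet *pkt)
    {
        if (!ops || !ops->hw_miss_packet_recover) {
            return EOPNOTSUPP;   /* No support: the caller can stop trying. */
        }
        return ops->hw_miss_packet_recover(pkt);
    }

    int
    main(void)
    {
        struct fake_offload_ops no_support = { .hw_miss_packet_recover = NULL };
        struct fake_packet pkt = { .len = 64 };

        printf("err=%d (EOPNOTSUPP=%d)\n",
               recover_dispatch(&no_support, &pkt), EOPNOTSUPP);
        return 0;
    }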

For example, in the test configuration shown below, the vhost-user port
does not support offloads and the VF port does not support the hw_miss
offload API. But because tunnel offload needs to be configured in the other
bridges (br-vxlan and br-phy), OVS has been built with
-DALLOW_EXPERIMENTAL_API.

    br-vhost            br-vxlan            br-phy
vhost-user<-->VF    VF-Rep<-->VxLAN       uplink-port

For every packet between the VF and the vhost-user ports, the hw_miss API is
called even though it is not supported by the ports involved. This leads
to a significant performance drop (~3x in some cases, in both cycles and pps).

Return EOPNOTSUPP when this API fails on a device that doesn't support it,
and avoid calling the API on that port for subsequent packets.
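
As a standalone illustration of this approach, the sketch below probes the
optional API once per rx queue and, on EOPNOTSUPP, clears a per-queue flag
so the call is skipped for all later packets on that queue. The example_*
names are hypothetical and not the actual OVS symbols:

    /* Hypothetical example_* names; not the actual OVS symbols. */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct example_rxq {
        bool hw_miss_api_supported;   /* Starts true; cleared on EOPNOTSUPP. */
    };

    /* Stand-in for the recover call on a device with no support. */
    static int
    example_recover(struct example_rxq *rxq)
    {
        (void) rxq;
        return EOPNOTSUPP;
    }

    static void
    example_process_packet(struct example_rxq *rxq)
    {
        if (rxq->hw_miss_api_supported) {
            int err = example_recover(rxq);

            if (err == EOPNOTSUPP) {
                /* API unsupported by the port; avoid subsequent calls. */
                rxq->hw_miss_api_supported = false;
            } else if (err) {
                /* Real failure: the real code counts and drops the packet. */
                return;
            }
        }
        /* ... normal datapath processing would continue here ... */
    }

    int
    main(void)
    {
        struct example_rxq rxq = { .hw_miss_api_supported = true };

        example_process_packet(&rxq);   /* First packet probes the API once. */
        example_process_packet(&rxq);   /* Later packets skip it entirely. */
        printf("supported after probe: %s\n",
               rxq.hw_miss_api_supported ? "true" : "false");
        return 0;
    }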

Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
Author: Sriharsha Basavapatna
Date: 2021-12-12 23:37:06 -08:00
Committed by: Ilya Maximets
Parent: e7e9973b80
Commit: 6e50c16518
2 changed files with 19 additions and 7 deletions

@@ -378,6 +378,7 @@ struct dp_netdev_rxq {
     unsigned intrvl_idx;               /* Write index for 'cycles_intrvl'. */
     struct dp_netdev_pmd_thread *pmd;  /* pmd thread that polls this queue. */
     bool is_vhost;                     /* Is rxq of a vhost port. */
+    bool hw_miss_api_supported;        /* hw_miss_packet_recover() supported.*/
 
     /* Counters of cycles spent successfully polling and processing pkts. */
     atomic_ullong cycles[RXQ_N_CYCLES];
@@ -4988,6 +4989,7 @@ port_reconfigure(struct dp_netdev_port *port)
         port->rxqs[i].port = port;
         port->rxqs[i].is_vhost = !strncmp(port->type, "dpdkvhost", 9);
+        port->rxqs[i].hw_miss_api_supported = true;
 
         err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i);
         if (err) {
@@ -7536,12 +7538,18 @@ dp_netdev_hw_flow(const struct dp_netdev_pmd_thread *pmd,
 #ifdef ALLOW_EXPERIMENTAL_API /* Packet restoration API required. */
     /* Restore the packet if HW processing was terminated before completion. */
     struct dp_netdev_rxq *rxq = pmd->ctx.last_rxq;
-    int err;
 
-    err = netdev_hw_miss_packet_recover(rxq->port->netdev, packet);
-    if (err && err != EOPNOTSUPP) {
-        COVERAGE_INC(datapath_drop_hw_miss_recover);
-        return -1;
+    if (rxq->hw_miss_api_supported) {
+        int err = netdev_hw_miss_packet_recover(rxq->port->netdev, packet);
+        if (err) {
+            if (err != EOPNOTSUPP) {
+                COVERAGE_INC(datapath_drop_hw_miss_recover);
+                return -1;
+            } else {
+                /* API unsupported by the port; avoid subsequent calls. */
+                rxq->hw_miss_api_supported = false;
+            }
+        }
     }
 #endif