2
0
mirror of https://github.com/openvswitch/ovs synced 2025-08-31 06:15:47 +00:00

dpif-netdev: Update offloaded flows statistics.

In case a flow is HW offloaded, its packets do not reach the SW and are
therefore not counted in the statistics. Use the netdev flow get API to
update flow statistics with the HW statistics.

Co-authored-by: Eli Britstein <elibr@mellanox.com>
Signed-off-by: Ophir Munk <ophirmu@mellanox.com>
Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
Signed-off-by: Eli Britstein <elibr@mellanox.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
This commit is contained in:
Ophir Munk
2020-01-09 07:46:47 +00:00
committed by Ilya Maximets
parent 80944cb41d
commit a309e4f526

View File

@@ -3028,10 +3028,51 @@ dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
return NULL;
}
static void
get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
struct dpif_flow_stats *stats)
/* Queries the HW-offload statistics and attributes for 'netdev_flow' via the
 * netdev flow API.  On success, fills 'stats' and 'attrs' and returns true.
 * Returns false if the flow offload API is disabled, the flow's in_port has
 * no associated netdev, or the netdev flow lookup fails. */
static bool
dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
const struct dp_netdev_flow *netdev_flow,
struct dpif_flow_stats *stats,
struct dpif_flow_attrs *attrs)
{
/* Stack scratch buffer for the actions returned by netdev_flow_get();
 * the actions themselves are not used here, only the stats/attrs. */
uint64_t act_buf[1024 / 8];
struct nlattr *actions;
struct netdev *netdev;
struct match match;
struct ofpbuf buf;
int ret = 0;
if (!netdev_is_flow_api_enabled()) {
return false;
}
/* Look up the netdev by the flow's input port; paired with the
 * netdev_close() below. */
netdev = netdev_ports_get(netdev_flow->flow.in_port.odp_port, dp->class);
if (!netdev) {
return false;
}
ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
/* Taking a global 'port_mutex' to fulfill thread safety
 * restrictions for the netdev-offload-dpdk module. */
ovs_mutex_lock(&dp->port_mutex);
/* Flows are looked up by their mega ufid; 'match' and 'actions' are
 * filled as out-parameters but ignored by this caller. */
ret = netdev_flow_get(netdev, &match, &actions, &netdev_flow->mega_ufid,
stats, attrs, &buf);
ovs_mutex_unlock(&dp->port_mutex);
netdev_close(netdev);
if (ret) {
return false;
}
return true;
}
static void
get_dpif_flow_status(const struct dp_netdev *dp,
const struct dp_netdev_flow *netdev_flow_,
struct dpif_flow_stats *stats,
struct dpif_flow_attrs *attrs)
{
struct dpif_flow_stats offload_stats;
struct dpif_flow_attrs offload_attrs;
struct dp_netdev_flow *netdev_flow;
unsigned long long n;
long long used;
@@ -3047,6 +3088,21 @@ get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
stats->used = used;
atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
stats->tcp_flags = flags;
if (dpif_netdev_get_flow_offload_status(dp, netdev_flow,
&offload_stats, &offload_attrs)) {
stats->n_packets += offload_stats.n_packets;
stats->n_bytes += offload_stats.n_bytes;
stats->used = MAX(stats->used, offload_stats.used);
stats->tcp_flags |= offload_stats.tcp_flags;
if (attrs) {
attrs->offloaded = offload_attrs.offloaded;
attrs->dp_layer = offload_attrs.dp_layer;
}
} else if (attrs) {
attrs->offloaded = false;
attrs->dp_layer = "ovs";
}
}
/* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
@@ -3054,7 +3110,8 @@ get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
* 'mask_buf'. Actions will be returned without copying, by relying on RCU to
* protect them. */
static void
dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
dp_netdev_flow_to_dpif_flow(const struct dp_netdev *dp,
const struct dp_netdev_flow *netdev_flow,
struct ofpbuf *key_buf, struct ofpbuf *mask_buf,
struct dpif_flow *flow, bool terse)
{
@@ -3097,10 +3154,8 @@ dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
flow->ufid = netdev_flow->ufid;
flow->ufid_present = true;
flow->pmd_id = netdev_flow->pmd_id;
get_dpif_flow_stats(netdev_flow, &flow->stats);
flow->attrs.offloaded = false;
flow->attrs.dp_layer = "ovs";
get_dpif_flow_status(dp, netdev_flow, &flow->stats, &flow->attrs);
}
static int
@@ -3203,8 +3258,8 @@ dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key,
get->key_len);
if (netdev_flow) {
dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer,
get->flow, false);
dp_netdev_flow_to_dpif_flow(dp, netdev_flow, get->buffer,
get->buffer, get->flow, false);
error = 0;
break;
} else {
@@ -3379,7 +3434,7 @@ flow_put_on_pmd(struct dp_netdev_pmd_thread *pmd,
put->actions, put->actions_len);
if (stats) {
get_dpif_flow_stats(netdev_flow, stats);
get_dpif_flow_status(pmd->dp, netdev_flow, stats, NULL);
}
if (put->flags & DPIF_FP_ZERO_STATS) {
/* XXX: The userspace datapath uses thread local statistics
@@ -3498,7 +3553,7 @@ flow_del_on_pmd(struct dp_netdev_pmd_thread *pmd,
del->key_len);
if (netdev_flow) {
if (stats) {
get_dpif_flow_stats(netdev_flow, stats);
get_dpif_flow_status(pmd->dp, netdev_flow, stats, NULL);
}
dp_netdev_pmd_remove_flow(pmd, netdev_flow);
} else {
@@ -3632,13 +3687,13 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
= dpif_netdev_flow_dump_thread_cast(thread_);
struct dpif_netdev_flow_dump *dump = thread->dump;
struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
int n_flows = 0;
int i;
ovs_mutex_lock(&dump->mutex);
if (!dump->status) {
struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);
@@ -3695,7 +3750,7 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
dp_netdev_flow_to_dpif_flow(dp, netdev_flow, &key, &mask, f,
dump->up.terse);
}