
Add offload packets statistics

Add the argument '--offload-stats' to the command 'ovs-appctl bridge/dump-flows'
to display statistics for offloaded packets.

The command output is shown below.

Original command:

ovs-appctl bridge/dump-flows br0

duration=574s, n_packets=1152, n_bytes=110768, priority=0,actions=NORMAL
table_id=254, duration=574s, n_packets=0, n_bytes=0, priority=2,recirc_id=0,actions=drop
table_id=254, duration=574s, n_packets=0, n_bytes=0, priority=0,reg0=0x1,actions=controller(reason=)
table_id=254, duration=574s, n_packets=0, n_bytes=0, priority=0,reg0=0x2,actions=drop
table_id=254, duration=574s, n_packets=0, n_bytes=0, priority=0,reg0=0x3,actions=drop

New command with the '--offload-stats' argument:

Note: 'n_offload_packets' is a subset of 'n_packets' and 'n_offload_bytes' is
a subset of 'n_bytes'.

ovs-appctl bridge/dump-flows --offload-stats br0

duration=582s, n_packets=1152, n_bytes=110768, n_offload_packets=1107, n_offload_bytes=107992, priority=0,actions=NORMAL
table_id=254, duration=582s, n_packets=0, n_bytes=0, n_offload_packets=0, n_offload_bytes=0, priority=2,recirc_id=0,actions=drop
table_id=254, duration=582s, n_packets=0, n_bytes=0, n_offload_packets=0, n_offload_bytes=0, priority=0,reg0=0x1,actions=controller(reason=)
table_id=254, duration=582s, n_packets=0, n_bytes=0, n_offload_packets=0, n_offload_bytes=0, priority=0,reg0=0x2,actions=drop
table_id=254, duration=582s, n_packets=0, n_bytes=0, n_offload_packets=0, n_offload_bytes=0, priority=0,reg0=0x3,actions=drop

Signed-off-by: zhaozhanxu <zhaozhanxu@163.com>
Signed-off-by: Simon Horman <simon.horman@netronome.com>
zhaozhanxu 2019-12-05 14:26:25 +08:00 committed by Simon Horman
parent 3843208ee0
commit 164413156c
14 changed files with 112 additions and 62 deletions
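
Since 'n_offload_packets' and 'n_offload_bytes' are subsets of the totals, the
share of traffic still handled in software can be derived by subtraction. The
following minimal C sketch is not part of the commit (the struct and function
names are made up for illustration); it uses the numbers from the example
output above:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the two packet counters printed by
 * 'bridge/dump-flows --offload-stats'. */
struct flow_counters {
    uint64_t n_packets;          /* Total packets matched by the flow. */
    uint64_t n_offload_packets;  /* Subset handled by offloaded (HW) flows. */
};

static void
print_offload_share(const struct flow_counters *c)
{
    uint64_t sw_packets = c->n_packets - c->n_offload_packets;
    double pct = c->n_packets
                 ? 100.0 * c->n_offload_packets / c->n_packets : 0.0;

    printf("offloaded=%"PRIu64" software=%"PRIu64" (%.1f%% offloaded)\n",
           c->n_offload_packets, sw_packets, pct);
}

int
main(void)
{
    /* Numbers from the first flow in the example output above. */
    struct flow_counters c = { .n_packets = 1152, .n_offload_packets = 1107 };
    print_offload_share(&c);  /* offloaded=1107 software=45 (96.1% offloaded) */
    return 0;
}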

NEWS
View File

@ -100,6 +100,9 @@ v2.12.0 - 03 Sep 2019
* Add support for conntrack zone-based timeout policy.
- 'ovs-dpctl dump-flows' is no longer suitable for dumping offloaded flows.
'ovs-appctl dpctl/dump-flows' should be used instead.
- Add new argument '--offload-stats' for command
'ovs-appctl bridge/dump-flows',
so it can display offloaded packets statistics.
- Add L2 GRE tunnel over IPv6 support.
v2.11.0 - 19 Feb 2019

View File

@ -496,6 +496,18 @@ struct dpif_flow_stats {
uint16_t tcp_flags;
};
/* more statistics info for offloaded packets and bytes */
struct dpif_flow_detailed_stats {
uint64_t n_packets;
uint64_t n_bytes;
/* n_offload_packets are a subset of n_packets */
uint64_t n_offload_packets;
/* n_offload_bytes are a subset of n_bytes */
uint64_t n_offload_bytes;
long long int used;
uint16_t tcp_flags;
};
struct dpif_flow_attrs {
bool offloaded; /* True if flow is offloaded to HW. */
const char *dp_layer; /* DP layer the flow is handled in. */
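
Note that struct dpif_flow_detailed_stats repeats the base counters
(n_packets, n_bytes, used, tcp_flags) rather than embedding struct
dpif_flow_stats, so existing field names keep working for callers. As a
minimal sketch of how a caller needing only the totals could map back to the
base struct (this helper is hypothetical, not part of the commit, and assumes
the usual dpif_flow_stats fields):

/* Hypothetical helper: collapse the detailed statistics into a plain
 * struct dpif_flow_stats.  The offload counters are dropped because they
 * are already included in n_packets and n_bytes. */
static inline struct dpif_flow_stats
dpif_flow_detailed_stats_to_base(const struct dpif_flow_detailed_stats *d)
{
    struct dpif_flow_stats s = {
        .n_packets = d->n_packets,
        .n_bytes = d->n_bytes,
        .used = d->used,
        .tcp_flags = d->tcp_flags,
    };
    return s;
}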

View File

@ -950,13 +950,12 @@ bond_recirculation_account(struct bond *bond)
struct rule *rule = entry->pr_rule;
if (rule) {
uint64_t n_packets OVS_UNUSED;
struct pkt_stats stats;
long long int used OVS_UNUSED;
uint64_t n_bytes;
rule->ofproto->ofproto_class->rule_get_stats(
rule, &n_packets, &n_bytes, &used);
bond_entry_account(entry, n_bytes);
rule, &stats, &used);
bond_entry_account(entry, stats.n_bytes);
}
}
}

View File

@ -2264,7 +2264,7 @@ static enum reval_result
revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
const struct dpif_flow_stats *stats,
struct ofpbuf *odp_actions, uint64_t reval_seq,
struct recirc_refs *recircs)
struct recirc_refs *recircs, bool offloaded)
OVS_REQUIRES(ukey->mutex)
{
bool need_revalidate = ukey->reval_seq != reval_seq;
@ -2299,7 +2299,7 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
/* Stats for deleted flows will be attributed upon flow deletion. Skip. */
if (result != UKEY_DELETE) {
xlate_push_stats(ukey->xcache, &push);
xlate_push_stats(ukey->xcache, &push, offloaded);
ukey->stats = *stats;
ukey->reval_seq = reval_seq;
}
@ -2412,7 +2412,7 @@ push_dp_ops(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
if (op->ukey) {
ovs_mutex_lock(&op->ukey->mutex);
if (op->ukey->xcache) {
xlate_push_stats(op->ukey->xcache, push);
xlate_push_stats(op->ukey->xcache, push, false);
ovs_mutex_unlock(&op->ukey->mutex);
continue;
}
@ -2689,7 +2689,8 @@ revalidate(struct revalidator *revalidator)
result = UKEY_DELETE;
} else {
result = revalidate_ukey(udpif, ukey, &f->stats, &odp_actions,
reval_seq, &recircs);
reval_seq, &recircs,
f->attrs.offloaded);
}
ukey->dump_seq = dump_seq;
@ -2774,7 +2775,7 @@ revalidator_sweep__(struct revalidator *revalidator, bool purge)
COVERAGE_INC(revalidate_missed_dp_flow);
memset(&stats, 0, sizeof stats);
result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
reval_seq, &recircs);
reval_seq, &recircs, false);
}
if (result != UKEY_KEEP) {
/* Clears 'recircs' if filled by revalidate_ukey(). */

View File

@ -90,7 +90,7 @@ xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
/* Push stats and perform side effects of flow translation. */
void
xlate_push_stats_entry(struct xc_entry *entry,
struct dpif_flow_stats *stats)
struct dpif_flow_stats *stats, bool offloaded)
{
struct eth_addr dmac;
@ -104,7 +104,7 @@ xlate_push_stats_entry(struct xc_entry *entry,
? 0 : stats->n_packets);
break;
case XC_RULE:
rule_dpif_credit_stats(entry->rule, stats);
rule_dpif_credit_stats(entry->rule, stats, offloaded);
break;
case XC_BOND:
bond_account(entry->bond.bond, entry->bond.flow,
@ -169,7 +169,7 @@ xlate_push_stats_entry(struct xc_entry *entry,
void
xlate_push_stats(struct xlate_cache *xcache,
struct dpif_flow_stats *stats)
struct dpif_flow_stats *stats, bool offloaded)
{
if (!stats->n_packets) {
return;
@ -178,7 +178,7 @@ xlate_push_stats(struct xlate_cache *xcache,
struct xc_entry *entry;
struct ofpbuf entries = xcache->entries;
XC_ENTRY_FOR_EACH (entry, &entries) {
xlate_push_stats_entry(entry, stats);
xlate_push_stats_entry(entry, stats, offloaded);
}
}

View File

@ -142,8 +142,10 @@ struct xlate_cache {
void xlate_cache_init(struct xlate_cache *);
struct xlate_cache *xlate_cache_new(void);
struct xc_entry *xlate_cache_add_entry(struct xlate_cache *, enum xc_type);
void xlate_push_stats_entry(struct xc_entry *, struct dpif_flow_stats *);
void xlate_push_stats(struct xlate_cache *, struct dpif_flow_stats *);
void xlate_push_stats_entry(struct xc_entry *, struct dpif_flow_stats *,
bool);
void xlate_push_stats(struct xlate_cache *, struct dpif_flow_stats *,
bool);
void xlate_cache_clear_entry(struct xc_entry *);
void xlate_cache_clear(struct xlate_cache *);
void xlate_cache_uninit(struct xlate_cache *);

View File

@ -3704,7 +3704,7 @@ native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport,
*/
if (backup_resubmit_stats) {
struct dpif_flow_stats stats = *backup_resubmit_stats;
xlate_push_stats(ctx->xin->xcache, &stats);
xlate_push_stats(ctx->xin->xcache, &stats, false);
}
xlate_cache_steal_entries(backup_xcache, ctx->xin->xcache);
@ -4270,7 +4270,7 @@ xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule,
const struct rule_actions *actions;
if (ctx->xin->resubmit_stats) {
rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats, false);
}
ctx->resubmits++;
@ -7597,7 +7597,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
ctx.xin->resubmit_stats, &ctx.table_id,
flow->in_port.ofp_port, true, true, ctx.xin->xcache);
if (ctx.xin->resubmit_stats) {
rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats, false);
}
if (ctx.xin->xcache) {
struct xc_entry *entry;

View File

@ -80,7 +80,7 @@ COVERAGE_DEFINE(packet_in_overflow);
struct flow_miss;
static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
static void rule_get_stats(struct rule *, struct pkt_stats *stats,
long long int *used);
static struct rule_dpif *rule_dpif_cast(const struct rule *);
static void rule_expire(struct rule_dpif *, long long now);
@ -4168,7 +4168,7 @@ ofproto_dpif_execute_actions__(struct ofproto_dpif *ofproto,
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
if (rule) {
rule_dpif_credit_stats(rule, &stats);
rule_dpif_credit_stats(rule, &stats, false);
}
uint64_t odp_actions_stub[1024 / 8];
@ -4222,10 +4222,14 @@ ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
static void
rule_dpif_credit_stats__(struct rule_dpif *rule,
const struct dpif_flow_stats *stats,
bool credit_counts)
bool credit_counts, bool offloaded)
OVS_REQUIRES(rule->stats_mutex)
{
if (credit_counts) {
if (offloaded) {
rule->stats.n_offload_packets += stats->n_packets;
rule->stats.n_offload_bytes += stats->n_bytes;
}
rule->stats.n_packets += stats->n_packets;
rule->stats.n_bytes += stats->n_bytes;
}
@ -4234,15 +4238,16 @@ rule_dpif_credit_stats__(struct rule_dpif *rule,
void
rule_dpif_credit_stats(struct rule_dpif *rule,
const struct dpif_flow_stats *stats)
const struct dpif_flow_stats *stats, bool offloaded)
{
ovs_mutex_lock(&rule->stats_mutex);
if (OVS_UNLIKELY(rule->new_rule)) {
ovs_mutex_lock(&rule->new_rule->stats_mutex);
rule_dpif_credit_stats__(rule->new_rule, stats, rule->forward_counts);
rule_dpif_credit_stats__(rule->new_rule, stats, rule->forward_counts,
offloaded);
ovs_mutex_unlock(&rule->new_rule->stats_mutex);
} else {
rule_dpif_credit_stats__(rule, stats, true);
rule_dpif_credit_stats__(rule, stats, true, offloaded);
}
ovs_mutex_unlock(&rule->stats_mutex);
}
@ -4701,17 +4706,19 @@ rule_destruct(struct rule *rule_)
}
static void
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes,
rule_get_stats(struct rule *rule_, struct pkt_stats *stats,
long long int *used)
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
ovs_mutex_lock(&rule->stats_mutex);
if (OVS_UNLIKELY(rule->new_rule)) {
rule_get_stats(&rule->new_rule->up, packets, bytes, used);
rule_get_stats(&rule->new_rule->up, stats, used);
} else {
*packets = rule->stats.n_packets;
*bytes = rule->stats.n_bytes;
stats->n_packets = rule->stats.n_packets;
stats->n_bytes = rule->stats.n_bytes;
stats->n_offload_packets = rule->stats.n_offload_packets;
stats->n_offload_bytes = rule->stats.n_offload_bytes;
*used = rule->stats.used;
}
ovs_mutex_unlock(&rule->stats_mutex);
@ -4875,7 +4882,7 @@ ofproto_dpif_xcache_execute(struct ofproto_dpif *ofproto,
case XC_GROUP:
case XC_TNL_NEIGH:
case XC_TUNNEL_HEADER:
xlate_push_stats_entry(entry, stats);
xlate_push_stats_entry(entry, stats, false);
break;
default:
OVS_NOT_REACHED();

View File

@ -76,7 +76,7 @@ struct rule_dpif {
* - Do include packets and bytes from datapath flows which have not
* recently been processed by a revalidator. */
struct ovs_mutex stats_mutex;
struct dpif_flow_stats stats OVS_GUARDED;
struct dpif_flow_detailed_stats stats OVS_GUARDED;
/* If non-NULL, will point to a new rule (for which a reference is held) to
* which all the stats updates should be forwarded. This exists only
@ -107,7 +107,7 @@ struct rule_dpif *rule_dpif_lookup_from_table(struct ofproto_dpif *,
struct xlate_cache *);
void rule_dpif_credit_stats(struct rule_dpif *,
const struct dpif_flow_stats *);
const struct dpif_flow_stats *, bool);
void rule_set_recirc_id(struct rule *, uint32_t id);

View File

@ -588,6 +588,13 @@ struct ofgroup {
struct rule_collection rules OVS_GUARDED; /* Referring rules. */
};
struct pkt_stats {
uint64_t n_packets;
uint64_t n_bytes;
uint64_t n_offload_packets; /* n_offload_packets is a subset of n_packets */
uint64_t n_offload_bytes; /* n_offload_bytes is a subset of n_bytes */
};
struct ofgroup *ofproto_group_lookup(const struct ofproto *ofproto,
uint32_t group_id, ovs_version_t version,
bool take_ref);
@ -1348,8 +1355,8 @@ struct ofproto_class {
* matched it in '*packet_count' and the number of bytes in those packets
* in '*byte_count'. UINT64_MAX indicates that the packet count or byte
* count is unknown. */
void (*rule_get_stats)(struct rule *rule, uint64_t *packet_count,
uint64_t *byte_count, long long int *used)
void (*rule_get_stats)(struct rule *rule, struct pkt_stats *stats,
long long int *used)
/* OVS_EXCLUDED(ofproto_mutex) */;
/* Translates actions in 'opo->ofpacts', for 'opo->packet' in flow tables

View File

@ -4669,6 +4669,7 @@ handle_flow_stats_request(struct ofconn *ofconn,
RULE_COLLECTION_FOR_EACH (rule, &rules) {
long long int now = time_msec();
struct ofputil_flow_stats fs;
struct pkt_stats stats;
long long int created, used, modified;
const struct rule_actions *actions;
enum ofputil_flow_mod_flags flags;
@ -4684,8 +4685,9 @@ handle_flow_stats_request(struct ofconn *ofconn,
flags = rule->flags;
ovs_mutex_unlock(&rule->mutex);
ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count,
&fs.byte_count, &used);
ofproto->ofproto_class->rule_get_stats(rule, &stats, &used);
fs.packet_count = stats.n_packets;
fs.byte_count = stats.n_bytes;
minimatch_expand(&rule->cr.match, &fs.match);
fs.table_id = rule->table_id;
@ -4710,14 +4712,14 @@ handle_flow_stats_request(struct ofconn *ofconn,
}
static void
flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results)
flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results,
bool offload_stats)
{
uint64_t packet_count, byte_count;
struct pkt_stats stats;
const struct rule_actions *actions;
long long int created, used;
rule->ofproto->ofproto_class->rule_get_stats(rule, &packet_count,
&byte_count, &used);
rule->ofproto->ofproto_class->rule_get_stats(rule, &stats, &used);
ovs_mutex_lock(&rule->mutex);
actions = rule_get_actions(rule);
@ -4728,8 +4730,14 @@ flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results)
ds_put_format(results, "table_id=%"PRIu8", ", rule->table_id);
}
ds_put_format(results, "duration=%llds, ", (time_msec() - created) / 1000);
ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
ds_put_format(results, "n_packets=%"PRIu64", ", stats.n_packets);
ds_put_format(results, "n_bytes=%"PRIu64", ", stats.n_bytes);
if (offload_stats) {
ds_put_format(results, "n_offload_packets=%"PRIu64", ",
stats.n_offload_packets);
ds_put_format(results, "n_offload_bytes=%"PRIu64", ",
stats.n_offload_bytes);
}
cls_rule_format(&rule->cr, ofproto_get_tun_tab(ofproto), NULL, results);
ds_put_char(results, ',');
@ -4743,7 +4751,8 @@ flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results)
/* Adds a pretty-printed description of all flows to 'results', including
* hidden flows (e.g., set up by in-band control). */
void
ofproto_get_all_flows(struct ofproto *p, struct ds *results)
ofproto_get_all_flows(struct ofproto *p, struct ds *results,
bool offload_stats)
{
struct oftable *table;
@ -4751,7 +4760,7 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results)
struct rule *rule;
CLS_FOR_EACH (rule, cr, &table->cls) {
flow_stats_ds(p, rule, results);
flow_stats_ds(p, rule, results, offload_stats);
}
}
}
@ -4839,23 +4848,21 @@ handle_aggregate_stats_request(struct ofconn *ofconn,
struct rule *rule;
RULE_COLLECTION_FOR_EACH (rule, &rules) {
uint64_t packet_count;
uint64_t byte_count;
struct pkt_stats pkt_stats;
long long int used;
ofproto->ofproto_class->rule_get_stats(rule, &packet_count,
&byte_count, &used);
ofproto->ofproto_class->rule_get_stats(rule, &pkt_stats, &used);
if (packet_count == UINT64_MAX) {
if (pkt_stats.n_packets == UINT64_MAX) {
unknown_packets = true;
} else {
stats.packet_count += packet_count;
stats.packet_count += pkt_stats.n_packets;
}
if (byte_count == UINT64_MAX) {
if (pkt_stats.n_bytes == UINT64_MAX) {
unknown_bytes = true;
} else {
stats.byte_count += byte_count;
stats.byte_count += pkt_stats.n_bytes;
}
stats.flow_count++;
@ -6046,6 +6053,7 @@ ofproto_rule_send_removed(struct rule *rule)
{
struct ofputil_flow_removed fr;
long long int used;
struct pkt_stats stats;
minimatch_expand(&rule->cr.match, &fr.match);
fr.priority = rule->cr.priority;
@ -6068,8 +6076,9 @@ ofproto_rule_send_removed(struct rule *rule)
fr.idle_timeout = rule->idle_timeout;
fr.hard_timeout = rule->hard_timeout;
ovs_mutex_unlock(&rule->mutex);
rule->ofproto->ofproto_class->rule_get_stats(rule, &fr.packet_count,
&fr.byte_count, &used);
rule->ofproto->ofproto_class->rule_get_stats(rule, &stats, &used);
fr.packet_count += stats.n_packets;
fr.byte_count += stats.n_bytes;
connmgr_send_flow_removed(connmgr, &fr);
ovs_mutex_unlock(&ofproto_mutex);
}
@ -8886,11 +8895,11 @@ rule_eviction_priority(struct ofproto *ofproto, struct rule *rule)
expiration = modified + rule->hard_timeout * 1000;
}
if (rule->idle_timeout) {
uint64_t packets, bytes;
struct pkt_stats stats;
long long int used;
long long int idle_expiration;
ofproto->ofproto_class->rule_get_stats(rule, &packets, &bytes, &used);
ofproto->ofproto_class->rule_get_stats(rule, &stats, &used);
idle_expiration = used + rule->idle_timeout * 1000;
expiration = MIN(expiration, idle_expiration);
}

View File

@ -543,7 +543,7 @@ void ofproto_configure_table(struct ofproto *, int table_id,
/* Configuration querying. */
bool ofproto_has_snoops(const struct ofproto *);
void ofproto_get_snoops(const struct ofproto *, struct sset *);
void ofproto_get_all_flows(struct ofproto *p, struct ds *);
void ofproto_get_all_flows(struct ofproto *p, struct ds *, bool);
void ofproto_get_netflow_ids(const struct ofproto *,
uint8_t *engine_type, uint8_t *engine_id);

View File

@ -515,8 +515,8 @@ bridge_init(const char *remote)
qos_unixctl_show_types, NULL);
unixctl_command_register("qos/show", "interface", 1, 1,
qos_unixctl_show, NULL);
unixctl_command_register("bridge/dump-flows", "bridge", 1, 1,
bridge_unixctl_dump_flows, NULL);
unixctl_command_register("bridge/dump-flows", "[--offload-stats] bridge",
1, 2, bridge_unixctl_dump_flows, NULL);
unixctl_command_register("bridge/reconnect", "[bridge]", 0, 1,
bridge_unixctl_reconnect, NULL);
lacp_init();
@ -3594,20 +3594,27 @@ bridge_lookup(const char *name)
/* Handle requests for a listing of all flows known by the OpenFlow
* stack, including those normally hidden. */
static void
bridge_unixctl_dump_flows(struct unixctl_conn *conn, int argc OVS_UNUSED,
bridge_unixctl_dump_flows(struct unixctl_conn *conn, int argc,
const char *argv[], void *aux OVS_UNUSED)
{
struct bridge *br;
struct ds results;
br = bridge_lookup(argv[1]);
br = bridge_lookup(argv[argc - 1]);
if (!br) {
unixctl_command_reply_error(conn, "Unknown bridge");
return;
}
bool offload_stats = false;
for (int i = 1; i < argc - 1; i++) {
if (!strcmp(argv[i], "--offload-stats")) {
offload_stats = true;
}
}
ds_init(&results);
ofproto_get_all_flows(br->ofproto, &results);
ofproto_get_all_flows(br->ofproto, &results, offload_stats);
unixctl_command_reply(conn, ds_cstr(&results));
ds_destroy(&results);

View File

@ -184,11 +184,14 @@ their controller connections and reconnect.
.IP
This command might be useful for debugging OpenFlow controller issues.
.
.IP "\fBbridge/dump\-flows\fR \fIbridge\fR"
.IP "\fBbridge/dump\-flows\fR [\fB\-\-offload-stats\fR] \fIbridge\fR"
Lists all flows in \fIbridge\fR, including those normally hidden to
commands such as \fBovs\-ofctl dump\-flows\fR. Flows set up by mechanisms
such as in-band control and fail-open are hidden from the controller
since it is not allowed to modify or override them.
If \fB\-\-offload-stats\fR is specified, statistics for offloaded packets and
bytes are also listed; these are a subset of the total packets and bytes.
.SS "BOND COMMANDS"
These commands manage bonded ports on an Open vSwitch's bridges. To
understand some of these commands, it is important to understand a