/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef NETDEV_H
#define NETDEV_H 1

#include "openvswitch/netdev.h"
#include "openvswitch/types.h"
#include "packets.h"
#include "flow.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Generic interface to network devices ("netdev"s).
 *
 * Every port on a switch must have a corresponding netdev that must minimally
 * support a few operations, such as the ability to read the netdev's MTU.
 * The Porting section of the documentation has more information in the
 * "Writing a netdev Provider" section.
 *
 * Thread-safety
 * =============
 *
 * Most of the netdev functions are fully thread-safe: they may be called from
 * any number of threads on the same or different netdev objects.  The
 * exceptions are:
 *
 *    netdev_rxq_recv()
 *    netdev_rxq_wait()
 *    netdev_rxq_drain()
 *
 *      These functions are conditionally thread-safe: they may be called from
 *      different threads only on different netdev_rxq objects.  (The client
 *      may create multiple netdev_rxq objects for a single netdev and access
 *      each of those from a different thread.)
 *
 *    NETDEV_QUEUE_FOR_EACH
 *    netdev_queue_dump_next()
 *    netdev_queue_dump_done()
 *
 *      These functions are conditionally thread-safe: they may be called from
 *      different threads only on different netdev_queue_dump objects.  (The
 *      client may create multiple netdev_queue_dump objects for a single
 *      netdev and access each of those from a different thread.)
 */

struct dp_packet_batch;
struct dp_packet;
struct netdev_class;
struct netdev_rxq;
struct netdev_saved_flags;
struct ofpbuf;
struct in_addr;
struct in6_addr;
struct smap;
struct sset;
struct ovs_action_push_tnl;

enum netdev_pt_mode {
    /* The netdev is packet type aware.  It can potentially carry any kind of
     * packet.  This "modern" mode is appropriate for both netdevs that handle
     * only a single kind of packet (such as a virtual or physical Ethernet
     * interface) and for those that can handle multiple (such as VXLAN-GPE or
     * Geneve). */
    NETDEV_PT_AWARE,

    /* The netdev sends and receives only Ethernet frames.  The netdev cannot
     * carry packets other than Ethernet frames.  This is a legacy mode for
     * backward compatibility with controllers that are not prepared to handle
     * OpenFlow 1.5+ "packet_type". */
    NETDEV_PT_LEGACY_L2,

    /* The netdev sends and receives only IPv4 and IPv6 packets.  The netdev
     * cannot carry Ethernet frames or other kinds of packets.
     *
     * IPv4 and IPv6 packets carried over the netdev are treated as Ethernet:
     * when they are received, they are converted to Ethernet by adding a
     * dummy header with the proper Ethertype; on transmission, the Ethernet
     * header is stripped.  This is a legacy mode for backward compatibility
     * with controllers that are not prepared to handle OpenFlow 1.5+
     * "packet_type". */
    NETDEV_PT_LEGACY_L3,
};
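
/* Example: a minimal sketch (illustrative only, not part of this API) of how
 * a client might branch on a netdev's packet-type mode, using
 * netdev_get_pt_mode() declared later in this header.  The helper named here
 * is hypothetical:
 *
 *     enum netdev_pt_mode mode = netdev_get_pt_mode(netdev);
 *     if (mode == NETDEV_PT_LEGACY_L3) {
 *         // Packets on this netdev carry no Ethernet header.
 *         prepend_dummy_eth_header(packet);    // hypothetical helper
 *     }
 */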

/* Configuration specific to tunnels. */
struct netdev_tunnel_config {
    ovs_be64 in_key;
    bool in_key_present;
    bool in_key_flow;

    bool out_key_present;
    bool out_key_flow;
    ovs_be64 out_key;

    ovs_be16 dst_port;

    bool ip_src_flow;
    bool ip_dst_flow;
    struct in6_addr ipv6_src;
    struct in6_addr ipv6_dst;

    uint32_t exts;
    uint32_t egress_pkt_mark;
    bool set_egress_pkt_mark;

    uint8_t ttl;
    bool ttl_inherit;

    uint8_t tos;
    bool tos_inherit;

    bool csum;
    bool dont_fragment;
    enum netdev_pt_mode pt_mode;

    bool set_seq;
    uint32_t seqno;
    uint32_t erspan_idx;
    uint8_t erspan_ver;
    uint8_t erspan_dir;
    uint8_t erspan_hwid;

    bool erspan_ver_flow;
    bool erspan_idx_flow;
    bool erspan_dir_flow;
    bool erspan_hwid_flow;
};
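
/* Example: a hedged sketch (illustrative only) of reading a fixed inbound
 * tunnel key from the configuration returned by netdev_get_tunnel_config(),
 * declared below:
 *
 *     const struct netdev_tunnel_config *cfg = netdev_get_tunnel_config(dev);
 *     if (cfg && cfg->in_key_present && !cfg->in_key_flow) {
 *         ovs_be64 key = cfg->in_key;    // key is fixed, not per-flow
 *         ...
 *     }
 */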

void netdev_run(void);
void netdev_wait(void);

void netdev_enumerate_types(struct sset *types);
bool netdev_is_reserved_name(const char *name);

int netdev_n_txq(const struct netdev *netdev);
int netdev_n_rxq(const struct netdev *netdev);
bool netdev_is_pmd(const struct netdev *netdev);
bool netdev_has_tunnel_push_pop(const struct netdev *netdev);

/* Open and close. */
int netdev_open(const char *name, const char *type, struct netdev **netdevp);

struct netdev *netdev_ref(const struct netdev *);
void netdev_remove(struct netdev *);
void netdev_close(struct netdev *);
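
/* Example: a minimal open/close sketch (illustrative only).  That
 * netdev_open() returns 0 on success or a positive errno value on failure is
 * an assumption based on the usual OVS convention:
 *
 *     struct netdev *dev;
 *     int error = netdev_open("eth0", "system", &dev);
 *     if (!error) {
 *         ...use 'dev'...
 *         netdev_close(dev);
 *     }
 */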

void netdev_parse_name(const char *netdev_name, char **name, char **type);

/* Options. */
int netdev_set_config(struct netdev *, const struct smap *args, char **errp);
int netdev_get_config(const struct netdev *, struct smap *);
const struct netdev_tunnel_config *
    netdev_get_tunnel_config(const struct netdev *);
int netdev_get_numa_id(const struct netdev *);

/* Basic properties. */
const char *netdev_get_name(const struct netdev *);
const char *netdev_get_type(const struct netdev *);
const char *netdev_get_type_from_name(const char *);
int netdev_get_mtu(const struct netdev *, int *mtup);
int netdev_set_mtu(struct netdev *, int mtu);
void netdev_mtu_user_config(struct netdev *, bool);
bool netdev_mtu_is_user_config(struct netdev *);
int netdev_get_ifindex(const struct netdev *);
int netdev_set_tx_multiq(struct netdev *, unsigned int n_txq);
enum netdev_pt_mode netdev_get_pt_mode(const struct netdev *);

/* Packet reception. */
int netdev_rxq_open(struct netdev *, struct netdev_rxq **, int id);
void netdev_rxq_close(struct netdev_rxq *);

const char *netdev_rxq_get_name(const struct netdev_rxq *);
int netdev_rxq_get_queue_id(const struct netdev_rxq *);

int netdev_rxq_recv(struct netdev_rxq *rx, struct dp_packet_batch *,
                    int *qfill);
void netdev_rxq_wait(struct netdev_rxq *);
int netdev_rxq_drain(struct netdev_rxq *);
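
/* Example: a hedged receive sketch (illustrative only).  Per the
 * thread-safety notes at the top of this header, each thread should use its
 * own netdev_rxq.  That EAGAIN means "no packets ready" is an assumption
 * based on the usual OVS convention:
 *
 *     struct netdev_rxq *rxq;
 *     if (!netdev_rxq_open(netdev, &rxq, 0)) {
 *         struct dp_packet_batch batch;
 *         ...initialize 'batch'...
 *         int error = netdev_rxq_recv(rxq, &batch, NULL);
 *         if (!error) {
 *             ...process 'batch'...
 *         } else if (error == EAGAIN) {
 *             netdev_rxq_wait(rxq);    // wake up when packets arrive
 *         }
 *         netdev_rxq_close(rxq);
 *     }
 */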

/* Packet transmission. */
int netdev_send(struct netdev *, int qid, struct dp_packet_batch *,
                bool concurrent_txq);
void netdev_send_wait(struct netdev *, int qid);
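
/* Example: a minimal send sketch (illustrative only).  That 'concurrent_txq'
 * tells the provider whether other threads may use the same tx queue id
 * concurrently, and that EAGAIN signals a full device, are assumptions based
 * on how these appear elsewhere in OVS:
 *
 *     int error = netdev_send(netdev, 0, &batch, false);
 *     if (error == EAGAIN) {
 *         netdev_send_wait(netdev, 0);    // retry once the device drains
 *     }
 */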

/* Flow offloading. */
struct offload_info {
    const struct dpif_class *dpif_class;
    ovs_be16 tp_dst_port; /* Destination port for tunnel in SET action. */
    uint8_t tunnel_csum_on; /* Tunnel header with checksum. */

    /* The flow mark id assigned to the flow.  If any packets hit the flow,
     * the mark will be present in the packet metadata. */
    uint32_t flow_mark;
};

struct dpif_class;
struct netdev_flow_dump;
int netdev_flow_flush(struct netdev *);
int netdev_flow_dump_create(struct netdev *, struct netdev_flow_dump **dump);
int netdev_flow_dump_destroy(struct netdev_flow_dump *);
bool netdev_flow_dump_next(struct netdev_flow_dump *, struct match *,
                           struct nlattr **actions, struct dpif_flow_stats *,
                           struct dpif_flow_attrs *, ovs_u128 *ufid,
                           struct ofpbuf *rbuffer, struct ofpbuf *wbuffer);
int netdev_flow_put(struct netdev *, struct match *, struct nlattr *actions,
                    size_t actions_len, const ovs_u128 *,
                    struct offload_info *, struct dpif_flow_stats *);
int netdev_flow_get(struct netdev *, struct match *, struct nlattr **actions,
                    const ovs_u128 *, struct dpif_flow_stats *,
                    struct dpif_flow_attrs *, struct ofpbuf *wbuffer);
int netdev_flow_del(struct netdev *, const ovs_u128 *,
                    struct dpif_flow_stats *);
int netdev_init_flow_api(struct netdev *);
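
/* Example: a hedged flow-dump sketch (illustrative only), assuming the dump
 * functions follow the usual OVS create/next/destroy pattern:
 *
 *     struct netdev_flow_dump *dump;
 *     if (!netdev_flow_dump_create(netdev, &dump)) {
 *         struct match match;
 *         struct nlattr *actions;
 *         struct dpif_flow_stats stats;
 *         struct dpif_flow_attrs attrs;
 *         ovs_u128 ufid;
 *         struct ofpbuf rbuf, wbuf;
 *         ...initialize 'rbuf' and 'wbuf'...
 *         while (netdev_flow_dump_next(dump, &match, &actions, &stats,
 *                                      &attrs, &ufid, &rbuf, &wbuf)) {
 *             ...examine one offloaded flow...
 *         }
 *         netdev_flow_dump_destroy(dump);
 *     }
 */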
uint32_t netdev_get_block_id(struct netdev *);
int netdev_get_hw_info(struct netdev *, int);
void netdev_set_hw_info(struct netdev *, int, int);
bool netdev_any_oor(void);
bool netdev_is_flow_api_enabled(void);
void netdev_set_flow_api_enabled(const struct smap *ovs_other_config);
bool netdev_is_offload_rebalance_policy_enabled(void);

struct dpif_port;
int netdev_ports_insert(struct netdev *, const struct dpif_class *,
                        struct dpif_port *);
struct netdev *netdev_ports_get(odp_port_t port, const struct dpif_class *);
int netdev_ports_remove(odp_port_t port, const struct dpif_class *);
odp_port_t netdev_ifindex_to_odp_port(int ifindex);
struct netdev_flow_dump **netdev_ports_flow_dump_create(
                                        const struct dpif_class *,
                                        int *ports);
void netdev_ports_flow_flush(const struct dpif_class *);
int netdev_ports_flow_del(const struct dpif_class *, const ovs_u128 *ufid,
                          struct dpif_flow_stats *stats);
int netdev_ports_flow_get(const struct dpif_class *, struct match *match,
                          struct nlattr **actions,
                          const ovs_u128 *ufid,
                          struct dpif_flow_stats *stats,
                          struct dpif_flow_attrs *attrs,
                          struct ofpbuf *buf);

/* Native tunnel APIs. */

/* Structure to pass parameters required to build a tunnel header. */
struct netdev_tnl_build_header_params {
    const struct flow *flow;
    const struct in6_addr *s_ip;
    struct eth_addr dmac;
    struct eth_addr smac;
    bool is_ipv6;
};

void
netdev_init_tnl_build_header_params(struct netdev_tnl_build_header_params *params,
                                    const struct flow *tnl_flow,
                                    const struct in6_addr *src,
                                    struct eth_addr dmac,
                                    struct eth_addr smac);

int netdev_build_header(const struct netdev *,
                        struct ovs_action_push_tnl *data,
                        const struct netdev_tnl_build_header_params *params);

int netdev_push_header(const struct netdev *netdev,
                       struct dp_packet_batch *,
                       const struct ovs_action_push_tnl *data);
void netdev_pop_header(struct netdev *netdev, struct dp_packet_batch *);

/* Hardware address. */
int netdev_set_etheraddr(struct netdev *, const struct eth_addr mac);
int netdev_get_etheraddr(const struct netdev *, struct eth_addr *mac);

/* PHY interface. */
bool netdev_get_carrier(const struct netdev *);
long long int netdev_get_carrier_resets(const struct netdev *);
int netdev_set_miimon_interval(struct netdev *, long long int interval);

/* Flags. */
enum netdev_flags {
    NETDEV_UP = 0x0001,         /* Device enabled? */
    NETDEV_PROMISC = 0x0002,    /* Promiscuous mode? */
    NETDEV_LOOPBACK = 0x0004    /* This is a loopback device. */
};

int netdev_get_flags(const struct netdev *, enum netdev_flags *);
int netdev_set_flags(struct netdev *, enum netdev_flags,
                     struct netdev_saved_flags **);
int netdev_turn_flags_on(struct netdev *, enum netdev_flags,
                         struct netdev_saved_flags **);
int netdev_turn_flags_off(struct netdev *, enum netdev_flags,
                          struct netdev_saved_flags **);

void netdev_restore_flags(struct netdev_saved_flags *);
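
/* Example: a hedged save/restore sketch (illustrative only).  Turning a flag
 * on yields a netdev_saved_flags handle that can later undo the change:
 *
 *     struct netdev_saved_flags *sf = NULL;
 *     if (!netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf)) {
 *         ...run with the device in promiscuous mode...
 *         netdev_restore_flags(sf);    // revert to the previous flags
 *     }
 */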

/* TCP/IP stack interface. */
int netdev_set_in4(struct netdev *, struct in_addr addr, struct in_addr mask);
int netdev_get_in4_by_name(const char *device_name, struct in_addr *in4);
int netdev_get_ip_by_name(const char *device_name, struct in6_addr *);
int netdev_get_addr_list(const struct netdev *netdev, struct in6_addr **addr,
                         struct in6_addr **mask, int *n_in6);

int netdev_add_router(struct netdev *, struct in_addr router);
int netdev_get_next_hop(const struct netdev *, const struct in_addr *host,
                        struct in_addr *next_hop, char **);
int netdev_get_status(const struct netdev *, struct smap *);
int netdev_arp_lookup(const struct netdev *, ovs_be32 ip,
                      struct eth_addr *mac);

struct netdev *netdev_find_dev_by_in4(const struct in_addr *);

/* Statistics. */
int netdev_get_stats(const struct netdev *, struct netdev_stats *);
int netdev_get_custom_stats(const struct netdev *,
                            struct netdev_custom_stats *);

/* Quality of service. */
struct netdev_qos_capabilities {
    unsigned int n_queues;
};

struct netdev_queue_stats {
    /* Values of unsupported statistics are set to all-1-bits (UINT64_MAX). */
    uint64_t tx_bytes;
    uint64_t tx_packets;
    uint64_t tx_errors;

    /* Time at which the queue was created, in msecs, LLONG_MIN if unknown. */
    long long int created;
};

int netdev_set_policing(struct netdev *, uint32_t kbits_rate,
                        uint32_t kbits_burst);

int netdev_get_qos_types(const struct netdev *, struct sset *types);
int netdev_get_qos_capabilities(const struct netdev *,
                                const char *type,
                                struct netdev_qos_capabilities *);
int netdev_get_n_queues(const struct netdev *,
                        const char *type, unsigned int *n_queuesp);

int netdev_get_qos(const struct netdev *,
                   const char **typep, struct smap *details);
int netdev_set_qos(struct netdev *,
                   const char *type, const struct smap *details);
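
/* Example: a hedged QoS configuration sketch (illustrative only).  The
 * "linux-htb" type string and "max-rate" key are assumptions drawn from the
 * Linux provider's documented QoS support:
 *
 *     struct smap details = SMAP_INITIALIZER(&details);
 *     smap_add(&details, "max-rate", "1000000");    // bits per second
 *     int error = netdev_set_qos(netdev, "linux-htb", &details);
 *     smap_destroy(&details);
 */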

int netdev_get_queue(const struct netdev *,
                     unsigned int queue_id, struct smap *details);
int netdev_set_queue(struct netdev *,
                     unsigned int queue_id, const struct smap *details);
int netdev_delete_queue(struct netdev *, unsigned int queue_id);
int netdev_get_queue_stats(const struct netdev *, unsigned int queue_id,
                           struct netdev_queue_stats *);
uint64_t netdev_get_change_seq(const struct netdev *);

int netdev_reconfigure(struct netdev *netdev);
void netdev_wait_reconf_required(struct netdev *netdev);
bool netdev_is_reconf_required(struct netdev *netdev);

struct netdev_queue_dump {
    struct netdev *netdev;
    int error;
    void *state;
};
void netdev_queue_dump_start(struct netdev_queue_dump *,
                             const struct netdev *);
bool netdev_queue_dump_next(struct netdev_queue_dump *,
                            unsigned int *queue_id, struct smap *details);
int netdev_queue_dump_done(struct netdev_queue_dump *);

/* Iterates through each queue in NETDEV, using DUMP as state.  Fills QUEUE_ID
 * and DETAILS with information about queues.  The client must initialize and
 * destroy DETAILS.
 *
 * Arguments all have pointer type.
 *
 * If you break out of the loop, then you need to free the dump structure by
 * hand using netdev_queue_dump_done(). */
#define NETDEV_QUEUE_FOR_EACH(QUEUE_ID, DETAILS, DUMP, NETDEV)     \
    for (netdev_queue_dump_start(DUMP, NETDEV);                    \
         (netdev_queue_dump_next(DUMP, QUEUE_ID, DETAILS)          \
          ? true                                                   \
          : (netdev_queue_dump_done(DUMP), false));                \
        )
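
/* Example: a minimal usage sketch for the macro above (illustrative only):
 *
 *     struct netdev_queue_dump dump;
 *     unsigned int queue_id;
 *     struct smap details = SMAP_INITIALIZER(&details);
 *     NETDEV_QUEUE_FOR_EACH (&queue_id, &details, &dump, netdev) {
 *         ...use 'queue_id' and 'details'...
 *     }
 *     smap_destroy(&details);
 */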

typedef void netdev_dump_queue_stats_cb(unsigned int queue_id,
                                        struct netdev_queue_stats *,
                                        void *aux);
int netdev_dump_queue_stats(const struct netdev *,
                            netdev_dump_queue_stats_cb *, void *aux);
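
/* Example: a hedged callback sketch (illustrative only) for
 * netdev_dump_queue_stats(), summing per-queue transmit counts while
 * skipping the UINT64_MAX "unsupported" sentinel:
 *
 *     static void
 *     sum_tx_packets(unsigned int queue_id, struct netdev_queue_stats *stats,
 *                    void *aux)
 *     {
 *         uint64_t *total = aux;
 *         if (stats->tx_packets != UINT64_MAX) {
 *             *total += stats->tx_packets;
 *         }
 *     }
 *
 *     uint64_t total = 0;
 *     netdev_dump_queue_stats(netdev, sum_tx_packets, &total);
 */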

extern struct seq *tnl_conf_seq;

#ifndef _WIN32
void netdev_get_addrs_list_flush(void);
int netdev_get_addrs(const char dev[], struct in6_addr **paddr,
                     struct in6_addr **pmask, int *n_in6);
#endif

#ifdef __cplusplus
}
#endif

#endif /* netdev.h */