2009-06-17 14:35:35 -07:00
|
|
|
|
/*
|
2019-02-13 15:34:21 -08:00
|
|
|
|
* Copyright (c) 2008-2018 Nicira, Inc.
|
2009-06-17 14:35:35 -07:00
|
|
|
|
*
|
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
|
* You may obtain a copy of the License at:
|
|
|
|
|
*
|
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
*
|
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
|
* limitations under the License.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include <config.h>
|
2010-12-29 14:20:16 -08:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
#include "dpif-netlink.h"
|
2009-06-17 14:35:35 -07:00
|
|
|
|
|
|
|
|
|
#include <ctype.h>
|
|
|
|
|
#include <errno.h>
|
|
|
|
|
#include <fcntl.h>
|
|
|
|
|
#include <inttypes.h>
|
|
|
|
|
#include <net/if.h>
|
2010-05-26 15:32:34 -07:00
|
|
|
|
#include <linux/types.h>
|
2010-07-20 11:23:21 -07:00
|
|
|
|
#include <linux/pkt_sched.h>
|
2011-11-28 09:29:18 -08:00
|
|
|
|
#include <poll.h>
|
2009-06-17 14:35:35 -07:00
|
|
|
|
#include <stdlib.h>
|
2011-11-28 09:29:18 -08:00
|
|
|
|
#include <strings.h>
|
2011-11-22 09:25:32 -08:00
|
|
|
|
#include <sys/epoll.h>
|
2010-05-20 13:26:48 -07:00
|
|
|
|
#include <sys/stat.h>
|
2009-06-17 14:35:35 -07:00
|
|
|
|
#include <unistd.h>
|
|
|
|
|
|
2011-04-04 16:55:34 -07:00
|
|
|
|
#include "bitmap.h"
|
2017-05-18 16:10:29 -04:00
|
|
|
|
#include "dpif-netlink-rtnl.h"
|
2018-02-09 10:04:26 -08:00
|
|
|
|
#include "dpif-provider.h"
|
2014-02-26 10:10:29 -08:00
|
|
|
|
#include "fat-rwlock.h"
|
2018-02-09 10:04:26 -08:00
|
|
|
|
#include "flow.h"
|
2011-04-28 13:02:15 -07:00
|
|
|
|
#include "netdev-linux.h"
|
2019-05-07 12:24:08 +03:00
|
|
|
|
#include "netdev-offload.h"
|
2018-02-09 10:04:26 -08:00
|
|
|
|
#include "netdev-provider.h"
|
2010-12-03 14:41:38 -08:00
|
|
|
|
#include "netdev-vport.h"
|
2018-02-09 10:04:26 -08:00
|
|
|
|
#include "netdev.h"
|
2015-10-28 11:26:18 -07:00
|
|
|
|
#include "netlink-conntrack.h"
|
2011-08-25 14:06:54 -07:00
|
|
|
|
#include "netlink-notifier.h"
|
2011-01-26 13:41:54 -08:00
|
|
|
|
#include "netlink-socket.h"
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
#include "netlink.h"
|
2018-03-29 23:05:27 -03:00
|
|
|
|
#include "netnsid.h"
|
2011-01-26 07:03:39 -08:00
|
|
|
|
#include "odp-util.h"
|
2018-02-09 10:04:26 -08:00
|
|
|
|
#include "openvswitch/dynamic-string.h"
|
|
|
|
|
#include "openvswitch/flow.h"
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
#include "openvswitch/hmap.h"
|
2018-02-09 10:04:26 -08:00
|
|
|
|
#include "openvswitch/match.h"
|
2016-03-25 14:10:24 -07:00
|
|
|
|
#include "openvswitch/ofpbuf.h"
|
2017-11-03 13:53:53 +08:00
|
|
|
|
#include "openvswitch/poll-loop.h"
|
2016-07-12 16:37:34 -05:00
|
|
|
|
#include "openvswitch/shash.h"
|
2018-08-08 17:31:17 -07:00
|
|
|
|
#include "openvswitch/thread.h"
|
2018-02-09 10:04:26 -08:00
|
|
|
|
#include "openvswitch/vlog.h"
|
|
|
|
|
#include "packets.h"
|
|
|
|
|
#include "random.h"
|
2011-03-25 15:26:30 -07:00
|
|
|
|
#include "sset.h"
|
2012-06-01 17:40:31 -04:00
|
|
|
|
#include "timeval.h"
|
2011-01-26 15:42:00 -08:00
|
|
|
|
#include "unaligned.h"
|
2009-06-17 14:35:35 -07:00
|
|
|
|
#include "util.h"
|
2010-07-16 11:02:49 -07:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
VLOG_DEFINE_THIS_MODULE(dpif_netlink);
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#ifdef _WIN32
|
2016-12-20 19:41:22 +00:00
|
|
|
|
#include "wmi.h"
|
2014-10-23 08:27:34 -07:00
|
|
|
|
enum { WINDOWS = 1 };
|
|
|
|
|
#else
|
|
|
|
|
enum { WINDOWS = 0 };
|
|
|
|
|
#endif
|
2012-02-16 17:12:36 -08:00
|
|
|
|
enum { MAX_PORTS = USHRT_MAX };
|
2011-04-04 16:55:34 -07:00
|
|
|
|
|
2011-08-26 23:34:40 -07:00
|
|
|
|
/* This ethtool flag was introduced in Linux 2.6.24, so it might be
|
|
|
|
|
* missing if we have old headers. */
|
|
|
|
|
#define ETH_FLAG_LRO (1 << 15) /* LRO is enabled */
|
|
|
|
|
|
2017-06-13 18:03:34 +03:00
|
|
|
|
#define FLOW_DUMP_MAX_BATCH 50
|
2017-06-13 18:03:38 +03:00
|
|
|
|
#define OPERATE_MAX_OPS 50
|
2017-06-13 18:03:34 +03:00
|
|
|
|
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
#ifndef EPOLLEXCLUSIVE
|
|
|
|
|
#define EPOLLEXCLUSIVE (1u << 28)
|
|
|
|
|
#endif
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
/* In-memory form of an OVS_DP_* Generic Netlink message: the command, the
 * ovs_header contents, and the attributes that were present.  Attribute
 * pointers are null (and 'user_features' zero) when absent. */
struct dpif_netlink_dp {
    /* Generic Netlink header. */
    uint8_t cmd;                       /* OVS_DP_CMD_* (e.g. NEW/GET/SET). */

    /* struct ovs_header. */
    int dp_ifindex;                    /* Kernel datapath ifindex. */

    /* Attributes. */
    const char *name;                  /* OVS_DP_ATTR_NAME. */
    const uint32_t *upcall_pid;        /* OVS_DP_ATTR_UPCALL_PID. */
    uint32_t user_features;            /* OVS_DP_ATTR_USER_FEATURES */
    const struct ovs_dp_stats *stats;  /* OVS_DP_ATTR_STATS. */
    const struct ovs_dp_megaflow_stats *megaflow_stats;
                                       /* OVS_DP_ATTR_MEGAFLOW_STATS.*/
};
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
static void dpif_netlink_dp_init(struct dpif_netlink_dp *);
|
|
|
|
|
static int dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *,
|
|
|
|
|
const struct ofpbuf *);
|
|
|
|
|
static void dpif_netlink_dp_dump_start(struct nl_dump *);
|
|
|
|
|
static int dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
|
|
|
|
|
struct dpif_netlink_dp *reply,
|
|
|
|
|
struct ofpbuf **bufp);
|
|
|
|
|
static int dpif_netlink_dp_get(const struct dpif *,
|
|
|
|
|
struct dpif_netlink_dp *reply,
|
|
|
|
|
struct ofpbuf **bufp);
|
2019-12-22 12:16:40 +02:00
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_set_features(struct dpif *dpif_, uint32_t new_features);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
|
|
|
|
|
/* In-memory form of an OVS_FLOW_* Generic Netlink message: the command, the
 * Netlink message flags, the ovs_header contents, and the flow attributes.
 * Attribute pointers are null when the attribute is absent. */
struct dpif_netlink_flow {
    /* Generic Netlink header. */
    uint8_t cmd;

    /* struct ovs_header. */
    unsigned int nlmsg_flags;
    int dp_ifindex;                     /* Kernel datapath ifindex. */

    /* Attributes.
     *
     * The 'stats' member points to 64-bit data that might only be aligned on
     * 32-bit boundaries, so get_unaligned_u64() should be used to access its
     * values.
     *
     * If 'actions' is nonnull then OVS_FLOW_ATTR_ACTIONS will be included in
     * the Netlink version of the command, even if actions_len is zero. */
    const struct nlattr *key;           /* OVS_FLOW_ATTR_KEY. */
    size_t key_len;
    const struct nlattr *mask;          /* OVS_FLOW_ATTR_MASK. */
    size_t mask_len;
    const struct nlattr *actions;       /* OVS_FLOW_ATTR_ACTIONS. */
    size_t actions_len;
    ovs_u128 ufid;                      /* OVS_FLOW_ATTR_FLOW_ID. */
    bool ufid_present;                  /* Is there a UFID? */
    bool ufid_terse;                    /* Skip serializing key/mask/acts? */
    const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
    const uint8_t *tcp_flags;           /* OVS_FLOW_ATTR_TCP_FLAGS. */
    const ovs_32aligned_u64 *used;      /* OVS_FLOW_ATTR_USED. */
    bool clear;                         /* OVS_FLOW_ATTR_CLEAR. */
    bool probe;                         /* OVS_FLOW_ATTR_PROBE. */
};
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
static void dpif_netlink_flow_init(struct dpif_netlink_flow *);
|
|
|
|
|
static int dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *,
|
|
|
|
|
const struct ofpbuf *);
|
|
|
|
|
static void dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *,
|
|
|
|
|
struct ofpbuf *);
|
|
|
|
|
static int dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
|
|
|
|
|
struct dpif_netlink_flow *reply,
|
|
|
|
|
struct ofpbuf **bufp);
|
|
|
|
|
static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
|
|
|
|
|
struct dpif_flow_stats *);
|
2019-12-08 18:09:53 +01:00
|
|
|
|
static void dpif_netlink_flow_to_dpif_flow(struct dpif_flow *,
|
2014-09-18 04:17:54 -07:00
|
|
|
|
const struct dpif_netlink_flow *);
|
2011-01-26 15:42:00 -08:00
|
|
|
|
|
2013-01-04 18:34:26 -08:00
|
|
|
|
/* One of the dpif channels between the kernel and userspace.  Each channel
 * carries upcalls for one datapath port. */
struct dpif_channel {
    struct nl_sock *sock;       /* Netlink socket. */
    long long int last_poll;    /* Last time this channel was polled. */
};
|
|
|
|
|
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#ifdef _WIN32
|
|
|
|
|
#define VPORT_SOCK_POOL_SIZE 1
|
|
|
|
|
/* On Windows, there is no native support for epoll. There are equivalent
|
|
|
|
|
* interfaces though, that are not used currently. For simpicity, a pool of
|
|
|
|
|
* netlink sockets is used. Each socket is represented by 'struct
|
|
|
|
|
* dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be
|
|
|
|
|
* sharing the same socket. In the future, we can add a reference count and
|
|
|
|
|
* such fields. */
|
|
|
|
|
struct dpif_windows_vport_sock {
|
|
|
|
|
struct nl_sock *nl_sock; /* netlink socket. */
|
|
|
|
|
};
|
|
|
|
|
#endif
|
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
/* Per-handler state used to poll the datapath's channel sockets.
 * (Presumably one instance per upcall handler thread, matching
 * 'dpif_netlink.n_handlers' -- confirm against the handler setup code.) */
struct dpif_handler {
    struct epoll_event *epoll_events;  /* Buffer for epoll_wait() results. */
    int epoll_fd;                 /* epoll fd that includes channel socks. */
    int n_events;                 /* Num events returned by epoll_wait(). */
    int event_offset;             /* Offset into 'epoll_events'. */

#ifdef _WIN32
    /* Pool of sockets. */
    struct dpif_windows_vport_sock *vport_sock_pool;
    size_t last_used_pool_idx; /* Index to aid in allocating a
                                  socket in the pool to a port. */
#endif
};
|
2012-06-01 17:40:31 -04:00
|
|
|
|
|
2009-06-17 14:35:35 -07:00
|
|
|
|
/* Datapath interface for the openvswitch Linux kernel module. */
struct dpif_netlink {
    struct dpif dpif;              /* Base object; see dpif_netlink_cast(). */
    int dp_ifindex;                /* Kernel datapath ifindex. */
    uint32_t user_features;        /* OVS_DP_ATTR_USER_FEATURES in effect. */

    /* Upcall messages. */
    struct fat_rwlock upcall_lock; /* Protects the fields below. */
    struct dpif_handler *handlers;
    uint32_t n_handlers;           /* Num of upcall handlers. */
    struct dpif_channel *channels; /* Array of channels for each port. */
    int uc_array_size;             /* Size of 'handler->channels' and */
                                   /* 'handler->epoll_events'. */

    /* Change notification. */
    struct nl_sock *port_notifier; /* vport multicast group subscriber. */
    bool refresh_channels;         /* If true, re-create the port channels. */
};
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
static void report_loss(struct dpif_netlink *, struct dpif_channel *,
|
2014-04-17 16:33:17 -07:00
|
|
|
|
uint32_t ch_idx, uint32_t handler_id);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
2009-06-17 14:35:35 -07:00
|
|
|
|
static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
|
|
|
|
|
|
2013-07-22 15:00:49 -07:00
|
|
|
|
/* Generic Netlink family numbers for OVS.
|
|
|
|
|
*
|
2014-09-18 04:17:54 -07:00
|
|
|
|
* Initialized by dpif_netlink_init(). */
|
2011-08-18 10:35:40 -07:00
|
|
|
|
static int ovs_datapath_family;
|
|
|
|
|
static int ovs_vport_family;
|
|
|
|
|
static int ovs_flow_family;
|
|
|
|
|
static int ovs_packet_family;
|
2017-11-17 02:15:47 -08:00
|
|
|
|
static int ovs_meter_family;
|
2018-08-17 02:05:09 -07:00
|
|
|
|
static int ovs_ct_limit_family;
|
2011-01-26 13:41:54 -08:00
|
|
|
|
|
2013-07-22 15:00:49 -07:00
|
|
|
|
/* Generic Netlink multicast groups for OVS.
|
|
|
|
|
*
|
2014-09-18 04:17:54 -07:00
|
|
|
|
* Initialized by dpif_netlink_init(). */
|
2013-07-22 15:00:49 -07:00
|
|
|
|
static unsigned int ovs_vport_mcgroup;
|
2011-01-26 13:41:54 -08:00
|
|
|
|
|
2017-05-18 16:10:33 -04:00
|
|
|
|
/* If true, tunnel devices are created using OVS compat/genetlink.
|
|
|
|
|
* If false, tunnel devices are created with rtnetlink and using light weight
|
|
|
|
|
* tunnels. If we fail to create the tunnel the rtnetlink+LWT, then we fallback
|
|
|
|
|
* to using the compat interface. */
|
|
|
|
|
static bool ovs_tunnels_out_of_tree = true;
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
static int dpif_netlink_init(void);
|
|
|
|
|
static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
|
|
|
|
|
static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
|
2018-09-25 15:14:13 -07:00
|
|
|
|
odp_port_t port_no);
|
2014-10-23 08:27:34 -07:00
|
|
|
|
static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
static int dpif_netlink_refresh_channels(struct dpif_netlink *,
|
|
|
|
|
uint32_t n_handlers);
|
|
|
|
|
static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *,
|
|
|
|
|
struct ofpbuf *);
|
|
|
|
|
static int dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *,
|
|
|
|
|
const struct ofpbuf *);
|
2017-05-18 16:10:33 -04:00
|
|
|
|
static int dpif_netlink_port_query__(const struct dpif_netlink *dpif,
|
|
|
|
|
odp_port_t port_no, const char *port_name,
|
|
|
|
|
struct dpif_port *dpif_port);
|
2011-01-28 13:59:03 -08:00
|
|
|
|
|
2018-11-16 15:32:58 +02:00
|
|
|
|
/* Obtains a Netlink socket for upcall delivery and stores it in '*sockp'.
 *
 * On Linux, a fresh NETLINK_GENERIC socket is created for every call.  On
 * Windows, sockets are instead handed out round-robin from the first
 * handler's pre-allocated pool, so several ports may share one socket.
 *
 * Returns 0 on success, a positive errno value on failure (on Windows,
 * EINVAL if the pool has not been allocated yet).
 *
 * NOTE(review): the lock annotation dereferences 'dpif' even though the
 * parameter is marked OVS_UNUSED for the non-Windows build; this is benign
 * when OVS_REQ_WRLOCK expands to an attribute/no-op, but worth confirming
 * under Clang's thread-safety analysis. */
static int
create_nl_sock(struct dpif_netlink *dpif OVS_UNUSED, struct nl_sock **sockp)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
#ifndef _WIN32
    return nl_sock_create(NETLINK_GENERIC, sockp);
#else
    /* Pick netlink sockets to use in a round-robin fashion from each
     * handler's pool of sockets. */
    struct dpif_handler *handler = &dpif->handlers[0];
    struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
    size_t index = handler->last_used_pool_idx;

    /* A pool of sockets is allocated when the handler is initialized. */
    if (sock_pool == NULL) {
        *sockp = NULL;
        return EINVAL;
    }

    ovs_assert(index < VPORT_SOCK_POOL_SIZE);
    *sockp = sock_pool[index].nl_sock;
    ovs_assert(*sockp);
    /* Advance the round-robin cursor, wrapping at the end of the pool. */
    index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
    handler->last_used_pool_idx = index;
    return 0;
#endif
}
|
|
|
|
|
|
|
|
|
|
/* Releases a socket obtained from create_nl_sock().
 *
 * Only the Linux build destroys the socket.  On Windows the socket belongs
 * to the handler's shared pool (see create_nl_sock()), so it must not be
 * destroyed here. */
static void
close_nl_sock(struct nl_sock *sock)
{
#ifndef _WIN32
    nl_sock_destroy(sock);
#endif
}
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
/* Returns the 'struct dpif_netlink' that contains 'dpif', first asserting
 * that 'dpif' really belongs to the netlink dpif class. */
static struct dpif_netlink *
dpif_netlink_cast(const struct dpif *dpif)
{
    dpif_assert_class(dpif, &dpif_netlink_class);
    return CONTAINER_OF(dpif, struct dpif_netlink, dpif);
}
|
|
|
|
|
|
2009-07-06 11:06:36 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_enumerate(struct sset *all_dps,
|
|
|
|
|
const struct dpif_class *dpif_class OVS_UNUSED)
|
2009-07-06 11:06:36 -07:00
|
|
|
|
{
|
2011-01-28 13:55:04 -08:00
|
|
|
|
struct nl_dump dump;
|
2014-02-27 14:13:05 -08:00
|
|
|
|
uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
|
|
|
|
|
struct ofpbuf msg, buf;
|
2011-01-28 13:55:04 -08:00
|
|
|
|
int error;
|
2011-01-26 13:41:54 -08:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
error = dpif_netlink_init();
|
2011-01-28 13:55:04 -08:00
|
|
|
|
if (error) {
|
|
|
|
|
return error;
|
2011-01-26 13:41:54 -08:00
|
|
|
|
}
|
2009-07-06 11:06:36 -07:00
|
|
|
|
|
2014-02-27 14:13:05 -08:00
|
|
|
|
ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_dp_dump_start(&dump);
|
2014-02-27 14:13:05 -08:00
|
|
|
|
while (nl_dump_next(&dump, &msg, &buf)) {
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_dp dp;
|
2011-01-26 15:42:00 -08:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
if (!dpif_netlink_dp_from_ofpbuf(&dp, &msg)) {
|
2011-03-25 13:00:13 -07:00
|
|
|
|
sset_add(all_dps, dp.name);
|
2009-07-06 11:06:36 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
2014-02-27 14:13:05 -08:00
|
|
|
|
ofpbuf_uninit(&buf);
|
2011-01-28 13:55:04 -08:00
|
|
|
|
return nl_dump_done(&dump);
|
2009-07-06 11:06:36 -07:00
|
|
|
|
}
|
|
|
|
|
|
2009-06-17 14:35:35 -07:00
|
|
|
|
/* Opens the kernel datapath named 'name', creating it first if 'create' is
 * true, and on success stores the new dpif in '*dpifp'.
 *
 * When opening an existing datapath, the datapath's current user features
 * are first fetched with OVS_DP_CMD_GET so that the following
 * OVS_DP_CMD_SET preserves them while adding the features this build
 * requires (unaligned Netlink attributes, per-vport upcall PIDs).
 *
 * Returns 0 on success, a positive errno value on failure. */
static int
dpif_netlink_open(const struct dpif_class *class OVS_UNUSED, const char *name,
                  bool create, struct dpif **dpifp)
{
    struct dpif_netlink_dp dp_request, dp;
    struct ofpbuf *buf;
    uint32_t upcall_pid;
    int error;

    error = dpif_netlink_init();
    if (error) {
        return error;
    }

    /* Create or look up datapath. */
    dpif_netlink_dp_init(&dp_request);
    /* An upcall PID of 0 disables upcalls until channels are set up. */
    upcall_pid = 0;
    dp_request.upcall_pid = &upcall_pid;
    dp_request.name = name;

    if (create) {
        dp_request.cmd = OVS_DP_CMD_NEW;
    } else {
        dp_request.cmd = OVS_DP_CMD_GET;

        /* Fetch the existing datapath's feature set so the SET below does
         * not clobber features already enabled on it. */
        error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
        if (error) {
            return error;
        }
        dp_request.user_features = dp.user_features;
        ofpbuf_delete(buf);

        /* Use OVS_DP_CMD_SET to report user features */
        dp_request.cmd = OVS_DP_CMD_SET;
    }

    dp_request.user_features |= OVS_DP_F_UNALIGNED;
    dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
    error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
    if (error) {
        return error;
    }

    /* open_dpif() currently always returns 0, so '*dpifp' is valid below. */
    error = open_dpif(&dp, dpifp);
    /* NOTE(review): the return value is ignored here -- presumably TC
     * recirc sharing is an optional, best-effort feature; confirm against
     * dpif_netlink_set_features(). */
    dpif_netlink_set_features(*dpifp, OVS_DP_F_TC_RECIRC_SHARING);
    ofpbuf_delete(buf);

    return error;
}
|
|
|
|
|
|
2013-07-22 15:00:49 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
open_dpif(const struct dpif_netlink_dp *dp, struct dpif **dpifp)
|
2011-01-26 12:28:59 -08:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif;
|
2011-01-26 12:28:59 -08:00
|
|
|
|
|
2011-09-16 15:23:37 -07:00
|
|
|
|
dpif = xzalloc(sizeof *dpif);
|
2013-07-22 15:00:49 -07:00
|
|
|
|
dpif->port_notifier = NULL;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
fat_rwlock_init(&dpif->upcall_lock);
|
2011-01-26 12:28:59 -08:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_init(&dpif->dpif, &dpif_netlink_class, dp->name,
|
2011-01-21 17:01:56 -08:00
|
|
|
|
dp->dp_ifindex, dp->dp_ifindex);
|
2011-01-26 12:28:59 -08:00
|
|
|
|
|
2011-01-21 17:01:56 -08:00
|
|
|
|
dpif->dp_ifindex = dp->dp_ifindex;
|
2019-12-22 12:16:38 +02:00
|
|
|
|
dpif->user_features = dp->user_features;
|
2011-01-26 12:28:59 -08:00
|
|
|
|
*dpifp = &dpif->dpif;
|
2013-07-22 15:00:49 -07:00
|
|
|
|
|
|
|
|
|
return 0;
|
2009-06-17 14:35:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#ifdef _WIN32
|
|
|
|
|
static void
|
|
|
|
|
vport_delete_sock_pool(struct dpif_handler *handler)
|
|
|
|
|
OVS_REQ_WRLOCK(dpif->upcall_lock)
|
|
|
|
|
{
|
|
|
|
|
if (handler->vport_sock_pool) {
|
|
|
|
|
uint32_t i;
|
|
|
|
|
struct dpif_windows_vport_sock *sock_pool =
|
|
|
|
|
handler->vport_sock_pool;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
|
|
|
|
|
if (sock_pool[i].nl_sock) {
|
|
|
|
|
nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
|
|
|
|
|
nl_sock_destroy(sock_pool[i].nl_sock);
|
|
|
|
|
sock_pool[i].nl_sock = NULL;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
free(handler->vport_sock_pool);
|
|
|
|
|
handler->vport_sock_pool = NULL;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
vport_create_sock_pool(struct dpif_handler *handler)
|
|
|
|
|
OVS_REQ_WRLOCK(dpif->upcall_lock)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_windows_vport_sock *sock_pool;
|
|
|
|
|
size_t i;
|
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
|
|
sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
|
|
|
|
|
for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
|
|
|
|
|
error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
|
|
|
|
|
if (error) {
|
|
|
|
|
goto error;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Enable the netlink socket to receive packets. This is equivalent to
|
|
|
|
|
* calling nl_sock_join_mcgroup() to receive events. */
|
|
|
|
|
error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
|
|
|
|
|
if (error) {
|
|
|
|
|
goto error;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
handler->vport_sock_pool = sock_pool;
|
|
|
|
|
handler->last_used_pool_idx = 0;
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
error:
|
|
|
|
|
vport_delete_sock_pool(handler);
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
#endif /* _WIN32 */
|
|
|
|
|
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
/* Given the port number 'port_idx', extracts the pid of netlink socket
|
|
|
|
|
* associated to the port and assigns it to 'upcall_pid'. */
|
2014-02-26 10:10:29 -08:00
|
|
|
|
static bool
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
vport_get_pid(struct dpif_netlink *dpif, uint32_t port_idx,
|
|
|
|
|
uint32_t *upcall_pid)
|
2014-02-26 10:10:29 -08:00
|
|
|
|
{
|
|
|
|
|
/* Since the nl_sock can only be assigned in either all
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
* or none "dpif" channels, the following check
|
2014-02-26 10:10:29 -08:00
|
|
|
|
* would suffice. */
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
if (!dpif->channels[port_idx].sock) {
|
2014-02-26 10:10:29 -08:00
|
|
|
|
return false;
|
|
|
|
|
}
|
2014-10-23 08:27:34 -07:00
|
|
|
|
ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
*upcall_pid = nl_sock_pid(dpif->channels[port_idx].sock);
|
2013-01-04 18:34:26 -08:00
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
return true;
|
2013-01-04 18:34:26 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
vport_add_channel(struct dpif_netlink *dpif, odp_port_t port_no,
|
2019-10-14 11:10:47 -07:00
|
|
|
|
struct nl_sock *sock)
|
2013-01-04 18:34:26 -08:00
|
|
|
|
{
|
|
|
|
|
struct epoll_event event;
|
2013-06-19 16:58:44 -07:00
|
|
|
|
uint32_t port_idx = odp_to_u32(port_no);
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is obtained from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
size_t i;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
int error;
|
2013-01-04 18:34:26 -08:00
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
if (dpif->handlers == NULL) {
|
2019-10-14 11:10:47 -07:00
|
|
|
|
close_nl_sock(sock);
|
2013-01-04 18:34:26 -08:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
/* We assume that the datapath densely chooses port numbers, which can
|
|
|
|
|
* therefore be used as an index into 'channels' and 'epoll_events' of
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is obtained from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
* 'dpif'. */
|
2013-06-19 16:58:44 -07:00
|
|
|
|
if (port_idx >= dpif->uc_array_size) {
|
|
|
|
|
uint32_t new_size = port_idx + 1;
|
2013-01-04 18:34:26 -08:00
|
|
|
|
|
2013-05-01 16:54:18 -07:00
|
|
|
|
if (new_size > MAX_PORTS) {
|
2013-01-04 18:34:26 -08:00
|
|
|
|
VLOG_WARN_RL(&error_rl, "%s: datapath port %"PRIu32" too big",
|
|
|
|
|
dpif_name(&dpif->dpif), port_no);
|
|
|
|
|
return EFBIG;
|
|
|
|
|
}
|
|
|
|
|
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is obtained from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
dpif->channels = xrealloc(dpif->channels,
|
|
|
|
|
new_size * sizeof *dpif->channels);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
for (i = dpif->uc_array_size; i < new_size; i++) {
|
|
|
|
|
dpif->channels[i].sock = NULL;
|
|
|
|
|
}
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is obtained from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
for (i = 0; i < dpif->n_handlers; i++) {
|
|
|
|
|
struct dpif_handler *handler = &dpif->handlers[i];
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
|
|
|
|
handler->epoll_events = xrealloc(handler->epoll_events,
|
|
|
|
|
new_size * sizeof *handler->epoll_events);
|
2013-01-04 18:34:26 -08:00
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
}
|
2013-01-04 18:34:26 -08:00
|
|
|
|
dpif->uc_array_size = new_size;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
memset(&event, 0, sizeof event);
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is obtained from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
event.events = EPOLLIN | EPOLLEXCLUSIVE;
|
2013-06-19 16:58:44 -07:00
|
|
|
|
event.data.u32 = port_idx;
|
2013-01-04 18:34:26 -08:00
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
for (i = 0; i < dpif->n_handlers; i++) {
|
|
|
|
|
struct dpif_handler *handler = &dpif->handlers[i];
|
|
|
|
|
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#ifndef _WIN32
|
2019-10-14 11:10:47 -07:00
|
|
|
|
if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(sock),
|
2014-02-26 10:10:29 -08:00
|
|
|
|
&event) < 0) {
|
|
|
|
|
error = errno;
|
|
|
|
|
goto error;
|
|
|
|
|
}
|
2014-09-18 04:17:54 -07:00
|
|
|
|
#endif
|
2014-02-26 10:10:29 -08:00
|
|
|
|
}
|
2019-10-14 11:10:47 -07:00
|
|
|
|
dpif->channels[port_idx].sock = sock;
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is obtained from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
dpif->channels[port_idx].last_poll = LLONG_MIN;
|
2013-01-04 18:34:26 -08:00
|
|
|
|
|
|
|
|
|
return 0;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
|
|
|
|
error:
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#ifndef _WIN32
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
while (i--) {
|
|
|
|
|
epoll_ctl(dpif->handlers[i].epoll_fd, EPOLL_CTL_DEL,
|
2019-10-14 11:10:47 -07:00
|
|
|
|
nl_sock_fd(sock), NULL);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
}
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
#endif
|
|
|
|
|
dpif->channels[port_idx].sock = NULL;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
|
|
|
|
return error;
|
2013-01-04 18:34:26 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
vport_del_channels(struct dpif_netlink *dpif, odp_port_t port_no)
|
2013-01-04 18:34:26 -08:00
|
|
|
|
{
|
2013-06-19 16:58:44 -07:00
|
|
|
|
uint32_t port_idx = odp_to_u32(port_no);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
size_t i;
|
2013-01-04 18:34:26 -08:00
|
|
|
|
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
if (!dpif->handlers || port_idx >= dpif->uc_array_size
|
|
|
|
|
|| !dpif->channels[port_idx].sock) {
|
2013-01-04 18:34:26 -08:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
for (i = 0; i < dpif->n_handlers; i++) {
|
|
|
|
|
struct dpif_handler *handler = &dpif->handlers[i];
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#ifndef _WIN32
|
2014-02-26 10:10:29 -08:00
|
|
|
|
epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
nl_sock_fd(dpif->channels[port_idx].sock), NULL);
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#endif
|
2014-02-26 10:10:29 -08:00
|
|
|
|
handler->event_offset = handler->n_events = 0;
|
|
|
|
|
}
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
#ifndef _WIN32
|
|
|
|
|
nl_sock_destroy(dpif->channels[port_idx].sock);
|
|
|
|
|
#endif
|
|
|
|
|
dpif->channels[port_idx].sock = NULL;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
destroy_all_channels(struct dpif_netlink *dpif)
|
|
|
|
|
OVS_REQ_WRLOCK(dpif->upcall_lock)
|
2014-02-26 10:10:29 -08:00
|
|
|
|
{
|
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
|
|
if (!dpif->handlers) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < dpif->uc_array_size; i++ ) {
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_vport vport_request;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
uint32_t upcall_pids = 0;
|
|
|
|
|
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This is because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setups with lots of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is obtained from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
if (!dpif->channels[i].sock) {
|
2014-02-26 10:10:29 -08:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Turn off upcalls. */
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_vport_init(&vport_request);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
vport_request.cmd = OVS_VPORT_CMD_SET;
|
|
|
|
|
vport_request.dp_ifindex = dpif->dp_ifindex;
|
|
|
|
|
vport_request.port_no = u32_to_odp(i);
|
2015-01-22 17:01:28 +08:00
|
|
|
|
vport_request.n_upcall_pids = 1;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
vport_request.upcall_pids = &upcall_pids;
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_vport_transact(&vport_request, NULL, NULL);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
|
|
|
|
vport_del_channels(dpif, u32_to_odp(i));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < dpif->n_handlers; i++) {
|
|
|
|
|
struct dpif_handler *handler = &dpif->handlers[i];
|
|
|
|
|
|
2014-10-23 08:27:34 -07:00
|
|
|
|
dpif_netlink_handler_uninit(handler);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
free(handler->epoll_events);
|
|
|
|
|
}
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
free(dpif->channels);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
free(dpif->handlers);
|
|
|
|
|
dpif->handlers = NULL;
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
dpif->channels = NULL;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
dpif->n_handlers = 0;
|
|
|
|
|
dpif->uc_array_size = 0;
|
2011-09-16 15:23:37 -07:00
|
|
|
|
}
|
|
|
|
|
|
2009-06-17 14:35:35 -07:00
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_close(struct dpif *dpif_)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2011-08-24 16:21:10 -07:00
|
|
|
|
|
2013-07-22 15:00:49 -07:00
|
|
|
|
nl_sock_destroy(dpif->port_notifier);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
|
|
|
|
fat_rwlock_wrlock(&dpif->upcall_lock);
|
|
|
|
|
destroy_all_channels(dpif);
|
|
|
|
|
fat_rwlock_unlock(&dpif->upcall_lock);
|
|
|
|
|
|
|
|
|
|
fat_rwlock_destroy(&dpif->upcall_lock);
|
2009-06-17 14:35:35 -07:00
|
|
|
|
free(dpif);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_destroy(struct dpif *dpif_)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
|
|
|
|
struct dpif_netlink_dp dp;
|
2011-01-26 15:42:00 -08:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_dp_init(&dp);
|
2011-08-18 10:35:40 -07:00
|
|
|
|
dp.cmd = OVS_DP_CMD_DEL;
|
2011-01-21 17:01:56 -08:00
|
|
|
|
dp.dp_ifindex = dpif->dp_ifindex;
|
2014-09-18 04:17:54 -07:00
|
|
|
|
return dpif_netlink_dp_transact(&dp, NULL, NULL);
|
2009-06-17 14:35:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-11-11 11:53:47 -08:00
|
|
|
|
static bool
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_run(struct dpif *dpif_)
|
2013-05-01 17:13:14 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
2013-05-01 17:13:14 -07:00
|
|
|
|
if (dpif->refresh_channels) {
|
|
|
|
|
dpif->refresh_channels = false;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
fat_rwlock_wrlock(&dpif->upcall_lock);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_refresh_channels(dpif, dpif->n_handlers);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
fat_rwlock_unlock(&dpif->upcall_lock);
|
2013-05-01 17:13:14 -07:00
|
|
|
|
}
|
2014-11-11 11:53:47 -08:00
|
|
|
|
return false;
|
2013-05-01 17:13:14 -07:00
|
|
|
|
}
|
|
|
|
|
|
2009-06-17 14:35:35 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_dp dp;
|
2011-01-26 15:42:00 -08:00
|
|
|
|
struct ofpbuf *buf;
|
|
|
|
|
int error;
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
error = dpif_netlink_dp_get(dpif_, &dp, &buf);
|
2011-01-26 15:42:00 -08:00
|
|
|
|
if (!error) {
|
2014-06-13 15:28:29 -07:00
|
|
|
|
memset(stats, 0, sizeof *stats);
|
|
|
|
|
|
|
|
|
|
if (dp.stats) {
|
|
|
|
|
stats->n_hit = get_32aligned_u64(&dp.stats->n_hit);
|
|
|
|
|
stats->n_missed = get_32aligned_u64(&dp.stats->n_missed);
|
|
|
|
|
stats->n_lost = get_32aligned_u64(&dp.stats->n_lost);
|
|
|
|
|
stats->n_flows = get_32aligned_u64(&dp.stats->n_flows);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (dp.megaflow_stats) {
|
|
|
|
|
stats->n_masks = dp.megaflow_stats->n_masks;
|
|
|
|
|
stats->n_mask_hit = get_32aligned_u64(
|
|
|
|
|
&dp.megaflow_stats->n_mask_hit);
|
|
|
|
|
} else {
|
|
|
|
|
stats->n_masks = UINT32_MAX;
|
|
|
|
|
stats->n_mask_hit = UINT64_MAX;
|
|
|
|
|
}
|
2011-01-26 15:42:00 -08:00
|
|
|
|
ofpbuf_delete(buf);
|
|
|
|
|
}
|
|
|
|
|
return error;
|
2009-06-17 14:35:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
2019-12-22 12:16:38 +02:00
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_set_features(struct dpif *dpif_, uint32_t new_features)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
|
|
|
|
struct dpif_netlink_dp request, reply;
|
|
|
|
|
struct ofpbuf *bufp;
|
|
|
|
|
int error;
|
|
|
|
|
|
|
|
|
|
dpif_netlink_dp_init(&request);
|
|
|
|
|
request.cmd = OVS_DP_CMD_SET;
|
|
|
|
|
request.dp_ifindex = dpif->dp_ifindex;
|
|
|
|
|
request.user_features = dpif->user_features | new_features;
|
|
|
|
|
|
|
|
|
|
error = dpif_netlink_dp_transact(&request, &reply, &bufp);
|
|
|
|
|
if (!error) {
|
|
|
|
|
dpif->user_features = reply.user_features;
|
|
|
|
|
ofpbuf_delete(bufp);
|
|
|
|
|
if (!(dpif->user_features & new_features)) {
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
2012-12-14 19:14:54 -08:00
|
|
|
|
static const char *
|
2014-09-18 04:17:54 -07:00
|
|
|
|
get_vport_type(const struct dpif_netlink_vport *vport)
|
2012-12-14 19:14:54 -08:00
|
|
|
|
{
|
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
|
|
|
|
|
|
|
|
|
|
switch (vport->type) {
|
2014-02-26 13:22:35 -08:00
|
|
|
|
case OVS_VPORT_TYPE_NETDEV: {
|
|
|
|
|
const char *type = netdev_get_type_from_name(vport->name);
|
|
|
|
|
|
|
|
|
|
return type ? type : "system";
|
|
|
|
|
}
|
2012-12-14 19:14:54 -08:00
|
|
|
|
|
|
|
|
|
case OVS_VPORT_TYPE_INTERNAL:
|
|
|
|
|
return "internal";
|
|
|
|
|
|
2014-06-05 19:07:32 -07:00
|
|
|
|
case OVS_VPORT_TYPE_GENEVE:
|
|
|
|
|
return "geneve";
|
|
|
|
|
|
2012-12-14 19:14:54 -08:00
|
|
|
|
case OVS_VPORT_TYPE_GRE:
|
|
|
|
|
return "gre";
|
|
|
|
|
|
|
|
|
|
case OVS_VPORT_TYPE_VXLAN:
|
|
|
|
|
return "vxlan";
|
|
|
|
|
|
2013-02-21 21:52:04 -08:00
|
|
|
|
case OVS_VPORT_TYPE_LISP:
|
|
|
|
|
return "lisp";
|
|
|
|
|
|
2015-04-09 20:12:32 -07:00
|
|
|
|
case OVS_VPORT_TYPE_STT:
|
|
|
|
|
return "stt";
|
|
|
|
|
|
2018-03-05 10:11:57 -08:00
|
|
|
|
case OVS_VPORT_TYPE_ERSPAN:
|
2018-03-21 14:02:25 -07:00
|
|
|
|
return "erspan";
|
|
|
|
|
|
2018-03-05 10:11:57 -08:00
|
|
|
|
case OVS_VPORT_TYPE_IP6ERSPAN:
|
2018-05-04 10:14:44 -07:00
|
|
|
|
return "ip6erspan";
|
|
|
|
|
|
2018-03-05 10:11:57 -08:00
|
|
|
|
case OVS_VPORT_TYPE_IP6GRE:
|
2018-05-04 10:14:44 -07:00
|
|
|
|
return "ip6gre";
|
2018-03-05 10:11:57 -08:00
|
|
|
|
|
2012-12-14 19:14:54 -08:00
|
|
|
|
case OVS_VPORT_TYPE_UNSPEC:
|
|
|
|
|
case __OVS_VPORT_TYPE_MAX:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VLOG_WARN_RL(&rl, "dp%d: port `%s' has unsupported type %u",
|
|
|
|
|
vport->dp_ifindex, vport->name, (unsigned int) vport->type);
|
|
|
|
|
return "unknown";
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-18 16:10:29 -04:00
|
|
|
|
enum ovs_vport_type
|
2017-05-18 16:10:28 -04:00
|
|
|
|
netdev_to_ovs_vport_type(const char *type)
|
2013-01-25 13:30:40 -08:00
|
|
|
|
{
|
|
|
|
|
if (!strcmp(type, "tap") || !strcmp(type, "system")) {
|
|
|
|
|
return OVS_VPORT_TYPE_NETDEV;
|
|
|
|
|
} else if (!strcmp(type, "internal")) {
|
|
|
|
|
return OVS_VPORT_TYPE_INTERNAL;
|
2015-04-09 20:12:32 -07:00
|
|
|
|
} else if (strstr(type, "stt")) {
|
|
|
|
|
return OVS_VPORT_TYPE_STT;
|
2014-06-05 19:07:32 -07:00
|
|
|
|
} else if (!strcmp(type, "geneve")) {
|
|
|
|
|
return OVS_VPORT_TYPE_GENEVE;
|
2013-01-25 13:30:40 -08:00
|
|
|
|
} else if (!strcmp(type, "vxlan")) {
|
|
|
|
|
return OVS_VPORT_TYPE_VXLAN;
|
2013-02-21 21:52:04 -08:00
|
|
|
|
} else if (!strcmp(type, "lisp")) {
|
|
|
|
|
return OVS_VPORT_TYPE_LISP;
|
2018-05-15 16:10:48 -04:00
|
|
|
|
} else if (!strcmp(type, "erspan")) {
|
|
|
|
|
return OVS_VPORT_TYPE_ERSPAN;
|
|
|
|
|
} else if (!strcmp(type, "ip6erspan")) {
|
|
|
|
|
return OVS_VPORT_TYPE_IP6ERSPAN;
|
2018-05-04 10:14:44 -07:00
|
|
|
|
} else if (!strcmp(type, "ip6gre")) {
|
|
|
|
|
return OVS_VPORT_TYPE_IP6GRE;
|
2018-05-04 16:48:43 -07:00
|
|
|
|
} else if (!strcmp(type, "gre")) {
|
|
|
|
|
return OVS_VPORT_TYPE_GRE;
|
2013-01-25 13:30:40 -08:00
|
|
|
|
} else {
|
|
|
|
|
return OVS_VPORT_TYPE_UNSPEC;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-17 14:35:35 -07:00
|
|
|
|
static int
|
2017-05-18 16:10:28 -04:00
|
|
|
|
dpif_netlink_port_add__(struct dpif_netlink *dpif, const char *name,
|
|
|
|
|
enum ovs_vport_type type,
|
|
|
|
|
struct ofpbuf *options,
|
2014-09-18 04:17:54 -07:00
|
|
|
|
odp_port_t *port_nop)
|
2014-04-17 17:16:34 -07:00
|
|
|
|
OVS_REQ_WRLOCK(dpif->upcall_lock)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_vport request, reply;
|
2011-01-26 12:28:59 -08:00
|
|
|
|
struct ofpbuf *buf;
|
2019-10-14 11:10:47 -07:00
|
|
|
|
struct nl_sock *sock = NULL;
|
2018-10-06 18:19:55 +02:00
|
|
|
|
uint32_t upcall_pids = 0;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
int error = 0;
|
2009-06-17 14:35:35 -07:00
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
if (dpif->handlers) {
|
2019-10-14 11:10:47 -07:00
|
|
|
|
error = create_nl_sock(dpif, &sock);
|
2018-11-15 09:08:18 -08:00
|
|
|
|
if (error) {
|
2013-01-04 18:34:26 -08:00
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_vport_init(&request);
|
2011-08-18 10:35:40 -07:00
|
|
|
|
request.cmd = OVS_VPORT_CMD_NEW;
|
2011-01-21 17:01:56 -08:00
|
|
|
|
request.dp_ifindex = dpif->dp_ifindex;
|
2017-05-18 16:10:28 -04:00
|
|
|
|
request.type = type;
|
|
|
|
|
request.name = name;
|
|
|
|
|
|
|
|
|
|
request.port_no = *port_nop;
|
2019-10-14 11:10:47 -07:00
|
|
|
|
if (sock) {
|
|
|
|
|
upcall_pids = nl_sock_pid(sock);
|
2018-10-06 18:19:55 +02:00
|
|
|
|
}
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
request.n_upcall_pids = 1;
|
|
|
|
|
request.upcall_pids = &upcall_pids;
|
2017-05-18 16:10:28 -04:00
|
|
|
|
|
|
|
|
|
if (options) {
|
|
|
|
|
request.options = options->data;
|
|
|
|
|
request.options_len = options->size;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
error = dpif_netlink_vport_transact(&request, &reply, &buf);
|
|
|
|
|
if (!error) {
|
|
|
|
|
*port_nop = reply.port_no;
|
|
|
|
|
} else {
|
|
|
|
|
if (error == EBUSY && *port_nop != ODPP_NONE) {
|
|
|
|
|
VLOG_INFO("%s: requested port %"PRIu32" is in use",
|
|
|
|
|
dpif_name(&dpif->dpif), *port_nop);
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-14 11:10:47 -07:00
|
|
|
|
close_nl_sock(sock);
|
2017-05-18 16:10:28 -04:00
|
|
|
|
goto exit;
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-14 11:10:47 -07:00
|
|
|
|
error = vport_add_channel(dpif, *port_nop, sock);
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
if (error) {
|
|
|
|
|
VLOG_INFO("%s: could not add channel for port %s",
|
|
|
|
|
dpif_name(&dpif->dpif), name);
|
|
|
|
|
|
|
|
|
|
/* Delete the port. */
|
|
|
|
|
dpif_netlink_vport_init(&request);
|
|
|
|
|
request.cmd = OVS_VPORT_CMD_DEL;
|
|
|
|
|
request.dp_ifindex = dpif->dp_ifindex;
|
|
|
|
|
request.port_no = *port_nop;
|
|
|
|
|
dpif_netlink_vport_transact(&request, NULL, NULL);
|
2019-10-14 11:10:47 -07:00
|
|
|
|
close_nl_sock(sock);
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
goto exit;
|
2017-05-18 16:10:28 -04:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
exit:
|
|
|
|
|
ofpbuf_delete(buf);
|
|
|
|
|
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Adds 'netdev' to 'dpif' via the classic OVS Genetlink vport interface
 * (as opposed to the rtnetlink path).  On success stores the assigned port
 * number in '*port_nop'; returns 0 or a positive errno value.  Caller must
 * hold 'dpif->upcall_lock' for writing. */
static int
dpif_netlink_port_add_compat(struct dpif_netlink *dpif, struct netdev *netdev,
                             odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    const struct netdev_tunnel_config *tnl_cfg;
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *type = netdev_get_type(netdev);
    uint64_t options_stub[64 / 8];  /* Stack storage for small option TLVs. */
    enum ovs_vport_type ovs_type;
    struct ofpbuf options;
    const char *name;

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);

    /* Reject netdev types that have no kernel vport equivalent. */
    ovs_type = netdev_to_ovs_vport_type(netdev_get_type(netdev));
    if (ovs_type == OVS_VPORT_TYPE_UNSPEC) {
        VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
                     "unsupported type `%s'",
                     dpif_name(&dpif->dpif), name, type);
        return EINVAL;
    }

    if (ovs_type == OVS_VPORT_TYPE_NETDEV) {
#ifdef _WIN32
        /* XXX : Map appropriate Windows handle */
#else
        /* LRO-merged frames cannot be forwarded, so disable LRO on the
         * device before attaching it to the datapath. */
        netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
#endif
    }

#ifdef _WIN32
    if (ovs_type == OVS_VPORT_TYPE_INTERNAL) {
        if (!create_wmi_port(name)){
            VLOG_ERR("Could not create wmi internal port with name:%s", name);
            return EINVAL;
        };
    }
#endif

    tnl_cfg = netdev_get_tunnel_config(netdev);
    if (tnl_cfg && (tnl_cfg->dst_port != 0 || tnl_cfg->exts)) {
        /* Tunnel ports with a destination port or extensions need an
         * OVS_VPORT_ATTR_OPTIONS nest; build it on the stack. */
        ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
        if (tnl_cfg->dst_port) {
            nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
                           ntohs(tnl_cfg->dst_port));
        }
        if (tnl_cfg->exts) {
            size_t ext_ofs;
            int i;

            /* 'exts' is a bitmap of tunnel extensions; emit one flag
             * attribute per set bit. */
            ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
            for (i = 0; i < 32; i++) {
                if (tnl_cfg->exts & (1 << i)) {
                    nl_msg_put_flag(&options, i);
                }
            }
            nl_msg_end_nested(&options, ext_ofs);
        }
        return dpif_netlink_port_add__(dpif, name, ovs_type, &options,
                                       port_nop);
    } else {
        return dpif_netlink_port_add__(dpif, name, ovs_type, NULL, port_nop);
    }

}
|
2013-01-04 18:34:26 -08:00
|
|
|
|
|
2017-05-18 16:10:33 -04:00
|
|
|
|
/* Creates the device for 'netdev' via rtnetlink and then adds it to 'dpif'
 * as a plain OVS_VPORT_TYPE_NETDEV vport.  On failure of the vport add, the
 * rtnetlink device just created is destroyed again so no orphan is left
 * behind.  Returns 0 or a positive errno value (EOPNOTSUPP means the netdev
 * type is not supported by the rtnetlink path).  Caller must hold
 * 'dpif->upcall_lock' for writing. */
static int
dpif_netlink_rtnl_port_create_and_add(struct dpif_netlink *dpif,
                                      struct netdev *netdev,
                                      odp_port_t *port_nop)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *name;
    int error;

    error = dpif_netlink_rtnl_port_create(netdev);
    if (error) {
        /* EOPNOTSUPP is the expected "fall back to compat" signal, so do
         * not log it as a warning. */
        if (error != EOPNOTSUPP) {
            VLOG_WARN_RL(&rl, "Failed to create %s with rtnetlink: %s",
                         netdev_get_name(netdev), ovs_strerror(error));
        }
        return error;
    }

    name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    error = dpif_netlink_port_add__(dpif, name, OVS_VPORT_TYPE_NETDEV, NULL,
                                    port_nop);
    if (error) {
        /* Roll back the rtnetlink device created above. */
        dpif_netlink_rtnl_port_destroy(name, netdev_get_type(netdev));
    }
    return error;
}
|
2009-06-17 14:35:35 -07:00
|
|
|
|
|
|
|
|
|
/* dpif 'port_add' implementation: first tries the rtnetlink creation path
 * (unless out-of-tree tunnels are in use), then falls back to the compat
 * Genetlink vport path on any failure.  Serialized by taking
 * 'dpif->upcall_lock' for writing. */
static int
dpif_netlink_port_add(struct dpif *dpif_, struct netdev *netdev,
                      odp_port_t *port_nop)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    /* EOPNOTSUPP forces the compat fallback when the rtnetlink path is
     * skipped entirely. */
    int error = EOPNOTSUPP;

    fat_rwlock_wrlock(&dpif->upcall_lock);
    if (!ovs_tunnels_out_of_tree) {
        error = dpif_netlink_rtnl_port_create_and_add(dpif, netdev, port_nop);
    }
    if (error) {
        error = dpif_netlink_port_add_compat(dpif, netdev, port_nop);
    }
    fat_rwlock_unlock(&dpif->upcall_lock);

    return error;
}
|
|
|
|
|
|
|
|
|
|
/* Deletes port 'port_no' from 'dpif': removes the kernel vport, tears down
 * its upcall channels, and (for in-tree tunnels) destroys the underlying
 * rtnetlink device.  Returns 0 or a positive errno value.  Caller must hold
 * 'dpif->upcall_lock' for writing. */
static int
dpif_netlink_port_del__(struct dpif_netlink *dpif, odp_port_t port_no)
    OVS_REQ_WRLOCK(dpif->upcall_lock)
{
    struct dpif_netlink_vport vport;
    struct dpif_port dpif_port;
    int error;

    /* Look up the port's name and type first; they are needed below for the
     * Windows WMI cleanup and the rtnetlink device destruction. */
    error = dpif_netlink_port_query__(dpif, port_no, NULL, &dpif_port);
    if (error) {
        return error;
    }

    dpif_netlink_vport_init(&vport);
    vport.cmd = OVS_VPORT_CMD_DEL;
    vport.dp_ifindex = dpif->dp_ifindex;
    vport.port_no = port_no;
#ifdef _WIN32
    if (!strcmp(dpif_port.type, "internal")) {
        /* Best effort: log but do not fail the datapath deletion if the WMI
         * port cannot be removed. */
        if (!delete_wmi_port(dpif_port.name)) {
            VLOG_ERR("Could not delete wmi port with name: %s",
                     dpif_port.name);
        };
    }
#endif
    error = dpif_netlink_vport_transact(&vport, NULL, NULL);

    /* Release the upcall channels even if the kernel delete failed, so no
     * stale sockets remain associated with the port number. */
    vport_del_channels(dpif, port_no);

    if (!error && !ovs_tunnels_out_of_tree) {
        error = dpif_netlink_rtnl_port_destroy(dpif_port.name, dpif_port.type);
        /* Non-rtnetlink port types report EOPNOTSUPP; that is not an error
         * for the caller. */
        if (error == EOPNOTSUPP) {
            error = 0;
        }
    }

    dpif_port_destroy(&dpif_port);

    return error;
}
|
2010-04-10 01:19:29 -04:00
|
|
|
|
|
2013-07-23 12:41:57 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_port_del(struct dpif *dpif_, odp_port_t port_no)
|
2013-07-23 12:41:57 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2013-07-23 12:41:57 -07:00
|
|
|
|
int error;
|
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
fat_rwlock_wrlock(&dpif->upcall_lock);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
error = dpif_netlink_port_del__(dpif, port_no);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
fat_rwlock_unlock(&dpif->upcall_lock);
|
2013-07-23 12:41:57 -07:00
|
|
|
|
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
2010-12-03 14:41:38 -08:00
|
|
|
|
/* Queries a vport in 'dpif' either by number ('port_no') or by name
 * ('port_name'); exactly one of the two identifies the port.  On success,
 * if 'dpif_port' is nonnull, fills it with malloc'ed name/type strings (the
 * caller owns them and must free with dpif_port_destroy()).  Returns 0,
 * ENODEV if a by-name lookup matched a port in a different datapath, or
 * another positive errno value. */
static int
dpif_netlink_port_query__(const struct dpif_netlink *dpif, odp_port_t port_no,
                          const char *port_name, struct dpif_port *dpif_port)
{
    struct dpif_netlink_vport request;
    struct dpif_netlink_vport reply;
    struct ofpbuf *buf;
    int error;

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_GET;
    request.dp_ifindex = dpif->dp_ifindex;
    request.port_no = port_no;
    request.name = port_name;

    error = dpif_netlink_vport_transact(&request, &reply, &buf);
    if (!error) {
        if (reply.dp_ifindex != request.dp_ifindex) {
            /* A query by name reported that 'port_name' is in some datapath
             * other than 'dpif', but the caller wants to know about 'dpif'. */
            error = ENODEV;
        } else if (dpif_port) {
            /* 'reply' points into 'buf', so copy out what the caller keeps. */
            dpif_port->name = xstrdup(reply.name);
            dpif_port->type = xstrdup(get_vport_type(&reply));
            dpif_port->port_no = reply.port_no;
        }
        ofpbuf_delete(buf);
    }
    return error;
}
|
|
|
|
|
|
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_port_query_by_number(const struct dpif *dpif_, odp_port_t port_no,
|
|
|
|
|
struct dpif_port *dpif_port)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2014-04-17 16:33:17 -07:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
return dpif_netlink_port_query__(dpif, port_no, NULL, dpif_port);
|
2009-06-17 14:35:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* dpif 'port_query_by_name' implementation: looks up 'devname' in the
 * datapath, filling '*dpif_port' on success. */
static int
dpif_netlink_port_query_by_name(const struct dpif *dpif_, const char *devname,
                                struct dpif_port *dpif_port)
{
    const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    return dpif_netlink_port_query__(dpif, 0, devname, dpif_port);
}
|
|
|
|
|
|
2011-10-12 16:24:54 -07:00
|
|
|
|
/* Returns the Netlink PID of the upcall channel associated with 'port_no',
 * or 0 if upcalls are not configured (no handlers, no channels, or the
 * channel's socket has been torn down).  Caller must hold
 * 'dpif->upcall_lock' at least for reading. */
static uint32_t
dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
                            odp_port_t port_no)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    uint32_t port_idx = odp_to_u32(port_no);
    uint32_t pid = 0;

    if (dpif->handlers && dpif->uc_array_size > 0) {
        /* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
         * channel, since it is not heavily loaded. */
        uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;

        /* Needs to check in case the socket pointer is changed in between
         * the holding of upcall_lock. A known case happens when the main
         * thread deletes the vport while the handler thread is handling
         * the upcall from that port. */
        if (dpif->channels[idx].sock) {
            pid = nl_sock_pid(dpif->channels[idx].sock);
        }
    }

    return pid;
}
|
|
|
|
|
|
2014-04-17 17:16:34 -07:00
|
|
|
|
static uint32_t
|
2018-09-25 15:14:13 -07:00
|
|
|
|
dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no)
|
2014-04-17 17:16:34 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2014-04-17 17:16:34 -07:00
|
|
|
|
uint32_t ret;
|
|
|
|
|
|
|
|
|
|
fat_rwlock_rdlock(&dpif->upcall_lock);
|
2018-09-25 15:14:13 -07:00
|
|
|
|
ret = dpif_netlink_port_get_pid__(dpif, port_no);
|
2014-04-17 17:16:34 -07:00
|
|
|
|
fat_rwlock_unlock(&dpif->upcall_lock);
|
|
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-17 14:35:35 -07:00
|
|
|
|
/* dpif 'flow_flush' implementation: deletes every flow in the datapath via
 * a single OVS_FLOW_CMD_DEL with no key, and also flushes hardware-offloaded
 * flows when the netdev flow API is enabled.  Returns 0 or a positive errno
 * value. */
static int
dpif_netlink_flow_flush(struct dpif *dpif_)
{
    const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_netlink_flow flow;

    dpif_netlink_flow_init(&flow);
    flow.cmd = OVS_FLOW_CMD_DEL;
    flow.dp_ifindex = dpif->dp_ifindex;

    /* Offloaded flows live outside the kernel datapath's flow table, so
     * flush them separately. */
    if (netdev_is_flow_api_enabled()) {
        netdev_ports_flow_flush(dpif_->dpif_class);
    }

    return dpif_netlink_flow_transact(&flow, NULL, NULL);
}
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
/* State carried across dpif_netlink_port_dump_start() /
 * _next() / _done() calls. */
struct dpif_netlink_port_state {
    struct nl_dump dump;    /* In-progress Netlink dump of datapath vports. */
    struct ofpbuf buf;      /* Receive buffer reused across _next() calls. */
};
|
|
|
|
|
|
2013-05-01 14:40:38 -07:00
|
|
|
|
/* Starts a Netlink dump of all vports in 'dpif', initializing '*dump'.
 * The temporary request buffer is freed before returning; nl_dump_start()
 * copies what it needs. */
static void
dpif_netlink_port_dump_start__(const struct dpif_netlink *dpif,
                               struct nl_dump *dump)
{
    struct dpif_netlink_vport request;
    struct ofpbuf *buf;

    dpif_netlink_vport_init(&request);
    request.cmd = OVS_VPORT_CMD_GET;
    request.dp_ifindex = dpif->dp_ifindex;

    buf = ofpbuf_new(1024);
    dpif_netlink_vport_to_ofpbuf(&request, buf);
    nl_dump_start(dump, NETLINK_GENERIC, buf);
    ofpbuf_delete(buf);
}
|
|
|
|
|
|
|
|
|
|
/* dpif 'port_dump_start' implementation: allocates per-dump state in
 * '*statep' and kicks off the vport Netlink dump.  Always returns 0; the
 * state is released by dpif_netlink_port_dump_done(). */
static int
dpif_netlink_port_dump_start(const struct dpif *dpif_, void **statep)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_netlink_port_state *state;

    *statep = state = xmalloc(sizeof *state);
    dpif_netlink_port_dump_start__(dpif, &state->dump);

    ofpbuf_init(&state->buf, NL_DUMP_BUFSIZE);
    return 0;
}
|
|
|
|
|
|
2013-12-17 14:37:09 -08:00
|
|
|
|
/* Fetches the next vport record from 'dump' into '*vport', using 'buffer'
 * as backing storage ('vport' fields point into it, so it must outlive the
 * caller's use of 'vport').  Returns 0 on success, EOF when the dump is
 * exhausted, or a positive errno value if a record fails to parse. */
static int
dpif_netlink_port_dump_next__(const struct dpif_netlink *dpif,
                              struct nl_dump *dump,
                              struct dpif_netlink_vport *vport,
                              struct ofpbuf *buffer)
{
    struct ofpbuf buf;
    int error;

    if (!nl_dump_next(dump, &buf, buffer)) {
        return EOF;
    }

    error = dpif_netlink_vport_from_ofpbuf(vport, &buf);
    if (error) {
        VLOG_WARN_RL(&error_rl, "%s: failed to parse vport record (%s)",
                     dpif_name(&dpif->dpif), ovs_strerror(error));
    }
    return error;
}
|
|
|
|
|
|
datapath: Change listing ports to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to add new
features to the kernel vport layer without changing userspace software. In
turn, that means that the odp_port structure must become variable-length.
This does not, however, fit in well with the ODP_PORT_LIST ioctl in its
current form, because that would require userspace to know how much space
to allocate for each port in advance, or to allocate as much space as
could possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_PORT_LIST
by a new ioctl ODP_VPORT_DUMP that retrieves information about a single
vport from the datapath on each call. It is much cleaner to allocate the
maximum amount of space for a single vport than to do so for possibly a
large number of vports.
It would be faster to retrieve a number of vports in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
The Netlink version won't need to take the starting port number from
userspace, since Netlink sockets can keep track of that state as part
of their "dump" feature.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-10 13:12:12 -08:00
|
|
|
|
/* dpif 'port_dump_next' implementation: translates the next dumped vport
 * into '*dpif_port'.  The returned name/type strings point into the dump
 * state's buffer, so they are only valid until the next call.  Returns 0,
 * EOF at end of dump, or a positive errno value. */
static int
dpif_netlink_port_dump_next(const struct dpif *dpif_, void *state_,
                            struct dpif_port *dpif_port)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_netlink_port_state *state = state_;
    struct dpif_netlink_vport vport;
    int error;

    error = dpif_netlink_port_dump_next__(dpif, &state->dump, &vport,
                                          &state->buf);
    if (error) {
        return error;
    }
    /* 'dpif_port' borrows the strings; CONST_CAST is safe because callers
     * of the dump interface treat these as read-only. */
    dpif_port->name = CONST_CAST(char *, vport.name);
    dpif_port->type = CONST_CAST(char *, get_vport_type(&vport));
    dpif_port->port_no = vport.port_no;
    return 0;
}
|
|
|
|
|
|
|
|
|
|
/* dpif 'port_dump_done' implementation: finishes the Netlink dump and frees
 * the per-dump state.  Returns the dump's completion status (0 or a positive
 * errno value). */
static int
dpif_netlink_port_dump_done(const struct dpif *dpif_ OVS_UNUSED, void *state_)
{
    struct dpif_netlink_port_state *state = state_;
    int error = nl_dump_done(&state->dump);

    ofpbuf_uninit(&state->buf);
    free(state);
    return error;
}
|
|
|
|
|
|
2009-06-24 10:24:09 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_port_poll(const struct dpif *dpif_, char **devnamep)
|
2009-06-24 10:24:09 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2009-06-24 10:24:09 -07:00
|
|
|
|
|
2013-07-22 15:00:49 -07:00
|
|
|
|
/* Lazily create the Netlink socket to listen for notifications. */
|
|
|
|
|
if (!dpif->port_notifier) {
|
|
|
|
|
struct nl_sock *sock;
|
|
|
|
|
int error;
|
|
|
|
|
|
|
|
|
|
error = nl_sock_create(NETLINK_GENERIC, &sock);
|
|
|
|
|
if (error) {
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
error = nl_sock_join_mcgroup(sock, ovs_vport_mcgroup);
|
|
|
|
|
if (error) {
|
|
|
|
|
nl_sock_destroy(sock);
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
dpif->port_notifier = sock;
|
|
|
|
|
|
|
|
|
|
/* We have no idea of the current state so report that everything
|
|
|
|
|
* changed. */
|
|
|
|
|
return ENOBUFS;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
|
|
|
|
|
uint64_t buf_stub[4096 / 8];
|
|
|
|
|
struct ofpbuf buf;
|
|
|
|
|
int error;
|
|
|
|
|
|
|
|
|
|
ofpbuf_use_stub(&buf, buf_stub, sizeof buf_stub);
|
2018-03-29 23:05:26 -03:00
|
|
|
|
error = nl_sock_recv(dpif->port_notifier, &buf, NULL, false);
|
2013-07-22 15:00:49 -07:00
|
|
|
|
if (!error) {
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_vport vport;
|
2013-07-22 15:00:49 -07:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
error = dpif_netlink_vport_from_ofpbuf(&vport, &buf);
|
2013-07-22 15:00:49 -07:00
|
|
|
|
if (!error) {
|
|
|
|
|
if (vport.dp_ifindex == dpif->dp_ifindex
|
|
|
|
|
&& (vport.cmd == OVS_VPORT_CMD_NEW
|
|
|
|
|
|| vport.cmd == OVS_VPORT_CMD_DEL
|
|
|
|
|
|| vport.cmd == OVS_VPORT_CMD_SET)) {
|
|
|
|
|
VLOG_DBG("port_changed: dpif:%s vport:%s cmd:%"PRIu8,
|
|
|
|
|
dpif->dpif.full_name, vport.name, vport.cmd);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
if (vport.cmd == OVS_VPORT_CMD_DEL && dpif->handlers) {
|
2013-05-01 17:13:14 -07:00
|
|
|
|
dpif->refresh_channels = true;
|
|
|
|
|
}
|
2013-07-22 15:00:49 -07:00
|
|
|
|
*devnamep = xstrdup(vport.name);
|
2013-08-01 14:07:35 -07:00
|
|
|
|
ofpbuf_uninit(&buf);
|
2013-07-22 15:00:49 -07:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
2013-08-01 14:07:35 -07:00
|
|
|
|
} else if (error != EAGAIN) {
|
|
|
|
|
VLOG_WARN_RL(&rl, "error reading or parsing netlink (%s)",
|
|
|
|
|
ovs_strerror(error));
|
|
|
|
|
nl_sock_drain(dpif->port_notifier);
|
|
|
|
|
error = ENOBUFS;
|
2013-07-22 15:00:49 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-08-01 14:07:35 -07:00
|
|
|
|
ofpbuf_uninit(&buf);
|
|
|
|
|
if (error) {
|
|
|
|
|
return error;
|
|
|
|
|
}
|
2009-06-24 10:24:09 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_port_poll_wait(const struct dpif *dpif_)
|
2009-06-24 10:24:09 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2013-07-22 15:00:49 -07:00
|
|
|
|
|
|
|
|
|
if (dpif->port_notifier) {
|
|
|
|
|
nl_sock_wait(dpif->port_notifier, POLLIN);
|
|
|
|
|
} else {
|
2009-06-24 10:24:09 -07:00
|
|
|
|
poll_immediate_wake();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-13 09:55:54 +12:00
|
|
|
|
static void
|
2014-09-24 16:26:35 +12:00
|
|
|
|
dpif_netlink_flow_init_ufid(struct dpif_netlink_flow *request,
|
|
|
|
|
const ovs_u128 *ufid, bool terse)
|
|
|
|
|
{
|
|
|
|
|
if (ufid) {
|
|
|
|
|
request->ufid = *ufid;
|
|
|
|
|
request->ufid_present = true;
|
|
|
|
|
} else {
|
|
|
|
|
request->ufid_present = false;
|
|
|
|
|
}
|
|
|
|
|
request->ufid_terse = terse;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netlink_init_flow_get__(const struct dpif_netlink *dpif,
|
|
|
|
|
const struct nlattr *key, size_t key_len,
|
|
|
|
|
const ovs_u128 *ufid, bool terse,
|
|
|
|
|
struct dpif_netlink_flow *request)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_init(request);
|
2014-08-13 09:55:54 +12:00
|
|
|
|
request->cmd = OVS_FLOW_CMD_GET;
|
|
|
|
|
request->dp_ifindex = dpif->dp_ifindex;
|
|
|
|
|
request->key = key;
|
|
|
|
|
request->key_len = key_len;
|
2014-09-24 16:26:35 +12:00
|
|
|
|
dpif_netlink_flow_init_ufid(request, ufid, terse);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
|
|
|
|
|
const struct dpif_flow_get *get,
|
|
|
|
|
struct dpif_netlink_flow *request)
|
|
|
|
|
{
|
|
|
|
|
dpif_netlink_init_flow_get__(dpif, get->key, get->key_len, get->ufid,
|
|
|
|
|
false, request);
|
2011-02-01 09:25:26 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2014-09-24 16:26:35 +12:00
|
|
|
|
dpif_netlink_flow_get__(const struct dpif_netlink *dpif,
|
|
|
|
|
const struct nlattr *key, size_t key_len,
|
|
|
|
|
const ovs_u128 *ufid, bool terse,
|
|
|
|
|
struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
|
2011-02-01 09:25:26 -08:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_flow request;
|
2011-02-01 09:25:26 -08:00
|
|
|
|
|
2014-09-24 16:26:35 +12:00
|
|
|
|
dpif_netlink_init_flow_get__(dpif, key, key_len, ufid, terse, &request);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
return dpif_netlink_flow_transact(&request, reply, bufp);
|
2009-06-17 14:35:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-09-24 16:26:35 +12:00
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_flow_get(const struct dpif_netlink *dpif,
|
|
|
|
|
const struct dpif_netlink_flow *flow,
|
|
|
|
|
struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
|
|
|
|
|
{
|
|
|
|
|
return dpif_netlink_flow_get__(dpif, flow->key, flow->key_len,
|
|
|
|
|
flow->ufid_present ? &flow->ufid : NULL,
|
|
|
|
|
false, reply, bufp);
|
|
|
|
|
}
|
|
|
|
|
|
2011-09-27 15:08:50 -07:00
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
|
|
|
|
|
const struct dpif_flow_put *put,
|
|
|
|
|
struct dpif_netlink_flow *request)
|
2011-09-27 15:08:50 -07:00
|
|
|
|
{
|
2013-04-23 14:06:25 -07:00
|
|
|
|
static const struct nlattr dummy_action;
|
2011-09-27 15:08:50 -07:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_init(request);
|
2011-12-26 14:39:03 -08:00
|
|
|
|
request->cmd = (put->flags & DPIF_FP_CREATE
|
2011-09-27 15:08:50 -07:00
|
|
|
|
? OVS_FLOW_CMD_NEW : OVS_FLOW_CMD_SET);
|
|
|
|
|
request->dp_ifindex = dpif->dp_ifindex;
|
2011-12-26 14:39:03 -08:00
|
|
|
|
request->key = put->key;
|
|
|
|
|
request->key_len = put->key_len;
|
2013-06-19 07:15:10 +00:00
|
|
|
|
request->mask = put->mask;
|
|
|
|
|
request->mask_len = put->mask_len;
|
2014-09-24 16:26:35 +12:00
|
|
|
|
dpif_netlink_flow_init_ufid(request, put->ufid, false);
|
|
|
|
|
|
2011-09-27 15:08:50 -07:00
|
|
|
|
/* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
|
2013-04-23 14:06:25 -07:00
|
|
|
|
request->actions = (put->actions
|
|
|
|
|
? put->actions
|
|
|
|
|
: CONST_CAST(struct nlattr *, &dummy_action));
|
2011-12-26 14:39:03 -08:00
|
|
|
|
request->actions_len = put->actions_len;
|
|
|
|
|
if (put->flags & DPIF_FP_ZERO_STATS) {
|
2011-09-27 15:08:50 -07:00
|
|
|
|
request->clear = true;
|
|
|
|
|
}
|
2014-09-12 11:20:13 -07:00
|
|
|
|
if (put->flags & DPIF_FP_PROBE) {
|
|
|
|
|
request->probe = true;
|
|
|
|
|
}
|
2011-12-26 14:39:03 -08:00
|
|
|
|
request->nlmsg_flags = put->flags & DPIF_FP_MODIFY ? 0 : NLM_F_CREATE;
|
2011-09-27 15:08:50 -07:00
|
|
|
|
}
|
|
|
|
|
|
2012-04-17 21:52:10 -07:00
|
|
|
|
static void
|
2014-09-24 16:26:35 +12:00
|
|
|
|
dpif_netlink_init_flow_del__(struct dpif_netlink *dpif,
|
|
|
|
|
const struct nlattr *key, size_t key_len,
|
|
|
|
|
const ovs_u128 *ufid, bool terse,
|
|
|
|
|
struct dpif_netlink_flow *request)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_init(request);
|
2012-04-17 21:52:10 -07:00
|
|
|
|
request->cmd = OVS_FLOW_CMD_DEL;
|
|
|
|
|
request->dp_ifindex = dpif->dp_ifindex;
|
2014-09-24 16:26:35 +12:00
|
|
|
|
request->key = key;
|
|
|
|
|
request->key_len = key_len;
|
|
|
|
|
dpif_netlink_flow_init_ufid(request, ufid, terse);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
|
|
|
|
|
const struct dpif_flow_del *del,
|
|
|
|
|
struct dpif_netlink_flow *request)
|
|
|
|
|
{
|
2015-07-13 14:15:33 +00:00
|
|
|
|
dpif_netlink_init_flow_del__(dpif, del->key, del->key_len,
|
|
|
|
|
del->ufid, del->terse, request);
|
2014-09-24 16:26:35 +12:00
|
|
|
|
}
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_flow_dump {
|
2014-05-20 11:37:02 -07:00
|
|
|
|
struct dpif_flow_dump up;
|
|
|
|
|
struct nl_dump nl_dump;
|
2014-02-27 14:13:08 -08:00
|
|
|
|
atomic_int status;
|
2017-06-13 18:03:34 +03:00
|
|
|
|
struct netdev_flow_dump **netdev_dumps;
|
|
|
|
|
int netdev_dumps_num; /* Number of netdev_flow_dumps */
|
|
|
|
|
struct ovs_mutex netdev_lock; /* Guards the following. */
|
|
|
|
|
int netdev_current_dump OVS_GUARDED; /* Shared current dump */
|
2018-08-10 11:30:08 +03:00
|
|
|
|
struct dpif_flow_dump_types types; /* Type of dump */
|
2014-02-27 14:13:07 -08:00
|
|
|
|
};
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
static struct dpif_netlink_flow_dump *
|
|
|
|
|
dpif_netlink_flow_dump_cast(struct dpif_flow_dump *dump)
|
2014-02-27 14:13:07 -08:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
return CONTAINER_OF(dump, struct dpif_netlink_flow_dump, up);
|
2014-02-27 14:13:07 -08:00
|
|
|
|
}
|
|
|
|
|
|
2017-06-13 18:03:34 +03:00
|
|
|
|
static void
|
|
|
|
|
start_netdev_dump(const struct dpif *dpif_,
|
|
|
|
|
struct dpif_netlink_flow_dump *dump)
|
|
|
|
|
{
|
|
|
|
|
ovs_mutex_init(&dump->netdev_lock);
|
|
|
|
|
|
2018-08-10 11:30:08 +03:00
|
|
|
|
if (!(dump->types.netdev_flows)) {
|
2017-06-13 18:03:34 +03:00
|
|
|
|
dump->netdev_dumps_num = 0;
|
|
|
|
|
dump->netdev_dumps = NULL;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ovs_mutex_lock(&dump->netdev_lock);
|
|
|
|
|
dump->netdev_current_dump = 0;
|
|
|
|
|
dump->netdev_dumps
|
2017-07-25 08:28:41 +03:00
|
|
|
|
= netdev_ports_flow_dump_create(dpif_->dpif_class,
|
2017-06-13 18:03:34 +03:00
|
|
|
|
&dump->netdev_dumps_num);
|
|
|
|
|
ovs_mutex_unlock(&dump->netdev_lock);
|
|
|
|
|
}
|
|
|
|
|
|
2018-08-10 11:30:08 +03:00
|
|
|
|
static void
|
|
|
|
|
dpif_netlink_populate_flow_dump_types(struct dpif_netlink_flow_dump *dump,
|
|
|
|
|
struct dpif_flow_dump_types *types)
|
|
|
|
|
{
|
|
|
|
|
if (!types) {
|
|
|
|
|
dump->types.ovs_flows = true;
|
|
|
|
|
dump->types.netdev_flows = true;
|
|
|
|
|
} else {
|
|
|
|
|
memcpy(&dump->types, types, sizeof *types);
|
2018-07-25 13:52:42 -07:00
|
|
|
|
}
|
2017-06-13 18:03:49 +03:00
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
static struct dpif_flow_dump *
|
2017-06-13 18:03:49 +03:00
|
|
|
|
dpif_netlink_flow_dump_create(const struct dpif *dpif_, bool terse,
|
2018-08-10 11:30:08 +03:00
|
|
|
|
struct dpif_flow_dump_types *types)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
|
|
|
|
struct dpif_netlink_flow_dump *dump;
|
|
|
|
|
struct dpif_netlink_flow request;
|
2011-01-28 14:00:51 -08:00
|
|
|
|
struct ofpbuf *buf;
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
dump = xmalloc(sizeof *dump);
|
|
|
|
|
dpif_flow_dump_init(&dump->up, dpif_);
|
2011-01-28 14:00:51 -08:00
|
|
|
|
|
2018-08-10 11:30:08 +03:00
|
|
|
|
dpif_netlink_populate_flow_dump_types(dump, types);
|
2011-01-28 14:00:51 -08:00
|
|
|
|
|
2018-08-10 11:30:08 +03:00
|
|
|
|
if (dump->types.ovs_flows) {
|
2017-06-13 18:03:49 +03:00
|
|
|
|
dpif_netlink_flow_init(&request);
|
|
|
|
|
request.cmd = OVS_FLOW_CMD_GET;
|
|
|
|
|
request.dp_ifindex = dpif->dp_ifindex;
|
|
|
|
|
request.ufid_present = false;
|
|
|
|
|
request.ufid_terse = terse;
|
|
|
|
|
|
|
|
|
|
buf = ofpbuf_new(1024);
|
|
|
|
|
dpif_netlink_flow_to_ofpbuf(&request, buf);
|
|
|
|
|
nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
|
|
|
|
|
ofpbuf_delete(buf);
|
|
|
|
|
}
|
2014-05-20 11:37:02 -07:00
|
|
|
|
atomic_init(&dump->status, 0);
|
2014-10-06 11:14:08 +13:00
|
|
|
|
dump->up.terse = terse;
|
2011-02-01 09:25:26 -08:00
|
|
|
|
|
2017-06-13 18:03:34 +03:00
|
|
|
|
start_netdev_dump(dpif_, dump);
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
return &dump->up;
|
datapath: Change listing flows to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
In turn, that means that flow keys must become variable-length. This does
not, however, fit in well with the ODP_FLOW_LIST ioctl in its current form,
because that would require userspace to know how much space to allocate
for each flow's key in advance, or to allocate as much space as could
possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_FLOW_LIST
by a new ioctl ODP_FLOW_DUMP that retrieves a single flow from the datapath
on each call. It is much cleaner to allocate the maximum amount of space
for a single flow key than to do so for possibly a very large number of
flow keys.
As a side effect, this patch also fixes a race condition that sometimes
made "ovs-dpctl dump-flows" print an error: previously, flows were listed
and then their actions were retrieved, which left a window in which
ovs-vswitchd could delete the flow. Now dumping a flow and its actions is
a single step, closing that window.
Dumping all of the flows in a datapath is no longer an atomic step, so now
it is possible to miss some flows or see a single flow twice during
iteration, if the flow table is modified by another process. It doesn't
look like this should be a problem for ovs-vswitchd.
It would be faster to retrieve a number of flows in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2010-12-28 10:39:52 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_dump_destroy(struct dpif_flow_dump *dump_)
|
datapath: Change listing flows to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
In turn, that means that flow keys must become variable-length. This does
not, however, fit in well with the ODP_FLOW_LIST ioctl in its current form,
because that would require userspace to know how much space to allocate
for each flow's key in advance, or to allocate as much space as could
possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_FLOW_LIST
by a new ioctl ODP_FLOW_DUMP that retrieves a single flow from the datapath
on each call. It is much cleaner to allocate the maximum amount of space
for a single flow key than to do so for possibly a very large number of
flow keys.
As a side effect, this patch also fixes a race condition that sometimes
made "ovs-dpctl dump-flows" print an error: previously, flows were listed
and then their actions were retrieved, which left a window in which
ovs-vswitchd could delete the flow. Now dumping a flow and its actions is
a single step, closing that window.
Dumping all of the flows in a datapath is no longer an atomic step, so now
it is possible to miss some flows or see a single flow twice during
iteration, if the flow table is modified by another process. It doesn't
look like this should be a problem for ovs-vswitchd.
It would be faster to retrieve a number of flows in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2010-12-28 10:39:52 -08:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
|
2017-06-13 18:03:49 +03:00
|
|
|
|
unsigned int nl_status = 0;
|
2014-05-20 11:37:02 -07:00
|
|
|
|
int dump_status;
|
2009-06-17 14:35:35 -07:00
|
|
|
|
|
2018-08-10 11:30:08 +03:00
|
|
|
|
if (dump->types.ovs_flows) {
|
2017-06-13 18:03:49 +03:00
|
|
|
|
nl_status = nl_dump_done(&dump->nl_dump);
|
|
|
|
|
}
|
|
|
|
|
|
2017-06-13 18:03:34 +03:00
|
|
|
|
for (int i = 0; i < dump->netdev_dumps_num; i++) {
|
|
|
|
|
int err = netdev_flow_dump_destroy(dump->netdev_dumps[i]);
|
|
|
|
|
|
|
|
|
|
if (err != 0 && err != EOPNOTSUPP) {
|
|
|
|
|
VLOG_ERR("failed dumping netdev: %s", ovs_strerror(err));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
free(dump->netdev_dumps);
|
|
|
|
|
ovs_mutex_destroy(&dump->netdev_lock);
|
|
|
|
|
|
2014-08-29 10:34:52 -07:00
|
|
|
|
/* No other thread has access to 'dump' at this point. */
|
|
|
|
|
atomic_read_relaxed(&dump->status, &dump_status);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
free(dump);
|
|
|
|
|
return dump_status ? dump_status : nl_status;
|
|
|
|
|
}
|
2011-01-26 07:03:39 -08:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_flow_dump_thread {
|
2014-05-20 11:37:02 -07:00
|
|
|
|
struct dpif_flow_dump_thread up;
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_flow_dump *dump;
|
|
|
|
|
struct dpif_netlink_flow flow;
|
2014-05-20 11:37:02 -07:00
|
|
|
|
struct dpif_flow_stats stats;
|
|
|
|
|
struct ofpbuf nl_flows; /* Always used to store flows. */
|
|
|
|
|
struct ofpbuf *nl_actions; /* Used if kernel does not supply actions. */
|
2017-06-13 18:03:34 +03:00
|
|
|
|
int netdev_dump_idx; /* This thread current netdev dump index */
|
|
|
|
|
bool netdev_done; /* If we are finished dumping netdevs */
|
|
|
|
|
|
|
|
|
|
/* (Key/Mask/Actions) Buffers for netdev dumping */
|
|
|
|
|
struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];
|
|
|
|
|
struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];
|
|
|
|
|
struct odputil_keybuf actbuf[FLOW_DUMP_MAX_BATCH];
|
2014-05-20 11:37:02 -07:00
|
|
|
|
};
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
static struct dpif_netlink_flow_dump_thread *
|
|
|
|
|
dpif_netlink_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
|
2014-05-20 11:37:02 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
return CONTAINER_OF(thread, struct dpif_netlink_flow_dump_thread, up);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct dpif_flow_dump_thread *
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_dump_thread_create(struct dpif_flow_dump *dump_)
|
2014-05-20 11:37:02 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_flow_dump *dump = dpif_netlink_flow_dump_cast(dump_);
|
|
|
|
|
struct dpif_netlink_flow_dump_thread *thread;
|
2014-05-20 11:37:02 -07:00
|
|
|
|
|
|
|
|
|
thread = xmalloc(sizeof *thread);
|
|
|
|
|
dpif_flow_dump_thread_init(&thread->up, &dump->up);
|
|
|
|
|
thread->dump = dump;
|
|
|
|
|
ofpbuf_init(&thread->nl_flows, NL_DUMP_BUFSIZE);
|
|
|
|
|
thread->nl_actions = NULL;
|
2017-06-13 18:03:34 +03:00
|
|
|
|
thread->netdev_dump_idx = 0;
|
|
|
|
|
thread->netdev_done = !(thread->netdev_dump_idx < dump->netdev_dumps_num);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
|
|
|
|
|
return &thread->up;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
|
2014-05-20 11:37:02 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_flow_dump_thread *thread
|
|
|
|
|
= dpif_netlink_flow_dump_thread_cast(thread_);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
|
|
|
|
|
ofpbuf_uninit(&thread->nl_flows);
|
|
|
|
|
ofpbuf_delete(thread->nl_actions);
|
|
|
|
|
free(thread);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2019-12-08 18:09:53 +01:00
|
|
|
|
dpif_netlink_flow_to_dpif_flow(struct dpif_flow *dpif_flow,
|
2014-09-19 15:34:45 -07:00
|
|
|
|
const struct dpif_netlink_flow *datapath_flow)
|
2014-05-20 11:37:02 -07:00
|
|
|
|
{
|
2014-09-19 15:34:45 -07:00
|
|
|
|
dpif_flow->key = datapath_flow->key;
|
|
|
|
|
dpif_flow->key_len = datapath_flow->key_len;
|
|
|
|
|
dpif_flow->mask = datapath_flow->mask;
|
|
|
|
|
dpif_flow->mask_len = datapath_flow->mask_len;
|
|
|
|
|
dpif_flow->actions = datapath_flow->actions;
|
|
|
|
|
dpif_flow->actions_len = datapath_flow->actions_len;
|
2014-09-24 16:26:35 +12:00
|
|
|
|
dpif_flow->ufid_present = datapath_flow->ufid_present;
|
2015-02-02 14:50:47 -08:00
|
|
|
|
dpif_flow->pmd_id = PMD_ID_NULL;
|
2014-09-24 16:26:35 +12:00
|
|
|
|
if (datapath_flow->ufid_present) {
|
|
|
|
|
dpif_flow->ufid = datapath_flow->ufid;
|
|
|
|
|
} else {
|
|
|
|
|
ovs_assert(datapath_flow->key && datapath_flow->key_len);
|
2019-12-08 18:09:53 +01:00
|
|
|
|
odp_flow_key_hash(datapath_flow->key, datapath_flow->key_len,
|
|
|
|
|
&dpif_flow->ufid);
|
2014-09-24 16:26:35 +12:00
|
|
|
|
}
|
2014-09-19 15:34:45 -07:00
|
|
|
|
dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
|
dpctl: Properly reflect a rule's offloaded to HW state
Previously, any rule that is offloaded via a netdev, not necessarily
to the HW, would be reported as "offloaded". This patch fixes this
misalignment, and introduces the 'dp' state, as follows:
rule is in HW via TC offload -> offloaded=yes dp:tc
rule is in not HW over TC DP -> offloaded=no dp:tc
rule is in not HW over OVS DP -> offloaded=no dp:ovs
To achieve this, the flows's 'offloaded' flag was encapsulated in a new
attrs struct, which contains the offloaded state of the flow and the
DP layer the flow is handled in, and instead of setting the flow's
'offloaded' state based solely on the type of dump it was acquired
via, for netdev flows it now sends the new attrs struct to be
collected along with the rest of the flow via the netdev, allowing
it to be set per flow.
For TC offloads, the offloaded state is set based on the 'in_hw' and
'not_in_hw' flags received from the TC as part of the flower. If no
such flag was received, due to lack of kernel support, it defaults
to true.
Signed-off-by: Gavi Teitz <gavi@mellanox.com>
Acked-by: Roi Dayan <roid@mellanox.com>
[simon: resolved conflict in lib/dpctl.man]
Signed-off-by: Simon Horman <simon.horman@netronome.com>
2018-06-07 09:36:59 +03:00
|
|
|
|
dpif_flow->attrs.offloaded = false;
|
|
|
|
|
dpif_flow->attrs.dp_layer = "ovs";
|
2020-01-17 23:00:05 +01:00
|
|
|
|
dpif_flow->attrs.dp_extra_info = NULL;
|
2014-05-20 11:37:02 -07:00
|
|
|
|
}
|
|
|
|
|
|
2017-06-13 18:03:34 +03:00
|
|
|
|
/* The design is such that all threads are working together on the first dump
|
|
|
|
|
* to the last, in order (at first they all on dump 0).
|
|
|
|
|
* When the first thread finds that the given dump is finished,
|
|
|
|
|
* they all move to the next. If two or more threads find the same dump
|
|
|
|
|
* is finished at the same time, the first one will advance the shared
|
|
|
|
|
* netdev_current_dump and the others will catch up. */
|
|
|
|
|
static void
|
|
|
|
|
dpif_netlink_advance_netdev_dump(struct dpif_netlink_flow_dump_thread *thread)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_netlink_flow_dump *dump = thread->dump;
|
|
|
|
|
|
|
|
|
|
ovs_mutex_lock(&dump->netdev_lock);
|
|
|
|
|
/* if we haven't finished (dumped everything) */
|
|
|
|
|
if (dump->netdev_current_dump < dump->netdev_dumps_num) {
|
|
|
|
|
/* if we are the first to find that current dump is finished
|
|
|
|
|
* advance it. */
|
|
|
|
|
if (thread->netdev_dump_idx == dump->netdev_current_dump) {
|
|
|
|
|
thread->netdev_dump_idx = ++dump->netdev_current_dump;
|
|
|
|
|
/* did we just finish the last dump? done. */
|
|
|
|
|
if (dump->netdev_current_dump == dump->netdev_dumps_num) {
|
|
|
|
|
thread->netdev_done = true;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
/* otherwise, we are behind, catch up */
|
|
|
|
|
thread->netdev_dump_idx = dump->netdev_current_dump;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
/* some other thread finished */
|
|
|
|
|
thread->netdev_done = true;
|
|
|
|
|
}
|
|
|
|
|
ovs_mutex_unlock(&dump->netdev_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_netdev_match_to_dpif_flow(struct match *match,
|
|
|
|
|
struct ofpbuf *key_buf,
|
|
|
|
|
struct ofpbuf *mask_buf,
|
|
|
|
|
struct nlattr *actions,
|
|
|
|
|
struct dpif_flow_stats *stats,
|
dpctl: Properly reflect a rule's offloaded to HW state
Previously, any rule that is offloaded via a netdev, not necessarily
to the HW, would be reported as "offloaded". This patch fixes this
misalignment, and introduces the 'dp' state, as follows:
rule is in HW via TC offload -> offloaded=yes dp:tc
rule is in not HW over TC DP -> offloaded=no dp:tc
rule is in not HW over OVS DP -> offloaded=no dp:ovs
To achieve this, the flows's 'offloaded' flag was encapsulated in a new
attrs struct, which contains the offloaded state of the flow and the
DP layer the flow is handled in, and instead of setting the flow's
'offloaded' state based solely on the type of dump it was acquired
via, for netdev flows it now sends the new attrs struct to be
collected along with the rest of the flow via the netdev, allowing
it to be set per flow.
For TC offloads, the offloaded state is set based on the 'in_hw' and
'not_in_hw' flags received from the TC as part of the flower. If no
such flag was received, due to lack of kernel support, it defaults
to true.
Signed-off-by: Gavi Teitz <gavi@mellanox.com>
Acked-by: Roi Dayan <roid@mellanox.com>
[simon: resolved conflict in lib/dpctl.man]
Signed-off-by: Simon Horman <simon.horman@netronome.com>
2018-06-07 09:36:59 +03:00
|
|
|
|
struct dpif_flow_attrs *attrs,
|
2017-06-13 18:03:34 +03:00
|
|
|
|
ovs_u128 *ufid,
|
|
|
|
|
struct dpif_flow *flow,
|
|
|
|
|
bool terse OVS_UNUSED)
|
|
|
|
|
{
|
|
|
|
|
|
|
|
|
|
struct odp_flow_key_parms odp_parms = {
|
|
|
|
|
.flow = &match->flow,
|
|
|
|
|
.mask = &match->wc.masks,
|
|
|
|
|
.support = {
|
2018-07-17 02:01:57 +00:00
|
|
|
|
.max_vlan_headers = 2,
|
2019-12-22 12:16:40 +02:00
|
|
|
|
.recirc = true,
|
2019-12-22 12:16:41 +02:00
|
|
|
|
.ct_state = true,
|
|
|
|
|
.ct_zone = true,
|
2019-12-22 12:16:42 +02:00
|
|
|
|
.ct_mark = true,
|
|
|
|
|
.ct_label = true,
|
2017-06-13 18:03:34 +03:00
|
|
|
|
},
|
|
|
|
|
};
|
|
|
|
|
size_t offset;
|
|
|
|
|
|
|
|
|
|
memset(flow, 0, sizeof *flow);
|
|
|
|
|
|
|
|
|
|
/* Key */
|
|
|
|
|
offset = key_buf->size;
|
|
|
|
|
flow->key = ofpbuf_tail(key_buf);
|
|
|
|
|
odp_flow_key_from_flow(&odp_parms, key_buf);
|
|
|
|
|
flow->key_len = key_buf->size - offset;
|
|
|
|
|
|
|
|
|
|
/* Mask */
|
|
|
|
|
offset = mask_buf->size;
|
|
|
|
|
flow->mask = ofpbuf_tail(mask_buf);
|
|
|
|
|
odp_parms.key_buf = key_buf;
|
|
|
|
|
odp_flow_key_from_mask(&odp_parms, mask_buf);
|
|
|
|
|
flow->mask_len = mask_buf->size - offset;
|
|
|
|
|
|
|
|
|
|
/* Actions */
|
|
|
|
|
flow->actions = nl_attr_get(actions);
|
|
|
|
|
flow->actions_len = nl_attr_get_size(actions);
|
|
|
|
|
|
|
|
|
|
/* Stats */
|
|
|
|
|
memcpy(&flow->stats, stats, sizeof *stats);
|
|
|
|
|
|
|
|
|
|
/* UFID */
|
|
|
|
|
flow->ufid_present = true;
|
|
|
|
|
flow->ufid = *ufid;
|
|
|
|
|
|
|
|
|
|
flow->pmd_id = PMD_ID_NULL;
|
2017-06-13 18:03:50 +03:00
|
|
|
|
|
dpctl: Properly reflect a rule's offloaded to HW state
Previously, any rule that is offloaded via a netdev, not necessarily
to the HW, would be reported as "offloaded". This patch fixes this
misalignment, and introduces the 'dp' state, as follows:
rule is in HW via TC offload -> offloaded=yes dp:tc
rule is in not HW over TC DP -> offloaded=no dp:tc
rule is in not HW over OVS DP -> offloaded=no dp:ovs
To achieve this, the flows's 'offloaded' flag was encapsulated in a new
attrs struct, which contains the offloaded state of the flow and the
DP layer the flow is handled in, and instead of setting the flow's
'offloaded' state based solely on the type of dump it was acquired
via, for netdev flows it now sends the new attrs struct to be
collected along with the rest of the flow via the netdev, allowing
it to be set per flow.
For TC offloads, the offloaded state is set based on the 'in_hw' and
'not_in_hw' flags received from the TC as part of the flower. If no
such flag was received, due to lack of kernel support, it defaults
to true.
Signed-off-by: Gavi Teitz <gavi@mellanox.com>
Acked-by: Roi Dayan <roid@mellanox.com>
[simon: resolved conflict in lib/dpctl.man]
Signed-off-by: Simon Horman <simon.horman@netronome.com>
2018-06-07 09:36:59 +03:00
|
|
|
|
memcpy(&flow->attrs, attrs, sizeof *attrs);
|
2017-06-13 18:03:50 +03:00
|
|
|
|
|
2017-06-13 18:03:34 +03:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_dump_next(struct dpif_flow_dump_thread *thread_,
|
|
|
|
|
struct dpif_flow *flows, int max_flows)
|
2014-05-20 11:37:02 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_flow_dump_thread *thread
|
|
|
|
|
= dpif_netlink_flow_dump_thread_cast(thread_);
|
|
|
|
|
struct dpif_netlink_flow_dump *dump = thread->dump;
|
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(thread->up.dpif);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
int n_flows;
|
|
|
|
|
|
|
|
|
|
ofpbuf_delete(thread->nl_actions);
|
|
|
|
|
thread->nl_actions = NULL;
|
|
|
|
|
|
|
|
|
|
n_flows = 0;
|
2017-06-13 18:03:34 +03:00
|
|
|
|
max_flows = MIN(max_flows, FLOW_DUMP_MAX_BATCH);
|
|
|
|
|
|
|
|
|
|
while (!thread->netdev_done && n_flows < max_flows) {
|
|
|
|
|
struct odputil_keybuf *maskbuf = &thread->maskbuf[n_flows];
|
|
|
|
|
struct odputil_keybuf *keybuf = &thread->keybuf[n_flows];
|
|
|
|
|
struct odputil_keybuf *actbuf = &thread->actbuf[n_flows];
|
|
|
|
|
struct ofpbuf key, mask, act;
|
|
|
|
|
struct dpif_flow *f = &flows[n_flows];
|
|
|
|
|
int cur = thread->netdev_dump_idx;
|
|
|
|
|
struct netdev_flow_dump *netdev_dump = dump->netdev_dumps[cur];
|
|
|
|
|
struct match match;
|
|
|
|
|
struct nlattr *actions;
|
|
|
|
|
struct dpif_flow_stats stats;
|
dpctl: Properly reflect a rule's offloaded to HW state
Previously, any rule that is offloaded via a netdev, not necessarily
to the HW, would be reported as "offloaded". This patch fixes this
misalignment, and introduces the 'dp' state, as follows:
rule is in HW via TC offload -> offloaded=yes dp:tc
rule is in not HW over TC DP -> offloaded=no dp:tc
rule is in not HW over OVS DP -> offloaded=no dp:ovs
To achieve this, the flows's 'offloaded' flag was encapsulated in a new
attrs struct, which contains the offloaded state of the flow and the
DP layer the flow is handled in, and instead of setting the flow's
'offloaded' state based solely on the type of dump it was acquired
via, for netdev flows it now sends the new attrs struct to be
collected along with the rest of the flow via the netdev, allowing
it to be set per flow.
For TC offloads, the offloaded state is set based on the 'in_hw' and
'not_in_hw' flags received from the TC as part of the flower. If no
such flag was received, due to lack of kernel support, it defaults
to true.
Signed-off-by: Gavi Teitz <gavi@mellanox.com>
Acked-by: Roi Dayan <roid@mellanox.com>
[simon: resolved conflict in lib/dpctl.man]
Signed-off-by: Simon Horman <simon.horman@netronome.com>
2018-06-07 09:36:59 +03:00
|
|
|
|
struct dpif_flow_attrs attrs;
|
2017-06-13 18:03:34 +03:00
|
|
|
|
ovs_u128 ufid;
|
|
|
|
|
bool has_next;
|
|
|
|
|
|
|
|
|
|
ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
|
|
|
|
|
ofpbuf_use_stack(&act, actbuf, sizeof *actbuf);
|
|
|
|
|
ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
|
|
|
|
|
has_next = netdev_flow_dump_next(netdev_dump, &match,
|
dpctl: Properly reflect a rule's offloaded to HW state
Previously, any rule that is offloaded via a netdev, not necessarily
to the HW, would be reported as "offloaded". This patch fixes this
misalignment, and introduces the 'dp' state, as follows:
rule is in HW via TC offload -> offloaded=yes dp:tc
rule is in not HW over TC DP -> offloaded=no dp:tc
rule is in not HW over OVS DP -> offloaded=no dp:ovs
To achieve this, the flows's 'offloaded' flag was encapsulated in a new
attrs struct, which contains the offloaded state of the flow and the
DP layer the flow is handled in, and instead of setting the flow's
'offloaded' state based solely on the type of dump it was acquired
via, for netdev flows it now sends the new attrs struct to be
collected along with the rest of the flow via the netdev, allowing
it to be set per flow.
For TC offloads, the offloaded state is set based on the 'in_hw' and
'not_in_hw' flags received from the TC as part of the flower. If no
such flag was received, due to lack of kernel support, it defaults
to true.
Signed-off-by: Gavi Teitz <gavi@mellanox.com>
Acked-by: Roi Dayan <roid@mellanox.com>
[simon: resolved conflict in lib/dpctl.man]
Signed-off-by: Simon Horman <simon.horman@netronome.com>
2018-06-07 09:36:59 +03:00
|
|
|
|
&actions, &stats, &attrs,
|
2017-06-13 18:03:34 +03:00
|
|
|
|
&ufid,
|
|
|
|
|
&thread->nl_flows,
|
|
|
|
|
&act);
|
|
|
|
|
if (has_next) {
|
|
|
|
|
dpif_netlink_netdev_match_to_dpif_flow(&match,
|
|
|
|
|
&key, &mask,
|
|
|
|
|
actions,
|
|
|
|
|
&stats,
|
dpctl: Properly reflect a rule's offloaded to HW state
Previously, any rule that is offloaded via a netdev, not necessarily
to the HW, would be reported as "offloaded". This patch fixes this
misalignment, and introduces the 'dp' state, as follows:
rule is in HW via TC offload -> offloaded=yes dp:tc
rule is in not HW over TC DP -> offloaded=no dp:tc
rule is in not HW over OVS DP -> offloaded=no dp:ovs
To achieve this, the flows's 'offloaded' flag was encapsulated in a new
attrs struct, which contains the offloaded state of the flow and the
DP layer the flow is handled in, and instead of setting the flow's
'offloaded' state based solely on the type of dump it was acquired
via, for netdev flows it now sends the new attrs struct to be
collected along with the rest of the flow via the netdev, allowing
it to be set per flow.
For TC offloads, the offloaded state is set based on the 'in_hw' and
'not_in_hw' flags received from the TC as part of the flower. If no
such flag was received, due to lack of kernel support, it defaults
to true.
Signed-off-by: Gavi Teitz <gavi@mellanox.com>
Acked-by: Roi Dayan <roid@mellanox.com>
[simon: resolved conflict in lib/dpctl.man]
Signed-off-by: Simon Horman <simon.horman@netronome.com>
2018-06-07 09:36:59 +03:00
|
|
|
|
&attrs,
|
2017-06-13 18:03:34 +03:00
|
|
|
|
&ufid,
|
|
|
|
|
f,
|
|
|
|
|
dump->up.terse);
|
|
|
|
|
n_flows++;
|
|
|
|
|
} else {
|
|
|
|
|
dpif_netlink_advance_netdev_dump(thread);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-08-10 11:30:08 +03:00
|
|
|
|
if (!(dump->types.ovs_flows)) {
|
2017-06-13 18:03:49 +03:00
|
|
|
|
return n_flows;
|
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
while (!n_flows
|
2015-03-02 17:29:44 -08:00
|
|
|
|
|| (n_flows < max_flows && thread->nl_flows.size)) {
|
2014-09-19 15:34:45 -07:00
|
|
|
|
struct dpif_netlink_flow datapath_flow;
|
2014-05-20 11:37:02 -07:00
|
|
|
|
struct ofpbuf nl_flow;
|
|
|
|
|
int error;
|
|
|
|
|
|
|
|
|
|
/* Try to grab another flow. */
|
|
|
|
|
if (!nl_dump_next(&dump->nl_dump, &nl_flow, &thread->nl_flows)) {
|
|
|
|
|
break;
|
2011-01-26 07:03:39 -08:00
|
|
|
|
}
|
2011-02-01 09:25:26 -08:00
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
/* Convert the flow to our output format. */
|
2014-09-19 15:34:45 -07:00
|
|
|
|
error = dpif_netlink_flow_from_ofpbuf(&datapath_flow, &nl_flow);
|
2011-02-01 09:25:26 -08:00
|
|
|
|
if (error) {
|
2014-08-29 10:34:52 -07:00
|
|
|
|
atomic_store_relaxed(&dump->status, error);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
break;
|
2011-01-26 07:03:39 -08:00
|
|
|
|
}
|
2011-02-01 09:25:26 -08:00
|
|
|
|
|
2014-10-06 11:14:08 +13:00
|
|
|
|
if (dump->up.terse || datapath_flow.actions) {
|
|
|
|
|
/* Common case: we don't want actions, or the flow includes
|
|
|
|
|
* actions. */
|
2019-12-08 18:09:53 +01:00
|
|
|
|
dpif_netlink_flow_to_dpif_flow(&flows[n_flows++], &datapath_flow);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
} else {
|
|
|
|
|
/* Rare case: the flow does not include actions. Retrieve this
|
|
|
|
|
* individual flow again to get the actions. */
|
2014-09-24 16:26:35 +12:00
|
|
|
|
error = dpif_netlink_flow_get(dpif, &datapath_flow,
|
2014-09-19 15:34:45 -07:00
|
|
|
|
&datapath_flow, &thread->nl_actions);
|
2011-02-01 09:25:26 -08:00
|
|
|
|
if (error == ENOENT) {
|
|
|
|
|
VLOG_DBG("dumped flow disappeared on get");
|
2014-05-20 11:37:02 -07:00
|
|
|
|
continue;
|
2011-02-01 09:25:26 -08:00
|
|
|
|
} else if (error) {
|
2013-06-24 10:54:49 -07:00
|
|
|
|
VLOG_WARN("error fetching dumped flow: %s",
|
|
|
|
|
ovs_strerror(error));
|
2014-08-29 10:34:52 -07:00
|
|
|
|
atomic_store_relaxed(&dump->status, error);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
break;
|
2011-02-01 09:25:26 -08:00
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
/* Save this flow. Then exit, because we only have one buffer to
|
|
|
|
|
* handle this case. */
|
2019-12-08 18:09:53 +01:00
|
|
|
|
dpif_netlink_flow_to_dpif_flow(&flows[n_flows++], &datapath_flow);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
break;
|
|
|
|
|
}
|
2011-01-26 07:03:39 -08:00
|
|
|
|
}
|
2014-05-20 11:37:02 -07:00
|
|
|
|
return n_flows;
|
2009-06-17 14:35:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
2012-04-05 16:49:22 -07:00
|
|
|
|
/* Encodes an OVS_PACKET_CMD_EXECUTE request for 'd_exec' into 'buf',
 * addressed to the datapath identified by 'dp_ifindex'.  The message
 * carries the raw packet, a metadata flow key derived from the packet,
 * and the actions to execute, plus optional probe, MRU, and hash
 * attributes. */
static void
dpif_netlink_encode_execute(int dp_ifindex, const struct dpif_execute *d_exec,
                            struct ofpbuf *buf)
{
    struct ovs_header *k_exec;
    size_t key_ofs;

    /* Reserve tailroom up front: 64 bytes of slack for the Netlink and
     * Generic Netlink headers plus attribute headers, then the payload. */
    ofpbuf_prealloc_tailroom(buf, (64
                                   + dp_packet_size(d_exec->packet)
                                   + ODP_KEY_METADATA_SIZE
                                   + d_exec->actions_len));

    nl_msg_put_genlmsghdr(buf, 0, ovs_packet_family, NLM_F_REQUEST,
                          OVS_PACKET_CMD_EXECUTE, OVS_PACKET_VERSION);

    /* The ovs_header immediately follows the genlmsghdr. */
    k_exec = ofpbuf_put_uninit(buf, sizeof *k_exec);
    k_exec->dp_ifindex = dp_ifindex;

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
                      dp_packet_data(d_exec->packet),
                      dp_packet_size(d_exec->packet));

    /* Nested OVS_PACKET_ATTR_KEY holding the packet's metadata key. */
    key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
    odp_key_from_dp_packet(buf, d_exec->packet);
    nl_msg_end_nested(buf, key_ofs);

    nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
                      d_exec->actions, d_exec->actions_len);
    if (d_exec->probe) {
        nl_msg_put_flag(buf, OVS_PACKET_ATTR_PROBE);
    }
    if (d_exec->mtu) {
        nl_msg_put_u16(buf, OVS_PACKET_ATTR_MRU, d_exec->mtu);
    }

    /* Echo the skb hash back to the kernel so that, e.g., tunnel UDP source
     * port selection stays consistent for the whole flow. */
    if (d_exec->hash) {
        nl_msg_put_u64(buf, OVS_PACKET_ATTR_HASH, d_exec->hash);
    }
}
|
|
|
|
|
|
2015-01-11 13:45:36 -08:00
|
|
|
|
/* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
 * Returns the number actually executed (at least 1, if 'n_ops' is
 * positive).
 *
 * Works in two phases: first each operation is encoded into its own
 * Netlink request (with a per-op reply buffer when stats are wanted),
 * then all requests are issued in one batched transaction and the
 * replies are decoded back into the corresponding 'ops' entries. */
static size_t
dpif_netlink_operate__(struct dpif_netlink *dpif,
                       struct dpif_op **ops, size_t n_ops)
{
    /* Per-operation scratch state: the transaction plus stack-stub-backed
     * request and reply buffers. */
    struct op_auxdata {
        struct nl_transaction txn;

        struct ofpbuf request;
        uint64_t request_stub[1024 / 8];

        struct ofpbuf reply;
        uint64_t reply_stub[1024 / 8];
    } auxes[OPERATE_MAX_OPS];

    struct nl_transaction *txnsp[OPERATE_MAX_OPS];
    size_t i;

    n_ops = MIN(n_ops, OPERATE_MAX_OPS);

    /* Phase 1: encode each operation into a Netlink request. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;
        struct dpif_netlink_flow flow;

        ofpbuf_use_stub(&aux->request,
                        aux->request_stub, sizeof aux->request_stub);
        aux->txn.request = &aux->request;

        ofpbuf_use_stub(&aux->reply, aux->reply_stub, sizeof aux->reply_stub);
        aux->txn.reply = NULL;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->flow_put;
            dpif_netlink_init_flow_put(dpif, put, &flow);
            if (put->stats) {
                /* NLM_F_ECHO asks the kernel to send the flow back so we
                 * can extract its statistics from the reply. */
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->flow_del;
            dpif_netlink_init_flow_del(dpif, del, &flow);
            if (del->stats) {
                flow.nlmsg_flags |= NLM_F_ECHO;
                aux->txn.reply = &aux->reply;
            }
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        case DPIF_OP_EXECUTE:
            /* Can't execute a packet that won't fit in a Netlink attribute. */
            if (OVS_UNLIKELY(nl_attr_oversized(
                                 dp_packet_size(op->execute.packet)))) {
                /* Report an error immediately if this is the first operation.
                 * Otherwise the easiest thing to do is to postpone to the next
                 * call (when this will be the first operation). */
                if (i == 0) {
                    VLOG_ERR_RL(&error_rl,
                                "dropping oversized %"PRIu32"-byte packet",
                                dp_packet_size(op->execute.packet));
                    op->error = ENOBUFS;
                    return 1;
                }
                n_ops = i;
            } else {
                dpif_netlink_encode_execute(dpif->dp_ifindex, &op->execute,
                                            &aux->request);
            }
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->flow_get;
            dpif_netlink_init_flow_get(dpif, get, &flow);
            /* The caller's buffer receives the reply directly so the decoded
             * flow can reference it after we return. */
            aux->txn.reply = get->buffer;
            dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
            break;

        default:
            OVS_NOT_REACHED();
        }
    }

    /* Phase 2: issue all requests in a single batched transaction. */
    for (i = 0; i < n_ops; i++) {
        txnsp[i] = &auxes[i].txn;
    }
    nl_transact_multiple(NETLINK_GENERIC, txnsp, n_ops);

    /* Decode each reply back into the corresponding operation. */
    for (i = 0; i < n_ops; i++) {
        struct op_auxdata *aux = &auxes[i];
        struct nl_transaction *txn = &auxes[i].txn;
        struct dpif_op *op = ops[i];
        struct dpif_flow_put *put;
        struct dpif_flow_del *del;
        struct dpif_flow_get *get;

        op->error = txn->error;

        switch (op->type) {
        case DPIF_OP_FLOW_PUT:
            put = &op->flow_put;
            if (put->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, put->stats);
                    }
                }
            }
            break;

        case DPIF_OP_FLOW_DEL:
            del = &op->flow_del;
            if (del->stats) {
                if (!op->error) {
                    struct dpif_netlink_flow reply;

                    op->error = dpif_netlink_flow_from_ofpbuf(&reply,
                                                              txn->reply);
                    if (!op->error) {
                        dpif_netlink_flow_get_stats(&reply, del->stats);
                    }
                }
            }
            break;

        case DPIF_OP_EXECUTE:
            /* Nothing to decode; op->error already carries the result. */
            break;

        case DPIF_OP_FLOW_GET:
            get = &op->flow_get;
            if (!op->error) {
                struct dpif_netlink_flow reply;

                op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
                if (!op->error) {
                    dpif_netlink_flow_to_dpif_flow(get->flow, &reply);
                }
            }
            break;

        default:
            OVS_NOT_REACHED();
        }

        ofpbuf_uninit(&aux->request);
        ofpbuf_uninit(&aux->reply);
    }

    return n_ops;
}
|
|
|
|
|
|
2017-06-13 18:03:43 +03:00
|
|
|
|
/* Handles a DPIF_OP_FLOW_GET via the netdev offload path: looks up the flow
 * by get->ufid through netdev_ports_flow_get() and translates the result
 * into get->flow.  Returns 0 on success or a positive errno value (the
 * lookup error) on failure, in which case get->flow is untouched. */
static int
parse_flow_get(struct dpif_netlink *dpif, struct dpif_flow_get *get)
{
    struct dpif_flow *dpif_flow = get->flow;
    struct match match;
    struct nlattr *actions;
    struct dpif_flow_stats stats;
    struct dpif_flow_attrs attrs;
    struct ofpbuf buf;
    uint64_t act_buf[1024 / 8];
    struct odputil_keybuf maskbuf;
    struct odputil_keybuf keybuf;
    struct odputil_keybuf actbuf;
    struct ofpbuf key, mask, act;
    int err;

    /* 'buf' receives the netdev-provided actions; 'actions' points into it. */
    ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
    err = netdev_ports_flow_get(dpif->dpif.dpif_class, &match,
                                &actions, get->ufid, &stats, &attrs, &buf);
    if (err) {
        return err;
    }

    VLOG_DBG("found flow from netdev, translating to dpif flow");

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    ofpbuf_use_stack(&act, &actbuf, sizeof actbuf);
    ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
    dpif_netlink_netdev_match_to_dpif_flow(&match, &key, &mask, actions,
                                           &stats, &attrs,
                                           (ovs_u128 *) get->ufid,
                                           dpif_flow,
                                           false);
    /* Copy the actions into the caller-owned buffer so they remain valid
     * after the stack-backed 'buf' goes out of scope, then point the dpif
     * flow at that copy. */
    ofpbuf_put(get->buffer, nl_attr_get(actions), nl_attr_get_size(actions));
    dpif_flow->actions = ofpbuf_at(get->buffer, 0, 0);
    dpif_flow->actions_len = nl_attr_get_size(actions);

    return 0;
}
|
|
|
|
|
|
2017-06-13 18:03:38 +03:00
|
|
|
|
/* Handles a DPIF_OP_FLOW_PUT via the netdev (hardware offload) path.
 * Attempts to install the flow described by 'put' with netdev_flow_put()
 * on the flow's input netdev.  Returns 0 on success or a positive errno;
 * EOPNOTSUPP indicates the flow cannot be offloaded (e.g. probe flows or
 * unknown ports) and should be handled by the kernel datapath instead.
 *
 * NOTE(review): on a failed DPIF_FP_MODIFY, this may mutate put->flags
 * (MODIFY -> CREATE) so the caller's subsequent kernel insert creates the
 * flow fresh. */
static int
parse_flow_put(struct dpif_netlink *dpif, struct dpif_flow_put *put)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    const struct dpif_class *dpif_class = dpif->dpif.dpif_class;
    struct match match;
    odp_port_t in_port;
    const struct nlattr *nla;
    size_t left;
    struct netdev *dev;
    struct offload_info info;
    ovs_be16 dst_port = 0;
    uint8_t csum_on = false;
    int err;

    /* Probe flows are internal feature checks; never offload them. */
    if (put->flags & DPIF_FP_PROBE) {
        return EOPNOTSUPP;
    }

    err = parse_key_and_mask_to_match(put->key, put->key_len, put->mask,
                                      put->mask_len, &match);
    if (err) {
        return err;
    }

    in_port = match.flow.in_port.odp_port;
    dev = netdev_ports_get(in_port, dpif_class);
    if (!dev) {
        return EOPNOTSUPP;
    }

    /* Get tunnel dst port */
    NL_ATTR_FOR_EACH(nla, left, put->actions, put->actions_len) {
        if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
            const struct netdev_tunnel_config *tnl_cfg;
            struct netdev *outdev;
            odp_port_t out_port;

            out_port = nl_attr_get_odp_port(nla);
            outdev = netdev_ports_get(out_port, dpif_class);
            if (!outdev) {
                /* Output port unknown to the offload layer; give up and
                 * release 'dev' through the common exit path. */
                err = EOPNOTSUPP;
                goto out;
            }
            tnl_cfg = netdev_get_tunnel_config(outdev);
            if (tnl_cfg && tnl_cfg->dst_port != 0) {
                dst_port = tnl_cfg->dst_port;
            }
            if (tnl_cfg) {
                csum_on = tnl_cfg->csum;
            }
            netdev_close(outdev);
        }
    }

    info.dpif_class = dpif_class;
    info.tp_dst_port = dst_port;
    info.tunnel_csum_on = csum_on;
    info.recirc_id_shared_with_tc = (dpif->user_features
                                     & OVS_DP_F_TC_RECIRC_SHARING);
    info.tc_modify_flow_deleted = false;
    err = netdev_flow_put(dev, &match,
                          CONST_CAST(struct nlattr *, put->actions),
                          put->actions_len,
                          CONST_CAST(ovs_u128 *, put->ufid),
                          &info, put->stats);

    if (!err) {
        if (put->flags & DPIF_FP_MODIFY) {
            /* The modified flow is now offloaded; remove any stale copy of
             * it from the kernel datapath. */
            struct dpif_op *opp;
            struct dpif_op op;

            op.type = DPIF_OP_FLOW_DEL;
            op.flow_del.key = put->key;
            op.flow_del.key_len = put->key_len;
            op.flow_del.ufid = put->ufid;
            op.flow_del.pmd_id = put->pmd_id;
            op.flow_del.stats = NULL;
            op.flow_del.terse = false;

            opp = &op;
            dpif_netlink_operate__(dpif, &opp, 1);
        }

        VLOG_DBG("added flow");
    } else if (err != EEXIST) {
        struct netdev *oor_netdev = NULL;
        enum vlog_level level;
        if (err == ENOSPC && netdev_is_offload_rebalance_policy_enabled()) {
            /*
             * We need to set OOR on the input netdev (i.e, 'dev') for the
             * flow. But if the flow has a tunnel attribute (i.e, decap action,
             * with a virtual device like a VxLAN interface as its in-port),
             * then lookup and set OOR on the underlying tunnel (real) netdev.
             */
            oor_netdev = flow_get_tunnel_netdev(&match.flow.tunnel);
            if (!oor_netdev) {
                /* Not a 'tunnel' flow */
                oor_netdev = dev;
            }
            netdev_set_hw_info(oor_netdev, HW_INFO_TYPE_OOR, true);
        }
        /* Expected offload failures are logged at debug level only. */
        level = (err == ENOSPC || err == EOPNOTSUPP) ? VLL_DBG : VLL_ERR;
        VLOG_RL(&rl, level, "failed to offload flow: %s: %s",
                ovs_strerror(err),
                (oor_netdev ? oor_netdev->name : dev->name));
    }

out:
    if (err && err != EEXIST && (put->flags & DPIF_FP_MODIFY)) {
        /* Modified rule can't be offloaded, try and delete from HW */
        int del_err = 0;

        if (!info.tc_modify_flow_deleted) {
            del_err = netdev_flow_del(dev, put->ufid, put->stats);
        }

        if (!del_err) {
            /* Delete from hw success, so old flow was offloaded.
             * Change flags to create the flow in kernel */
            put->flags &= ~DPIF_FP_MODIFY;
            put->flags |= DPIF_FP_CREATE;
        } else if (del_err != ENOENT) {
            VLOG_ERR_RL(&rl, "failed to delete offloaded flow: %s",
                        ovs_strerror(del_err));
            /* stop proccesing the flow in kernel */
            err = 0;
        }
    }

    netdev_close(dev);

    return err;
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
try_send_to_netdev(struct dpif_netlink *dpif, struct dpif_op *op)
|
2012-04-05 16:49:22 -07:00
|
|
|
|
{
|
2017-06-13 18:03:38 +03:00
|
|
|
|
int err = EOPNOTSUPP;
|
2014-04-17 16:33:17 -07:00
|
|
|
|
|
2017-06-13 18:03:38 +03:00
|
|
|
|
switch (op->type) {
|
|
|
|
|
case DPIF_OP_FLOW_PUT: {
|
2018-05-24 10:32:59 -07:00
|
|
|
|
struct dpif_flow_put *put = &op->flow_put;
|
2017-06-13 18:03:38 +03:00
|
|
|
|
|
|
|
|
|
if (!put->ufid) {
|
|
|
|
|
break;
|
|
|
|
|
}
|
2017-06-13 18:03:54 +03:00
|
|
|
|
|
2017-06-13 18:03:38 +03:00
|
|
|
|
err = parse_flow_put(dpif, put);
|
2020-01-06 11:23:42 +01:00
|
|
|
|
log_flow_put_message(&dpif->dpif, &this_module, put, 0);
|
2017-06-13 18:03:38 +03:00
|
|
|
|
break;
|
|
|
|
|
}
|
2017-06-13 18:03:41 +03:00
|
|
|
|
case DPIF_OP_FLOW_DEL: {
|
2018-05-24 10:32:59 -07:00
|
|
|
|
struct dpif_flow_del *del = &op->flow_del;
|
2017-06-13 18:03:41 +03:00
|
|
|
|
|
|
|
|
|
if (!del->ufid) {
|
|
|
|
|
break;
|
|
|
|
|
}
|
2017-06-13 18:03:54 +03:00
|
|
|
|
|
2017-07-25 08:28:41 +03:00
|
|
|
|
err = netdev_ports_flow_del(dpif->dpif.dpif_class, del->ufid,
|
2017-06-13 18:03:41 +03:00
|
|
|
|
del->stats);
|
2020-01-06 11:23:42 +01:00
|
|
|
|
log_flow_del_message(&dpif->dpif, &this_module, del, 0);
|
2017-06-13 18:03:41 +03:00
|
|
|
|
break;
|
|
|
|
|
}
|
2017-06-13 18:03:43 +03:00
|
|
|
|
case DPIF_OP_FLOW_GET: {
|
2018-05-24 10:32:59 -07:00
|
|
|
|
struct dpif_flow_get *get = &op->flow_get;
|
2017-06-13 18:03:43 +03:00
|
|
|
|
|
2018-05-24 10:32:59 -07:00
|
|
|
|
if (!op->flow_get.ufid) {
|
2017-06-13 18:03:43 +03:00
|
|
|
|
break;
|
|
|
|
|
}
|
2017-06-13 18:03:54 +03:00
|
|
|
|
|
2017-06-13 18:03:43 +03:00
|
|
|
|
err = parse_flow_get(dpif, get);
|
2020-01-06 11:23:42 +01:00
|
|
|
|
log_flow_get_message(&dpif->dpif, &this_module, get, 0);
|
2017-06-13 18:03:43 +03:00
|
|
|
|
break;
|
|
|
|
|
}
|
2017-06-13 18:03:38 +03:00
|
|
|
|
case DPIF_OP_EXECUTE:
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netlink_operate_chunks(struct dpif_netlink *dpif, struct dpif_op **ops,
|
|
|
|
|
size_t n_ops)
|
|
|
|
|
{
|
2012-04-05 16:49:22 -07:00
|
|
|
|
while (n_ops > 0) {
|
2015-01-11 13:45:36 -08:00
|
|
|
|
size_t chunk = dpif_netlink_operate__(dpif, ops, n_ops);
|
2017-06-13 18:03:38 +03:00
|
|
|
|
|
2012-04-05 16:49:22 -07:00
|
|
|
|
ops += chunk;
|
|
|
|
|
n_ops -= chunk;
|
|
|
|
|
}
|
2011-09-27 15:08:50 -07:00
|
|
|
|
}
|
|
|
|
|
|
2017-06-13 18:03:38 +03:00
|
|
|
|
/* dpif 'operate' implementation: executes the 'n_ops' operations in 'ops'.
 *
 * When the netdev flow API (hardware offload) is enabled and
 * 'offload_type' permits it, each op is first offered to the offload
 * path via try_send_to_netdev().  Ops the offload path rejects (with any
 * error other than EEXIST) fall back to the kernel datapath in batches
 * of at most OPERATE_MAX_OPS, unless 'offload_type' is
 * DPIF_OFFLOAD_ALWAYS, in which case the first hard offload error aborts
 * processing and is propagated into every remaining op's 'error'. */
static void
dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops,
                     enum dpif_offload_type offload_type)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_op *new_ops[OPERATE_MAX_OPS]; /* Ops falling back to kernel. */
    int count = 0;  /* Number of ops currently queued in 'new_ops'. */
    int i = 0;      /* Index of the next op to consume from 'ops'. */
    int err = 0;

    /* DPIF_OFFLOAD_ALWAYS is meaningless without the flow API: nothing may
     * go to the kernel datapath, and nothing can be offloaded either.
     * NOTE(review): this early return leaves every op's 'error' untouched —
     * presumably callers never request OFFLOAD_ALWAYS with offload disabled;
     * confirm against the dpif layer's contract. */
    if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
        VLOG_DBG("Invalid offload_type: %d", offload_type);
        return;
    }

    if (offload_type != DPIF_OFFLOAD_NEVER && netdev_is_flow_api_enabled()) {
        while (n_ops > 0) {
            count = 0;

            /* Offer up to OPERATE_MAX_OPS ops to the offload path; collect
             * the ones it refused into 'new_ops' for the kernel fallback. */
            while (n_ops > 0 && count < OPERATE_MAX_OPS) {
                struct dpif_op *op = ops[i++];

                err = try_send_to_netdev(dpif, op);
                if (err && err != EEXIST) {
                    if (offload_type == DPIF_OFFLOAD_ALWAYS) {
                        /* We got an error while offloading an op. Since
                         * OFFLOAD_ALWAYS is specified, we stop further
                         * processing and return to the caller without
                         * invoking kernel datapath as fallback. But the
                         * interface requires us to process all n_ops; so
                         * return the same error in the remaining ops too.
                         */
                        op->error = err;
                        n_ops--;
                        while (n_ops > 0) {
                            op = ops[i++];
                            op->error = err;
                            n_ops--;
                        }
                        return;
                    }
                    new_ops[count++] = op;
                } else {
                    /* Offload succeeded (err == 0) or the flow already
                     * exists in hardware (EEXIST): record the result. */
                    op->error = err;
                }

                n_ops--;
            }

            dpif_netlink_operate_chunks(dpif, new_ops, count);
        }
    } else if (offload_type != DPIF_OFFLOAD_ALWAYS) {
        /* Offload disabled or unavailable: send everything straight to the
         * kernel datapath. */
        dpif_netlink_operate_chunks(dpif, ops, n_ops);
    }
}
|
|
|
|
|
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#if _WIN32
/* On Windows, per-handler state is a pool of sockets used to receive
 * vport upcalls; uninit tears the pool down. */
static void
dpif_netlink_handler_uninit(struct dpif_handler *handler)
{
    vport_delete_sock_pool(handler);
}

/* Creates the per-handler socket pool.  Returns 0 on success or a
 * positive errno value on failure (per vport_create_sock_pool). */
static int
dpif_netlink_handler_init(struct dpif_handler *handler)
{
    return vport_create_sock_pool(handler);
}
#else

/* On other platforms, per-handler state is an epoll instance used to wait
 * on the per-port Netlink upcall sockets.  Returns 0 on success or a
 * positive errno value on failure. */
static int
dpif_netlink_handler_init(struct dpif_handler *handler)
{
    /* The size hint passed to epoll_create() has been ignored by the kernel
     * since Linux 2.6.8; it merely must be positive. */
    handler->epoll_fd = epoll_create(10);
    return handler->epoll_fd < 0 ? errno : 0;
}

/* Releases the epoll instance created by dpif_netlink_handler_init(). */
static void
dpif_netlink_handler_uninit(struct dpif_handler *handler)
{
    close(handler->epoll_fd);
}
#endif
|
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
/* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
|
|
|
|
|
* currently in 'dpif' in the kernel, by adding a new set of channels for
|
|
|
|
|
* any kernel vport that lacks one and deleting any channels that have no
|
|
|
|
|
* backing kernel vports. */
|
2009-06-17 14:35:35 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_refresh_channels(struct dpif_netlink *dpif, uint32_t n_handlers)
|
2014-04-17 17:16:34 -07:00
|
|
|
|
OVS_REQ_WRLOCK(dpif->upcall_lock)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2013-12-04 13:37:31 -08:00
|
|
|
|
unsigned long int *keep_channels;
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_vport vport;
|
2013-12-04 13:37:31 -08:00
|
|
|
|
size_t keep_channels_nbits;
|
|
|
|
|
struct nl_dump dump;
|
2014-02-27 14:13:05 -08:00
|
|
|
|
uint64_t reply_stub[NL_DUMP_BUFSIZE / 8];
|
|
|
|
|
struct ofpbuf buf;
|
2013-12-04 13:37:31 -08:00
|
|
|
|
int retval = 0;
|
|
|
|
|
size_t i;
|
2011-01-26 13:41:54 -08:00
|
|
|
|
|
2014-10-23 08:27:34 -07:00
|
|
|
|
ovs_assert(!WINDOWS || n_handlers <= 1);
|
|
|
|
|
ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
|
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
if (dpif->n_handlers != n_handlers) {
|
|
|
|
|
destroy_all_channels(dpif);
|
|
|
|
|
dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
|
|
|
|
|
for (i = 0; i < n_handlers; i++) {
|
2014-10-23 08:27:34 -07:00
|
|
|
|
int error;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
struct dpif_handler *handler = &dpif->handlers[i];
|
|
|
|
|
|
2014-10-23 08:27:34 -07:00
|
|
|
|
error = dpif_netlink_handler_init(handler);
|
|
|
|
|
if (error) {
|
2014-02-26 10:10:29 -08:00
|
|
|
|
size_t j;
|
|
|
|
|
|
|
|
|
|
for (j = 0; j < i; j++) {
|
2017-05-30 07:38:56 -07:00
|
|
|
|
struct dpif_handler *tmp = &dpif->handlers[j];
|
2014-10-23 08:27:34 -07:00
|
|
|
|
dpif_netlink_handler_uninit(tmp);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
}
|
|
|
|
|
free(dpif->handlers);
|
|
|
|
|
dpif->handlers = NULL;
|
|
|
|
|
|
2014-10-23 08:27:34 -07:00
|
|
|
|
return error;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
}
|
2013-12-04 13:37:31 -08:00
|
|
|
|
}
|
2014-02-26 10:10:29 -08:00
|
|
|
|
dpif->n_handlers = n_handlers;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < n_handlers; i++) {
|
|
|
|
|
struct dpif_handler *handler = &dpif->handlers[i];
|
|
|
|
|
|
|
|
|
|
handler->event_offset = handler->n_events = 0;
|
2011-09-16 15:23:37 -07:00
|
|
|
|
}
|
2011-09-14 13:05:09 -07:00
|
|
|
|
|
2013-12-04 13:37:31 -08:00
|
|
|
|
keep_channels_nbits = dpif->uc_array_size;
|
|
|
|
|
keep_channels = bitmap_allocate(keep_channels_nbits);
|
2011-01-26 13:41:54 -08:00
|
|
|
|
|
2014-02-27 14:13:05 -08:00
|
|
|
|
ofpbuf_use_stub(&buf, reply_stub, sizeof reply_stub);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_port_dump_start__(dpif, &dump);
|
|
|
|
|
while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
|
2013-12-04 13:37:31 -08:00
|
|
|
|
uint32_t port_no = odp_to_u32(vport.port_no);
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
uint32_t upcall_pid;
|
2013-12-04 13:37:31 -08:00
|
|
|
|
int error;
|
2011-11-22 09:25:32 -08:00
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
if (port_no >= dpif->uc_array_size
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
|| !vport_get_pid(dpif, port_no, &upcall_pid)) {
|
2019-10-14 11:10:47 -07:00
|
|
|
|
struct nl_sock *sock;
|
|
|
|
|
error = create_nl_sock(dpif, &sock);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
2018-11-16 15:32:58 +02:00
|
|
|
|
if (error) {
|
2014-02-26 10:10:29 -08:00
|
|
|
|
goto error;
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-14 11:10:47 -07:00
|
|
|
|
error = vport_add_channel(dpif, vport.port_no, sock);
|
2011-09-14 13:05:09 -07:00
|
|
|
|
if (error) {
|
2014-02-26 10:10:29 -08:00
|
|
|
|
VLOG_INFO("%s: could not add channels for port %s",
|
2014-04-17 16:33:17 -07:00
|
|
|
|
dpif_name(&dpif->dpif), vport.name);
|
2019-10-14 11:10:47 -07:00
|
|
|
|
nl_sock_destroy(sock);
|
2013-12-04 13:37:31 -08:00
|
|
|
|
retval = error;
|
|
|
|
|
goto error;
|
2011-01-26 13:41:54 -08:00
|
|
|
|
}
|
2019-10-14 11:10:47 -07:00
|
|
|
|
upcall_pid = nl_sock_pid(sock);
|
2013-12-04 13:37:31 -08:00
|
|
|
|
}
|
2011-11-22 09:25:32 -08:00
|
|
|
|
|
2013-12-04 13:37:31 -08:00
|
|
|
|
/* Configure the vport to deliver misses to 'sock'. */
|
2014-02-26 10:10:29 -08:00
|
|
|
|
if (vport.upcall_pids[0] == 0
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
|| vport.n_upcall_pids != 1
|
|
|
|
|
|| upcall_pid != vport.upcall_pids[0]) {
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_vport vport_request;
|
2013-01-04 18:34:26 -08:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_vport_init(&vport_request);
|
2013-01-04 18:34:26 -08:00
|
|
|
|
vport_request.cmd = OVS_VPORT_CMD_SET;
|
|
|
|
|
vport_request.dp_ifindex = dpif->dp_ifindex;
|
2013-12-04 13:37:31 -08:00
|
|
|
|
vport_request.port_no = vport.port_no;
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This is because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is obtained from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
vport_request.n_upcall_pids = 1;
|
|
|
|
|
vport_request.upcall_pids = &upcall_pid;
|
2014-09-18 04:17:54 -07:00
|
|
|
|
error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
if (error) {
|
2013-01-04 18:34:26 -08:00
|
|
|
|
VLOG_WARN_RL(&error_rl,
|
|
|
|
|
"%s: failed to set upcall pid on port: %s",
|
2013-06-24 10:54:49 -07:00
|
|
|
|
dpif_name(&dpif->dpif), ovs_strerror(error));
|
2013-01-04 18:34:26 -08:00
|
|
|
|
|
2013-12-04 13:37:31 -08:00
|
|
|
|
if (error != ENODEV && error != ENOENT) {
|
|
|
|
|
retval = error;
|
2013-01-04 18:34:26 -08:00
|
|
|
|
} else {
|
2013-12-04 13:37:31 -08:00
|
|
|
|
/* The vport isn't really there, even though the dump says
|
|
|
|
|
* it is. Probably we just hit a race after a port
|
|
|
|
|
* disappeared. */
|
2013-01-04 18:34:26 -08:00
|
|
|
|
}
|
2013-12-04 13:37:31 -08:00
|
|
|
|
goto error;
|
2011-11-22 09:25:32 -08:00
|
|
|
|
}
|
2013-12-04 13:37:31 -08:00
|
|
|
|
}
|
2012-06-01 17:40:31 -04:00
|
|
|
|
|
2013-12-04 13:37:31 -08:00
|
|
|
|
if (port_no < keep_channels_nbits) {
|
|
|
|
|
bitmap_set1(keep_channels, port_no);
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
error:
|
2014-02-26 10:10:29 -08:00
|
|
|
|
vport_del_channels(dpif, vport.port_no);
|
2011-01-26 13:41:54 -08:00
|
|
|
|
}
|
2013-12-04 13:37:31 -08:00
|
|
|
|
nl_dump_done(&dump);
|
2014-02-27 14:13:05 -08:00
|
|
|
|
ofpbuf_uninit(&buf);
|
2011-09-14 13:05:09 -07:00
|
|
|
|
|
2013-12-04 13:37:31 -08:00
|
|
|
|
/* Discard any saved channels that we didn't reuse. */
|
|
|
|
|
for (i = 0; i < keep_channels_nbits; i++) {
|
|
|
|
|
if (!bitmap_is_set(keep_channels, i)) {
|
2014-02-26 10:10:29 -08:00
|
|
|
|
vport_del_channels(dpif, u32_to_odp(i));
|
2013-12-04 13:37:31 -08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
free(keep_channels);
|
|
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_recv_set__(struct dpif_netlink *dpif, bool enable)
|
2014-04-17 17:16:34 -07:00
|
|
|
|
OVS_REQ_WRLOCK(dpif->upcall_lock)
|
2013-12-04 13:37:31 -08:00
|
|
|
|
{
|
2014-02-26 10:10:29 -08:00
|
|
|
|
if ((dpif->handlers != NULL) == enable) {
|
2013-12-04 13:37:31 -08:00
|
|
|
|
return 0;
|
|
|
|
|
} else if (!enable) {
|
2014-02-26 10:10:29 -08:00
|
|
|
|
destroy_all_channels(dpif);
|
2013-12-04 13:37:31 -08:00
|
|
|
|
return 0;
|
|
|
|
|
} else {
|
2014-09-18 04:17:54 -07:00
|
|
|
|
return dpif_netlink_refresh_channels(dpif, 1);
|
2013-12-04 13:37:31 -08:00
|
|
|
|
}
|
2009-06-17 14:35:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-07-23 12:41:57 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_recv_set(struct dpif *dpif_, bool enable)
|
2013-07-23 12:41:57 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2013-07-23 12:41:57 -07:00
|
|
|
|
int error;
|
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
fat_rwlock_wrlock(&dpif->upcall_lock);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
error = dpif_netlink_recv_set__(dpif, enable);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
fat_rwlock_unlock(&dpif->upcall_lock);
|
2013-07-23 12:41:57 -07:00
|
|
|
|
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
2014-03-07 10:57:36 -08:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_handlers_set(struct dpif *dpif_, uint32_t n_handlers)
|
2014-03-07 10:57:36 -08:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
int error = 0;
|
|
|
|
|
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#ifdef _WIN32
|
|
|
|
|
/* Multiple upcall handlers will be supported once kernel datapath supports
|
|
|
|
|
* it. */
|
|
|
|
|
if (n_handlers > 1) {
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
fat_rwlock_wrlock(&dpif->upcall_lock);
|
|
|
|
|
if (dpif->handlers) {
|
2014-09-18 04:17:54 -07:00
|
|
|
|
error = dpif_netlink_refresh_channels(dpif, n_handlers);
|
2014-02-26 10:10:29 -08:00
|
|
|
|
}
|
|
|
|
|
fat_rwlock_unlock(&dpif->upcall_lock);
|
|
|
|
|
|
|
|
|
|
return error;
|
2014-03-07 10:57:36 -08:00
|
|
|
|
}
|
|
|
|
|
|
2010-07-20 11:23:21 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
|
2010-07-20 11:23:21 -07:00
|
|
|
|
uint32_t queue_id, uint32_t *priority)
|
|
|
|
|
{
|
|
|
|
|
if (queue_id < 0xf000) {
|
2010-07-16 15:50:57 -07:00
|
|
|
|
*priority = TC_H_MAKE(1 << 16, queue_id + 1);
|
2010-07-20 11:23:21 -07:00
|
|
|
|
return 0;
|
|
|
|
|
} else {
|
|
|
|
|
return EINVAL;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-17 14:35:35 -07:00
|
|
|
|
static int
|
2019-12-08 18:09:53 +01:00
|
|
|
|
parse_odp_packet(struct ofpbuf *buf, struct dpif_upcall *upcall,
|
|
|
|
|
int *dp_ifindex)
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
{
|
2011-08-18 10:35:40 -07:00
|
|
|
|
static const struct nl_policy ovs_packet_policy[] = {
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
/* Always present. */
|
2011-08-18 10:35:40 -07:00
|
|
|
|
[OVS_PACKET_ATTR_PACKET] = { .type = NL_A_UNSPEC,
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
.min_len = ETH_HEADER_LEN },
|
2011-08-18 10:35:40 -07:00
|
|
|
|
[OVS_PACKET_ATTR_KEY] = { .type = NL_A_NESTED },
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
|
2011-08-18 10:35:40 -07:00
|
|
|
|
/* OVS_PACKET_CMD_ACTION only. */
|
2013-02-15 16:48:32 -08:00
|
|
|
|
[OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
|
2014-08-17 20:19:36 -07:00
|
|
|
|
[OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
|
2015-07-17 21:37:02 -07:00
|
|
|
|
[OVS_PACKET_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
|
ofproto-dpif-upcall: Echo HASH attribute back to datapath.
The kernel datapath may send upcalls with hash info;
ovs-vswitchd should get it from the upcall and then send
it back.
The reason is that:
| When using the kernel datapath, the upcall don't
| include skb hash info relatived. That will introduce
| some problem, because the hash of skb is important
| in kernel stack. For example, VXLAN module uses
| it to select UDP src port. The tx queue selection
| may also use the hash in stack.
|
| Hash is computed in different ways. Hash is random
| for a TCP socket, and hash may be computed in hardware,
| or software stack. Recalculation hash is not easy.
|
| There will be one upcall, without information of skb
| hash, to ovs-vswitchd, for the first packet of a TCP
| session. The rest packets will be processed in Open vSwitch
| modules, hash kept. If this tcp session is forward to
| VXLAN module, then the UDP src port of first tcp packet
| is different from rest packets.
|
| TCP packets may come from the host or dockers, to Open vSwitch.
| To fix it, we store the hash info to upcall, and restore hash
| when packets sent back.
Reported-at: https://mail.openvswitch.org/pipermail/ovs-dev/2019-October/364062.html
Link: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=bd1903b7c4596ba6f7677d0dfefd05ba5876707d
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
2019-11-15 10:58:59 +08:00
|
|
|
|
[OVS_PACKET_ATTR_MRU] = { .type = NL_A_U16, .optional = true },
|
|
|
|
|
[OVS_PACKET_ATTR_HASH] = { .type = NL_A_U64, .optional = true }
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
};
|
|
|
|
|
|
2016-02-18 15:13:09 -08:00
|
|
|
|
struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
|
|
|
|
|
struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
|
|
|
|
|
struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
|
|
|
|
|
struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
|
2011-01-26 13:41:54 -08:00
|
|
|
|
|
2016-02-18 15:13:09 -08:00
|
|
|
|
struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
|
2011-08-18 10:35:40 -07:00
|
|
|
|
if (!nlmsg || !genl || !ovs_header
|
|
|
|
|
|| nlmsg->nlmsg_type != ovs_packet_family
|
|
|
|
|
|| !nl_policy_parse(&b, 0, ovs_packet_policy, a,
|
|
|
|
|
ARRAY_SIZE(ovs_packet_policy))) {
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
return EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
2016-02-18 15:13:09 -08:00
|
|
|
|
int type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
|
|
|
|
|
: genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
|
|
|
|
|
: -1);
|
2011-01-28 13:55:04 -08:00
|
|
|
|
if (type < 0) {
|
|
|
|
|
return EINVAL;
|
|
|
|
|
}
|
2011-01-26 07:14:04 -08:00
|
|
|
|
|
2013-12-17 15:54:30 -08:00
|
|
|
|
/* (Re)set ALL fields of '*upcall' on successful return. */
|
2011-01-28 13:55:04 -08:00
|
|
|
|
upcall->type = type;
|
2012-07-13 16:00:29 -07:00
|
|
|
|
upcall->key = CONST_CAST(struct nlattr *,
|
|
|
|
|
nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
|
2011-08-18 10:35:40 -07:00
|
|
|
|
upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
|
2019-12-08 18:09:53 +01:00
|
|
|
|
odp_flow_key_hash(upcall->key, upcall->key_len, &upcall->ufid);
|
2013-02-15 16:48:32 -08:00
|
|
|
|
upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
|
2014-08-17 20:19:36 -07:00
|
|
|
|
upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
|
2015-07-17 21:37:02 -07:00
|
|
|
|
upcall->actions = a[OVS_PACKET_ATTR_ACTIONS];
|
2015-02-26 15:52:34 -08:00
|
|
|
|
upcall->mru = a[OVS_PACKET_ATTR_MRU];
|
ofproto-dpif-upcall: Echo HASH attribute back to datapath.
The kernel datapath may send upcalls with hash info;
ovs-vswitchd should get it from the upcall and then send
it back.
The reason is that:
| When using the kernel datapath, the upcall don't
| include skb hash info relatived. That will introduce
| some problem, because the hash of skb is important
| in kernel stack. For example, VXLAN module uses
| it to select UDP src port. The tx queue selection
| may also use the hash in stack.
|
| Hash is computed in different ways. Hash is random
| for a TCP socket, and hash may be computed in hardware,
| or software stack. Recalculation hash is not easy.
|
| There will be one upcall, without information of skb
| hash, to ovs-vswitchd, for the first packet of a TCP
| session. The rest packets will be processed in Open vSwitch
| modules, hash kept. If this tcp session is forward to
| VXLAN module, then the UDP src port of first tcp packet
| is different from rest packets.
|
| TCP packets may come from the host or dockers, to Open vSwitch.
| To fix it, we store the hash info to upcall, and restore hash
| when packets sent back.
Reported-at: https://mail.openvswitch.org/pipermail/ovs-dev/2019-October/364062.html
Link: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=bd1903b7c4596ba6f7677d0dfefd05ba5876707d
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
2019-11-15 10:58:59 +08:00
|
|
|
|
upcall->hash = a[OVS_PACKET_ATTR_HASH];
|
2013-12-16 08:14:52 -08:00
|
|
|
|
|
|
|
|
|
/* Allow overwriting the netlink attribute header without reallocating. */
|
2015-02-22 03:21:09 -08:00
|
|
|
|
dp_packet_use_stub(&upcall->packet,
|
2013-12-16 08:14:52 -08:00
|
|
|
|
CONST_CAST(struct nlattr *,
|
|
|
|
|
nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
|
|
|
|
|
nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
|
|
|
|
|
sizeof(struct nlattr));
|
2015-02-22 03:21:09 -08:00
|
|
|
|
dp_packet_set_data(&upcall->packet,
|
|
|
|
|
(char *)dp_packet_data(&upcall->packet) + sizeof(struct nlattr));
|
|
|
|
|
dp_packet_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));
|
2013-12-16 08:14:52 -08:00
|
|
|
|
|
2017-04-25 16:29:59 +00:00
|
|
|
|
if (nl_attr_find__(upcall->key, upcall->key_len, OVS_KEY_ATTR_ETHERNET)) {
|
|
|
|
|
/* Ethernet frame */
|
|
|
|
|
upcall->packet.packet_type = htonl(PT_ETH);
|
|
|
|
|
} else {
|
|
|
|
|
/* Non-Ethernet packet. Get the Ethertype from the NL attributes */
|
|
|
|
|
ovs_be16 ethertype = 0;
|
|
|
|
|
const struct nlattr *et_nla = nl_attr_find__(upcall->key,
|
|
|
|
|
upcall->key_len,
|
|
|
|
|
OVS_KEY_ATTR_ETHERTYPE);
|
|
|
|
|
if (et_nla) {
|
|
|
|
|
ethertype = nl_attr_get_be16(et_nla);
|
|
|
|
|
}
|
|
|
|
|
upcall->packet.packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE,
|
|
|
|
|
ntohs(ethertype));
|
|
|
|
|
dp_packet_set_l3(&upcall->packet, dp_packet_data(&upcall->packet));
|
|
|
|
|
}
|
|
|
|
|
|
2011-08-18 10:35:40 -07:00
|
|
|
|
*dp_ifindex = ovs_header->dp_ifindex;
|
2011-01-26 13:41:54 -08:00
|
|
|
|
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#ifdef _WIN32
/* Upper bound on nl_sock_recv() attempts per call, so that a steady stream
 * of packets cannot keep a single invocation spinning forever. */
#define PACKET_RECV_BATCH_SIZE 50

/* Windows implementation of upcall reception for 'dpif'.
 *
 * Polls each socket in the handler's vport socket pool for a packet destined
 * for this datapath.  On success, fills in '*upcall' (with data backed by
 * 'buf') and returns 0.  Returns EAGAIN if no packet is available or the
 * per-call read budget is exhausted, or a positive errno value on other
 * errors. */
static int
dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
                          struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;
    struct dpif_windows_vport_sock *sock_pool;
    uint32_t i;

    if (!dpif->handlers) {
        return EAGAIN;
    }

    /* Only one handler is supported currently. */
    if (handler_id >= 1) {
        return EAGAIN;
    }

    if (handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    sock_pool = handler->vport_sock_pool;

    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
        for (;;) {
            int dp_ifindex;
            int error;

            if (++read_tries > PACKET_RECV_BATCH_SIZE) {
                return EAGAIN;
            }

            error = nl_sock_recv(sock_pool[i].nl_sock, buf, NULL, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed.  Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                /* XXX: report_loss(dpif, ch, idx, handler_id); */
                continue;
            }

            /* XXX: ch->last_poll = time_msec(); */
            if (error) {
                if (error == EAGAIN) {
                    /* This socket is drained; move on to the next one. */
                    break;
                }
                return error;
            }

            error = parse_odp_packet(buf, upcall, &dp_ifindex);
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
            /* Packet was for a different datapath; keep reading. */
        }
    }

    return EAGAIN;
}
#else
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
/* Linux implementation of upcall reception for 'dpif'.
 *
 * Uses the handler's epoll set to find per-port Netlink channels with
 * pending upcalls.  epoll results are cached on the handler
 * ('epoll_events', 'n_events', 'event_offset') so that one epoll_wait()
 * batch can be consumed across several invocations.  On success, fills in
 * '*upcall' (with data backed by 'buf') and returns 0.  Returns EAGAIN if
 * nothing is pending or the read budget is exhausted, or a positive errno
 * value on other errors. */
static int
dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id,
                    struct dpif_upcall *upcall, struct ofpbuf *buf)
    OVS_REQ_RDLOCK(dpif->upcall_lock)
{
    struct dpif_handler *handler;
    int read_tries = 0;

    if (!dpif->handlers || handler_id >= dpif->n_handlers) {
        return EAGAIN;
    }

    handler = &dpif->handlers[handler_id];
    if (handler->event_offset >= handler->n_events) {
        int retval;

        /* All cached events consumed: poll for a fresh batch without
         * blocking (timeout of 0), retrying if interrupted by a signal. */
        handler->event_offset = handler->n_events = 0;

        do {
            retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
                                dpif->uc_array_size, 0);
        } while (retval < 0 && errno == EINTR);

        if (retval < 0) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
            VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
        } else if (retval > 0) {
            handler->n_events = retval;
        }
    }

    while (handler->event_offset < handler->n_events) {
        /* Each epoll event's data.u32 holds the index of the channel that
         * became readable. */
        int idx = handler->epoll_events[handler->event_offset].data.u32;
        struct dpif_channel *ch = &dpif->channels[idx];

        handler->event_offset++;

        for (;;) {
            int dp_ifindex;
            int error;

            if (++read_tries > 50) {
                return EAGAIN;
            }

            error = nl_sock_recv(ch->sock, buf, NULL, false);
            if (error == ENOBUFS) {
                /* ENOBUFS typically means that we've received so many
                 * packets that the buffer overflowed.  Try again
                 * immediately because there's almost certainly a packet
                 * waiting for us. */
                report_loss(dpif, ch, idx, handler_id);
                continue;
            }

            ch->last_poll = time_msec();
            if (error) {
                if (error == EAGAIN) {
                    /* Channel drained; go on to the next epoll event. */
                    break;
                }
                return error;
            }

            error = parse_odp_packet(buf, upcall, &dp_ifindex);
            if (!error && dp_ifindex == dpif->dp_ifindex) {
                return 0;
            } else if (error) {
                return error;
            }
            /* Packet was for a different datapath; keep reading. */
        }
    }

    return EAGAIN;
}
#endif
|
2009-06-17 14:35:35 -07:00
|
|
|
|
|
2013-07-23 12:41:57 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id,
|
|
|
|
|
struct dpif_upcall *upcall, struct ofpbuf *buf)
|
2013-07-23 12:41:57 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2013-07-23 12:41:57 -07:00
|
|
|
|
int error;
|
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
fat_rwlock_rdlock(&dpif->upcall_lock);
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#ifdef _WIN32
|
|
|
|
|
error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf);
|
|
|
|
|
#else
|
2014-09-18 04:17:54 -07:00
|
|
|
|
error = dpif_netlink_recv__(dpif, handler_id, upcall, buf);
|
2014-10-23 08:27:34 -07:00
|
|
|
|
#endif
|
2014-02-26 10:10:29 -08:00
|
|
|
|
fat_rwlock_unlock(&dpif->upcall_lock);
|
2013-07-23 12:41:57 -07:00
|
|
|
|
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-17 14:35:35 -07:00
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_recv_wait__(struct dpif_netlink *dpif, uint32_t handler_id)
|
2014-04-17 17:16:34 -07:00
|
|
|
|
OVS_REQ_RDLOCK(dpif->upcall_lock)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
#ifdef _WIN32
|
2014-10-23 08:27:34 -07:00
|
|
|
|
uint32_t i;
|
|
|
|
|
struct dpif_windows_vport_sock *sock_pool =
|
|
|
|
|
dpif->handlers[handler_id].vport_sock_pool;
|
|
|
|
|
|
|
|
|
|
/* Only one handler is supported currently. */
|
|
|
|
|
if (handler_id >= 1) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
|
|
|
|
|
nl_sock_wait(sock_pool[i].nl_sock, POLLIN);
|
|
|
|
|
}
|
2014-09-18 04:17:54 -07:00
|
|
|
|
#else
|
2014-02-26 10:10:29 -08:00
|
|
|
|
if (dpif->handlers && handler_id < dpif->n_handlers) {
|
|
|
|
|
struct dpif_handler *handler = &dpif->handlers[handler_id];
|
|
|
|
|
|
|
|
|
|
poll_fd_wait(handler->epoll_fd, POLLIN);
|
2011-09-16 15:23:37 -07:00
|
|
|
|
}
|
2014-09-18 04:17:54 -07:00
|
|
|
|
#endif
|
2009-06-17 14:35:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
2011-01-04 17:00:36 -08:00
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_recv_wait(struct dpif *dpif_, uint32_t handler_id)
|
2011-01-04 17:00:36 -08:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
2011-09-16 15:23:37 -07:00
|
|
|
|
|
2014-04-17 17:16:34 -07:00
|
|
|
|
fat_rwlock_rdlock(&dpif->upcall_lock);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_recv_wait__(dpif, handler_id);
|
2014-04-17 17:16:34 -07:00
|
|
|
|
fat_rwlock_unlock(&dpif->upcall_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_recv_purge__(struct dpif_netlink *dpif)
|
2014-04-17 17:16:34 -07:00
|
|
|
|
OVS_REQ_WRLOCK(dpif->upcall_lock)
|
|
|
|
|
{
|
2014-02-26 10:10:29 -08:00
|
|
|
|
if (dpif->handlers) {
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
size_t i;
|
2014-02-26 10:10:29 -08:00
|
|
|
|
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
if (!dpif->channels[0].sock) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
2014-02-26 10:10:29 -08:00
|
|
|
|
for (i = 0; i < dpif->uc_array_size; i++ ) {
|
2011-01-04 17:00:36 -08:00
|
|
|
|
|
dpif-netlink: don't allocate per thread netlink sockets
When using the kernel datapath, OVS allocates a pool of sockets to handle
netlink events. The number of sockets is: ports * n-handler-threads, where
n-handler-threads is user configurable and defaults to 3/4*number of cores.
This because vswitchd starts n-handler-threads threads, each one with a
netlink socket for every port of the switch. Every thread then, starts
listening on events on its set of sockets with epoll().
On setup with lot of CPUs and ports, the number of sockets easily hits
the process file descriptor limit, and ovs-vswitchd will exit with -EMFILE.
Change the number of allocated sockets to just one per port by moving
the socket array from a per handler structure to a per datapath one,
and let all the handlers share the same sockets by using EPOLLEXCLUSIVE
epoll flag which avoids duplicate events, on systems that support it.
The patch was tested on a 56 core machine running Linux 4.18 and latest
Open vSwitch. A bridge was created with 2000+ ports, some of them being
veth interfaces with the peer outside the bridge. The latency of the upcall
is measured by setting a single 'action=controller,local' OpenFlow rule to
force all the packets going to the slow path and then to the local port.
A tool[1] injects some packets to the veth outside the bridge, and measures
the delay until the packet is captured on the local port. The rx timestamp
is get from the socket ancillary data in the attribute SO_TIMESTAMPNS, to
avoid having the scheduler delay in the measured time.
The first test measures the average latency for an upcall generated from
a single port. To measure it 100k packets, one every msec, are sent to a
single port and the latencies are measured.
The second test is meant to check latency fairness among ports, namely if
latency is equal between ports or if some ports have lower priority.
The previous test is repeated for every port, the average of the average
latencies and the standard deviation between averages is measured.
The third test serves to measure responsiveness under load. Heavy traffic
is sent through all ports, latency and packet loss is measured
on a single idle port.
The fourth test is all about fairness. Heavy traffic is injected in all
ports but one, latency and packet loss is measured on the single idle port.
This is the test setup:
# nproc
56
# ovs-vsctl show |grep -c Port
2223
# ovs-ofctl dump-flows ovs_upc_br
cookie=0x0, duration=4.827s, table=0, n_packets=0, n_bytes=0, actions=CONTROLLER:65535,LOCAL
# uname -a
Linux fc28 4.18.7-200.fc28.x86_64 #1 SMP Mon Sep 10 15:44:45 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
And these are the results of the tests:
Stock OVS Patched
netlink sockets
in use by vswitchd
lsof -p $(pidof ovs-vswitchd) \
|grep -c GENERIC 91187 2227
Test 1
one port latency
min/avg/max/mdev (us) 2.7/6.6/238.7/1.8 1.6/6.8/160.6/1.7
Test 2
all port
avg latency/mdev (us) 6.51/0.97 6.86/0.17
Test 3
single port latency
under load
avg/mdev (us) 7.5/5.9 3.8/4.8
packet loss 95 % 62 %
Test 4
idle port latency
under load
min/avg/max/mdev (us) 0.8/1.5/210.5/0.9 1.0/2.1/344.5/1.2
packet loss 94 % 4 %
CPU and RAM usage seems not to be affected, the resource usage of vswitchd
idle with 2000+ ports is unchanged:
# ps u $(pidof ovs-vswitchd)
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
openvsw+ 5430 54.3 0.3 4263964 510968 pts/1 RLl+ 16:20 0:50 ovs-vswitchd
Additionally, to check if vswitchd is thread safe with this patch, the
following test was run for circa 48 hours: on a 56 core machine, a
bridge with kernel datapath is filled with 2200 dummy interfaces and 22
veth, then 22 traffic generators are run in parallel piping traffic into
the veths peers outside the bridge.
To generate as many upcalls as possible, all packets were forced to the
slowpath with an openflow rule like 'action=controller,local' and packet
size was set to 64 byte. Also, to avoid overflowing the FDB early and
slowing down the upcall processing, generated mac addresses were restricted
to a small interval. vswitchd ran without problems for 48+ hours,
obviously with all the handler threads with almost 99% CPU usage.
[1] https://github.com/teknoraver/network-tools/blob/master/weed.c
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Acked-by: Flavio Leitner <fbl@sysclose.org>
2018-09-25 10:51:05 +02:00
|
|
|
|
nl_sock_drain(dpif->channels[i].sock);
|
2013-01-04 18:34:26 -08:00
|
|
|
|
}
|
2011-01-04 17:00:36 -08:00
|
|
|
|
}
|
2014-04-17 17:16:34 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* dpif 'recv_purge' implementation: discards all queued upcalls.
 *
 * Takes the upcall lock exclusively so that no handler thread can be
 * receiving while the per-channel queues are drained. */
static void
dpif_netlink_recv_purge(struct dpif *dpif_)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);

    fat_rwlock_wrlock(&dpif->upcall_lock);
    dpif_netlink_recv_purge__(dpif);
    fat_rwlock_unlock(&dpif->upcall_lock);
}
|
|
|
|
|
|
2014-10-16 15:23:11 -07:00
|
|
|
|
/* Returns the version string of the Linux openvswitch kernel module as
 * read from sysfs, as a heap-allocated string that the caller must free,
 * or NULL if it cannot be determined (non-Linux build, module not loaded,
 * or the sysfs file is unreadable/empty). */
static char *
dpif_netlink_get_datapath_version(void)
{
    char *version_str = NULL;

#ifdef __linux__
#define MAX_VERSION_STR_SIZE 80
#define LINUX_DATAPATH_VERSION_FILE "/sys/module/openvswitch/version"
    FILE *f = fopen(LINUX_DATAPATH_VERSION_FILE, "r");

    if (f) {
        char version[MAX_VERSION_STR_SIZE];

        if (fgets(version, sizeof version, f)) {
            /* Strip the trailing newline, if fgets kept one. */
            char *newline = strchr(version, '\n');

            if (newline) {
                *newline = '\0';
            }
            version_str = xstrdup(version);
        }
        fclose(f);
    }
#endif

    return version_str;
}
|
|
|
|
|
|
2015-10-28 11:26:18 -07:00
|
|
|
|
/* State carried across one conntrack-entry dump (dpif ct_dump_start /
 * ct_dump_next / ct_dump_done).  Wraps the generic netlink conntrack
 * dump state. */
struct dpif_netlink_ct_dump_state {
    struct ct_dpif_dump_state up;        /* Base state; embedded so callers
                                          * can recover this struct with
                                          * INIT_CONTAINER(). */
    struct nl_ct_dump_state *nl_ct_dump; /* Underlying netlink dump. */
};
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_dump_start(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
struct ct_dpif_dump_state **dump_,
|
2017-08-01 20:12:03 -07:00
|
|
|
|
const uint16_t *zone, int *ptot_bkts)
|
2015-10-28 11:26:18 -07:00
|
|
|
|
{
|
|
|
|
|
struct dpif_netlink_ct_dump_state *dump;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
dump = xzalloc(sizeof *dump);
|
2017-08-01 20:12:03 -07:00
|
|
|
|
err = nl_ct_dump_start(&dump->nl_ct_dump, zone, ptot_bkts);
|
2015-10-28 11:26:18 -07:00
|
|
|
|
if (err) {
|
|
|
|
|
free(dump);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
*dump_ = &dump->up;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_dump_next(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
struct ct_dpif_dump_state *dump_,
|
|
|
|
|
struct ct_dpif_entry *entry)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_netlink_ct_dump_state *dump;
|
|
|
|
|
|
|
|
|
|
INIT_CONTAINER(dump, dump_, up);
|
|
|
|
|
|
|
|
|
|
return nl_ct_dump_next(dump->nl_ct_dump, entry);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_dump_done(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
struct ct_dpif_dump_state *dump_)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_netlink_ct_dump_state *dump;
|
|
|
|
|
|
|
|
|
|
INIT_CONTAINER(dump, dump_, up);
|
|
|
|
|
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
int err = nl_ct_dump_done(dump->nl_ct_dump);
|
2015-10-28 11:26:18 -07:00
|
|
|
|
free(dump);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
2015-10-28 10:34:26 -07:00
|
|
|
|
|
|
|
|
|
static int
|
2017-12-07 10:40:03 -08:00
|
|
|
|
dpif_netlink_ct_flush(struct dpif *dpif OVS_UNUSED, const uint16_t *zone,
|
|
|
|
|
const struct ct_dpif_tuple *tuple)
|
2015-10-28 10:34:26 -07:00
|
|
|
|
{
|
2017-12-07 10:40:03 -08:00
|
|
|
|
if (tuple) {
|
|
|
|
|
return nl_ct_flush_tuple(tuple, zone ? *zone : 0);
|
|
|
|
|
} else if (zone) {
|
2015-10-28 10:34:26 -07:00
|
|
|
|
return nl_ct_flush_zone(*zone);
|
|
|
|
|
} else {
|
|
|
|
|
return nl_ct_flush();
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-10-28 11:26:18 -07:00
|
|
|
|
|
2018-08-17 02:05:09 -07:00
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_set_limits(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
const uint32_t *default_limits,
|
|
|
|
|
const struct ovs_list *zone_limits)
|
|
|
|
|
{
|
|
|
|
|
struct ovs_zone_limit req_zone_limit;
|
|
|
|
|
|
|
|
|
|
if (ovs_ct_limit_family < 0) {
|
|
|
|
|
return EOPNOTSUPP;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
|
|
|
|
|
nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
|
|
|
|
|
NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_SET,
|
|
|
|
|
OVS_CT_LIMIT_VERSION);
|
|
|
|
|
|
|
|
|
|
struct ovs_header *ovs_header;
|
|
|
|
|
ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
|
|
|
|
|
ovs_header->dp_ifindex = 0;
|
|
|
|
|
|
|
|
|
|
size_t opt_offset;
|
|
|
|
|
opt_offset = nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
|
|
|
|
|
if (default_limits) {
|
|
|
|
|
req_zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
|
|
|
|
|
req_zone_limit.limit = *default_limits;
|
|
|
|
|
nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!ovs_list_is_empty(zone_limits)) {
|
|
|
|
|
struct ct_dpif_zone_limit *zone_limit;
|
|
|
|
|
|
|
|
|
|
LIST_FOR_EACH (zone_limit, node, zone_limits) {
|
|
|
|
|
req_zone_limit.zone_id = zone_limit->zone;
|
|
|
|
|
req_zone_limit.limit = zone_limit->limit;
|
|
|
|
|
nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
nl_msg_end_nested(request, opt_offset);
|
|
|
|
|
|
|
|
|
|
int err = nl_transact(NETLINK_GENERIC, request, NULL);
|
2019-03-05 15:27:01 -08:00
|
|
|
|
ofpbuf_delete(request);
|
2018-08-17 02:05:09 -07:00
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Parses an OVS_CT_LIMIT_CMD_GET reply in 'buf' into '*default_limit'
 * (when the reply carries an entry for the default zone) and
 * 'zone_limits' (one ct_dpif_zone_limit per valid zone entry).
 *
 * Returns 0 on success, EINVAL if the message headers are truncated,
 * carry an unexpected family, or lack the zone-limit attribute. */
static int
dpif_netlink_zone_limits_from_ofpbuf(const struct ofpbuf *buf,
                                     uint32_t *default_limit,
                                     struct ovs_list *zone_limits)
{
    static const struct nl_policy ovs_ct_limit_policy[] = {
        [OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NL_A_NESTED,
                                           .optional = true },
    };

    /* Peel off the netlink, genetlink, and OVS headers in order. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *attr[ARRAY_SIZE(ovs_ct_limit_policy)];

    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_ct_limit_family
        || !nl_policy_parse(&b, 0, ovs_ct_limit_policy, attr,
                            ARRAY_SIZE(ovs_ct_limit_policy))) {
        return EINVAL;
    }


    if (!attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
        return EINVAL;
    }

    /* The nested attribute holds a packed array of struct ovs_zone_limit;
     * walk it entry by entry, respecting netlink alignment. */
    int rem = NLA_ALIGN(
        nl_attr_get_size(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]));
    const struct ovs_zone_limit *zone_limit =
        nl_attr_get(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]);

    while (rem >= sizeof *zone_limit) {
        if (zone_limit->zone_id == OVS_ZONE_LIMIT_DEFAULT_ZONE) {
            *default_limit = zone_limit->limit;
        } else if (zone_limit->zone_id < OVS_ZONE_LIMIT_DEFAULT_ZONE ||
                   zone_limit->zone_id > UINT16_MAX) {
            /* Out-of-range zone id: silently skipped. */
        } else {
            ct_dpif_push_zone_limit(zone_limits, zone_limit->zone_id,
                                    zone_limit->limit, zone_limit->count);
        }
        rem -= NLA_ALIGN(sizeof *zone_limit);
        zone_limit = ALIGNED_CAST(struct ovs_zone_limit *,
            (unsigned char *) zone_limit + NLA_ALIGN(sizeof *zone_limit));
    }
    return 0;
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_get_limits(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
uint32_t *default_limit,
|
|
|
|
|
const struct ovs_list *zone_limits_request,
|
|
|
|
|
struct ovs_list *zone_limits_reply)
|
|
|
|
|
{
|
|
|
|
|
if (ovs_ct_limit_family < 0) {
|
|
|
|
|
return EOPNOTSUPP;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
|
|
|
|
|
nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
|
|
|
|
|
NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_GET,
|
|
|
|
|
OVS_CT_LIMIT_VERSION);
|
|
|
|
|
|
|
|
|
|
struct ovs_header *ovs_header;
|
|
|
|
|
ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
|
|
|
|
|
ovs_header->dp_ifindex = 0;
|
|
|
|
|
|
|
|
|
|
if (!ovs_list_is_empty(zone_limits_request)) {
|
|
|
|
|
size_t opt_offset = nl_msg_start_nested(request,
|
|
|
|
|
OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
|
|
|
|
|
|
|
|
|
|
struct ovs_zone_limit req_zone_limit;
|
|
|
|
|
req_zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
|
|
|
|
|
nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
|
|
|
|
|
|
|
|
|
|
struct ct_dpif_zone_limit *zone_limit;
|
|
|
|
|
LIST_FOR_EACH (zone_limit, node, zone_limits_request) {
|
|
|
|
|
req_zone_limit.zone_id = zone_limit->zone;
|
|
|
|
|
nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nl_msg_end_nested(request, opt_offset);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct ofpbuf *reply;
|
|
|
|
|
int err = nl_transact(NETLINK_GENERIC, request, &reply);
|
|
|
|
|
if (err) {
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = dpif_netlink_zone_limits_from_ofpbuf(reply, default_limit,
|
|
|
|
|
zone_limits_reply);
|
|
|
|
|
|
|
|
|
|
out:
|
2019-03-05 15:27:01 -08:00
|
|
|
|
ofpbuf_delete(request);
|
|
|
|
|
ofpbuf_delete(reply);
|
2018-08-17 02:05:09 -07:00
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_del_limits(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
const struct ovs_list *zone_limits)
|
|
|
|
|
{
|
|
|
|
|
if (ovs_ct_limit_family < 0) {
|
|
|
|
|
return EOPNOTSUPP;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
|
|
|
|
|
nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
|
|
|
|
|
NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_DEL,
|
|
|
|
|
OVS_CT_LIMIT_VERSION);
|
|
|
|
|
|
|
|
|
|
struct ovs_header *ovs_header;
|
|
|
|
|
ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
|
|
|
|
|
ovs_header->dp_ifindex = 0;
|
|
|
|
|
|
|
|
|
|
if (!ovs_list_is_empty(zone_limits)) {
|
|
|
|
|
size_t opt_offset =
|
|
|
|
|
nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
|
|
|
|
|
|
|
|
|
|
struct ct_dpif_zone_limit *zone_limit;
|
|
|
|
|
LIST_FOR_EACH (zone_limit, node, zone_limits) {
|
|
|
|
|
struct ovs_zone_limit req_zone_limit;
|
|
|
|
|
req_zone_limit.zone_id = zone_limit->zone;
|
|
|
|
|
nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
|
|
|
|
|
}
|
|
|
|
|
nl_msg_end_nested(request, opt_offset);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int err = nl_transact(NETLINK_GENERIC, request, NULL);
|
|
|
|
|
|
2019-03-05 15:27:01 -08:00
|
|
|
|
ofpbuf_delete(request);
|
2018-08-17 02:05:09 -07:00
|
|
|
|
return err;
|
|
|
|
|
}
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
|
|
|
|
|
/* Prefix for the kernel-side names of conntrack timeout policies that
 * OVS installs (see dpif_netlink_format_tp_name()). */
#define NL_TP_NAME_PREFIX "ovs_tp_"

/* One L3/L4 protocol pair for which a kernel timeout sub-policy exists. */
struct dpif_netlink_timeout_policy_protocol {
    uint16_t l3num;   /* Address family: AF_INET or AF_INET6. */
    uint8_t l4num;    /* IPPROTO_TCP, IPPROTO_UDP, IPPROTO_ICMP(V6). */
};

/* Indexes of the protocol pairs a generic dpif timeout policy is broken
 * down into. */
enum OVS_PACKED_ENUM dpif_netlink_support_timeout_policy_protocol {
    DPIF_NL_TP_AF_INET_TCP,
    DPIF_NL_TP_AF_INET_UDP,
    DPIF_NL_TP_AF_INET_ICMP,
    DPIF_NL_TP_AF_INET6_TCP,
    DPIF_NL_TP_AF_INET6_UDP,
    DPIF_NL_TP_AF_INET6_ICMPV6,
    DPIF_NL_TP_MAX
};

/* Bitmask with one bit set per supported protocol pair. */
#define DPIF_NL_ALL_TP ((1UL << DPIF_NL_TP_MAX) - 1)


/* Concrete (l3num, l4num) values for each protocol pair. */
static struct dpif_netlink_timeout_policy_protocol tp_protos[] = {
    [DPIF_NL_TP_AF_INET_TCP] = { .l3num = AF_INET, .l4num = IPPROTO_TCP },
    [DPIF_NL_TP_AF_INET_UDP] = { .l3num = AF_INET, .l4num = IPPROTO_UDP },
    [DPIF_NL_TP_AF_INET_ICMP] = { .l3num = AF_INET, .l4num = IPPROTO_ICMP },
    [DPIF_NL_TP_AF_INET6_TCP] = { .l3num = AF_INET6, .l4num = IPPROTO_TCP },
    [DPIF_NL_TP_AF_INET6_UDP] = { .l3num = AF_INET6, .l4num = IPPROTO_UDP },
    [DPIF_NL_TP_AF_INET6_ICMPV6] = { .l3num = AF_INET6,
                                     .l4num = IPPROTO_ICMPV6 },
};
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netlink_format_tp_name(uint32_t id, uint16_t l3num, uint8_t l4num,
|
2019-08-28 15:14:29 -07:00
|
|
|
|
char **tp_name)
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
{
|
2019-08-28 15:14:29 -07:00
|
|
|
|
struct ds ds = DS_EMPTY_INITIALIZER;
|
|
|
|
|
ds_put_format(&ds, "%s%"PRIu32"_", NL_TP_NAME_PREFIX, id);
|
|
|
|
|
ct_dpif_format_ipproto(&ds, l4num);
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
|
|
|
|
|
if (l3num == AF_INET) {
|
2019-08-28 15:14:29 -07:00
|
|
|
|
ds_put_cstr(&ds, "4");
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
} else if (l3num == AF_INET6 && l4num != IPPROTO_ICMPV6) {
|
2019-08-28 15:14:29 -07:00
|
|
|
|
ds_put_cstr(&ds, "6");
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
}
|
|
|
|
|
|
2019-08-28 15:14:29 -07:00
|
|
|
|
ovs_assert(ds.length < CTNL_TIMEOUT_NAME_MAX);
|
|
|
|
|
|
|
|
|
|
*tp_name = ds_steal_cstr(&ds);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_get_timeout_policy_name(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
uint32_t tp_id, uint16_t dl_type,
|
|
|
|
|
uint8_t nw_proto, char **tp_name,
|
|
|
|
|
bool *is_generic)
|
|
|
|
|
{
|
|
|
|
|
dpif_netlink_format_tp_name(tp_id,
|
|
|
|
|
dl_type == ETH_TYPE_IP ? AF_INET : AF_INET6,
|
|
|
|
|
nw_proto, tp_name);
|
|
|
|
|
*is_generic = false;
|
|
|
|
|
return 0;
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Tables pairing each generic ct_dpif timeout attribute (PROTO1/ATTR1)
 * with its kernel CTA_TIMEOUT_* counterpart (PROTO2/ATTR2).  Expanded
 * twice with different CT_DPIF_NL_TP_MAPPING definitions: once to copy
 * dpif -> netlink (below) and once for the reverse direction. */
#define CT_DPIF_NL_TP_TCP_MAPPINGS                                  \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT, SYN_SENT)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_RECV, SYN_RECV)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, ESTABLISHED, ESTABLISHED)       \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, FIN_WAIT, FIN_WAIT)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE_WAIT, CLOSE_WAIT)         \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, LAST_ACK, LAST_ACK)             \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, TIME_WAIT, TIME_WAIT)           \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE, CLOSE)                   \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT2, SYN_SENT2)           \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, RETRANSMIT, RETRANS)            \
    CT_DPIF_NL_TP_MAPPING(TCP, TCP, UNACK, UNACK)

#define CT_DPIF_NL_TP_UDP_MAPPINGS                                  \
    CT_DPIF_NL_TP_MAPPING(UDP, UDP, SINGLE, UNREPLIED)              \
    CT_DPIF_NL_TP_MAPPING(UDP, UDP, MULTIPLE, REPLIED)

#define CT_DPIF_NL_TP_ICMP_MAPPINGS                                 \
    CT_DPIF_NL_TP_MAPPING(ICMP, ICMP, FIRST, TIMEOUT)

#define CT_DPIF_NL_TP_ICMPV6_MAPPINGS                               \
    CT_DPIF_NL_TP_MAPPING(ICMP, ICMPV6, FIRST, TIMEOUT)


/* dpif -> netlink direction: if the generic policy 'tp' carries
 * PROTO1/ATTR1, mark and copy it into 'nl_tp' as PROTO2/ATTR2. */
#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2)         \
if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) {      \
    nl_tp->present |= 1 << CTA_TIMEOUT_##PROTO2##_##ATTR2;          \
    nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2] =                  \
        tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1];              \
}
|
|
|
|
|
|
|
|
|
|
/* Copies the TCP timeout attributes present in generic policy 'tp' into
 * netlink policy 'nl_tp' (expansion of CT_DPIF_NL_TP_TCP_MAPPINGS). */
static void
dpif_netlink_get_nl_tp_tcp_attrs(const struct ct_dpif_timeout_policy *tp,
                                 struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_TCP_MAPPINGS
}

/* Same as above for the UDP attributes. */
static void
dpif_netlink_get_nl_tp_udp_attrs(const struct ct_dpif_timeout_policy *tp,
                                 struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_UDP_MAPPINGS
}

/* Same as above for the ICMP attributes. */
static void
dpif_netlink_get_nl_tp_icmp_attrs(const struct ct_dpif_timeout_policy *tp,
                                  struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_ICMP_MAPPINGS
}

/* Same as above for the ICMPv6 attributes. */
static void
dpif_netlink_get_nl_tp_icmpv6_attrs(const struct ct_dpif_timeout_policy *tp,
                                    struct nl_ct_timeout_policy *nl_tp)
{
    CT_DPIF_NL_TP_ICMPV6_MAPPINGS
}

#undef CT_DPIF_NL_TP_MAPPING
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netlink_get_nl_tp_attrs(const struct ct_dpif_timeout_policy *tp,
|
|
|
|
|
uint8_t l4num, struct nl_ct_timeout_policy *nl_tp)
|
|
|
|
|
{
|
|
|
|
|
nl_tp->present = 0;
|
|
|
|
|
|
|
|
|
|
if (l4num == IPPROTO_TCP) {
|
|
|
|
|
dpif_netlink_get_nl_tp_tcp_attrs(tp, nl_tp);
|
|
|
|
|
} else if (l4num == IPPROTO_UDP) {
|
|
|
|
|
dpif_netlink_get_nl_tp_udp_attrs(tp, nl_tp);
|
|
|
|
|
} else if (l4num == IPPROTO_ICMP) {
|
|
|
|
|
dpif_netlink_get_nl_tp_icmp_attrs(tp, nl_tp);
|
|
|
|
|
} else if (l4num == IPPROTO_ICMPV6) {
|
|
|
|
|
dpif_netlink_get_nl_tp_icmpv6_attrs(tp, nl_tp);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* netlink -> dpif direction: copies attribute PROTO2/ATTR2 from 'nl_tp'
 * into generic policy 'tp' as PROTO1/ATTR1.  If 'tp' already carries the
 * attribute with a different value (the sub-policies disagree), logs a
 * rate-limited warning and keeps the existing value. */
#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2)             \
if (nl_tp->present & (1 << CTA_TIMEOUT_##PROTO2##_##ATTR2)) {           \
    if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) {      \
        if (tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] !=            \
            nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2]) {             \
            VLOG_WARN_RL(&error_rl, "Inconsistent timeout policy %s "   \
                         "attribute %s=%"PRIu32" while %s=%"PRIu32,     \
                         nl_tp->name, "CTA_TIMEOUT_"#PROTO2"_"#ATTR2,   \
                         nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2],  \
                         "CT_DPIF_TP_ATTR_"#PROTO1"_"#ATTR1,            \
                         tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1]); \
        }                                                               \
    } else {                                                            \
        tp->present |= 1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1;         \
        tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] =                 \
            nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2];               \
    }                                                                   \
}
|
|
|
|
|
|
|
|
|
|
/* Copies the TCP timeout attributes present in netlink policy 'nl_tp'
 * into generic policy 'tp' (expansion of CT_DPIF_NL_TP_TCP_MAPPINGS with
 * the netlink->dpif mapping macro). */
static void
dpif_netlink_set_ct_dpif_tp_tcp_attrs(const struct nl_ct_timeout_policy *nl_tp,
                                      struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_TCP_MAPPINGS
}

/* Same as above for the UDP attributes. */
static void
dpif_netlink_set_ct_dpif_tp_udp_attrs(const struct nl_ct_timeout_policy *nl_tp,
                                      struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_UDP_MAPPINGS
}

/* Same as above for the ICMP attributes. */
static void
dpif_netlink_set_ct_dpif_tp_icmp_attrs(
    const struct nl_ct_timeout_policy *nl_tp,
    struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_ICMP_MAPPINGS
}

/* Same as above for the ICMPv6 attributes. */
static void
dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(
    const struct nl_ct_timeout_policy *nl_tp,
    struct ct_dpif_timeout_policy *tp)
{
    CT_DPIF_NL_TP_ICMPV6_MAPPINGS
}

#undef CT_DPIF_NL_TP_MAPPING
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netlink_set_ct_dpif_tp_attrs(const struct nl_ct_timeout_policy *nl_tp,
|
|
|
|
|
struct ct_dpif_timeout_policy *tp)
|
|
|
|
|
{
|
|
|
|
|
if (nl_tp->l4num == IPPROTO_TCP) {
|
|
|
|
|
dpif_netlink_set_ct_dpif_tp_tcp_attrs(nl_tp, tp);
|
|
|
|
|
} else if (nl_tp->l4num == IPPROTO_UDP) {
|
|
|
|
|
dpif_netlink_set_ct_dpif_tp_udp_attrs(nl_tp, tp);
|
|
|
|
|
} else if (nl_tp->l4num == IPPROTO_ICMP) {
|
|
|
|
|
dpif_netlink_set_ct_dpif_tp_icmp_attrs(nl_tp, tp);
|
|
|
|
|
} else if (nl_tp->l4num == IPPROTO_ICMPV6) {
|
|
|
|
|
dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(nl_tp, tp);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#ifdef _WIN32
|
|
|
|
|
/* Windows datapath stubs: conntrack timeout policies are not supported,
 * so every operation reports EOPNOTSUPP. */
static int
dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   const struct ct_dpif_timeout_policy *tp)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   uint32_t tp_id,
                                   struct ct_dpif_timeout_policy *tp)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   uint32_t tp_id)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
                                          void **statep)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
                                         void *state,
                                         struct ct_dpif_timeout_policy **tp)
{
    return EOPNOTSUPP;
}

static int
dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
                                         void *state)
{
    return EOPNOTSUPP;
}
|
|
|
|
|
#else
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
const struct ct_dpif_timeout_policy *tp)
|
|
|
|
|
{
|
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
|
2019-08-28 15:14:29 -07:00
|
|
|
|
struct nl_ct_timeout_policy nl_tp;
|
|
|
|
|
char *nl_tp_name;
|
|
|
|
|
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
dpif_netlink_format_tp_name(tp->id, tp_protos[i].l3num,
|
|
|
|
|
tp_protos[i].l4num, &nl_tp_name);
|
2019-08-28 15:14:29 -07:00
|
|
|
|
ovs_strlcpy(nl_tp.name, nl_tp_name, sizeof nl_tp.name);
|
|
|
|
|
free(nl_tp_name);
|
|
|
|
|
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
nl_tp.l3num = tp_protos[i].l3num;
|
|
|
|
|
nl_tp.l4num = tp_protos[i].l4num;
|
|
|
|
|
dpif_netlink_get_nl_tp_attrs(tp, tp_protos[i].l4num, &nl_tp);
|
|
|
|
|
err = nl_ct_set_timeout_policy(&nl_tp);
|
|
|
|
|
if (err) {
|
|
|
|
|
VLOG_WARN_RL(&error_rl, "failed to add timeout policy %s (%s)",
|
|
|
|
|
nl_tp.name, ovs_strerror(err));
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
out:
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Retrieves timeout policy 'tp_id' from the kernel and assembles it into the
 * generic policy 'tp'.  The kernel keeps one sub policy per L3/L4 protocol
 * pair, so each entry of 'tp_protos' is queried and merged into 'tp'.
 * Returns 0 on success, or a positive errno value from the first failing
 * kernel query. */
static int
dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
                                   uint32_t tp_id,
                                   struct ct_dpif_timeout_policy *tp)
{
    int err = 0;

    tp->id = tp_id;
    tp->present = 0;
    for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
        struct nl_ct_timeout_policy nl_tp;
        char *nl_tp_name;

        /* 'nl_tp_name' is allocated by the formatter and owned here; it must
         * be freed on every path below. */
        dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num,
                                    tp_protos[i].l4num, &nl_tp_name);
        err = nl_ct_get_timeout_policy(nl_tp_name, &nl_tp);

        if (err) {
            VLOG_WARN_RL(&error_rl, "failed to get timeout policy %s (%s)",
                         nl_tp_name, ovs_strerror(err));
            free(nl_tp_name);
            goto out;
        }
        free(nl_tp_name);
        /* Copies this protocol's timeouts into 'tp' and sets the matching
         * 'present' bits. */
        dpif_netlink_set_ct_dpif_tp_attrs(&nl_tp, tp);
    }

out:
    return err;
}
|
|
|
|
|
|
|
|
|
|
/* Returns 0 if all the sub timeout policies are deleted or not exist in the
|
|
|
|
|
* kernel. Returns 1 if any sub timeout policy deletion failed. */
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
uint32_t tp_id)
|
|
|
|
|
{
|
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
|
2019-08-28 15:14:29 -07:00
|
|
|
|
char *nl_tp_name;
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num,
|
|
|
|
|
tp_protos[i].l4num, &nl_tp_name);
|
2019-08-28 15:14:29 -07:00
|
|
|
|
int err = nl_ct_del_timeout_policy(nl_tp_name);
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
if (err == ENOENT) {
|
|
|
|
|
err = 0;
|
|
|
|
|
}
|
|
|
|
|
if (err) {
|
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(6, 6);
|
|
|
|
|
VLOG_INFO_RL(&rl, "failed to delete timeout policy %s (%s)",
|
2019-08-28 15:14:29 -07:00
|
|
|
|
nl_tp_name, ovs_strerror(err));
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
ret = 1;
|
|
|
|
|
}
|
2019-08-28 15:14:29 -07:00
|
|
|
|
free(nl_tp_name);
|
ct-dpif, dpif-netlink: Add conntrack timeout policy support
This patch first defines the dpif interface for a datapath to support
adding, deleting, getting and dumping conntrack timeout policy.
The timeout policy is identified by a 4 bytes unsigned integer in
datapath, and it currently support timeout for TCP, UDP, and ICMP
protocols.
Moreover, this patch provides the implementation for Linux kernel
datapath in dpif-netlink.
In Linux kernel, the timeout policy is maintained per L3/L4 protocol,
and it is identified by 32 bytes null terminated string. On the other
hand, in vswitchd, the timeout policy is a generic one that consists of
all the supported L4 protocols. Therefore, one of the main task in
dpif-netlink is to break down the generic timeout policy into 6
sub policies (ipv4 tcp, udp, icmp, and ipv6 tcp, udp, icmp),
and push down the configuration using the netlink API in
netlink-conntrack.c.
This patch also adds missing symbols in the windows datapath so
that the build on windows can pass.
Appveyor CI:
* https://ci.appveyor.com/project/YiHungWei/ovs/builds/26387754
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Acked-by: Alin Gabriel Serdean <aserdean@ovn.org>
Signed-off-by: Justin Pettit <jpettit@ovn.org>
2019-08-28 15:14:24 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* State for dumping all OVS-installed conntrack timeout policies from the
 * kernel and reassembling them into generic policies. */
struct dpif_netlink_ct_timeout_policy_dump_state {
    struct nl_ct_timeout_policy_dump_state *nl_dump_state; /* Kernel dump. */
    struct hmap tp_dump_map;     /* Partially assembled policies, hashed by
                                  * policy id. */
};

/* One generic timeout policy being reassembled from its per-protocol kernel
 * sub policies during a dump. */
struct dpif_netlink_tp_dump_node {
    struct hmap_node hmap_node;      /* node in tp_dump_map. */
    struct ct_dpif_timeout_policy *tp;  /* Malloc'ed merged policy. */
    uint32_t l3_l4_present;          /* Bitmap of L3/L4 protocol pairs seen
                                      * so far (bit i <=> tp_protos[i]). */
};
|
|
|
|
|
|
|
|
|
|
static struct dpif_netlink_tp_dump_node *
|
|
|
|
|
get_dpif_netlink_tp_dump_node_by_tp_id(uint32_t tp_id,
|
|
|
|
|
struct hmap *tp_dump_map)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_netlink_tp_dump_node *tp_dump_node;
|
|
|
|
|
|
|
|
|
|
HMAP_FOR_EACH_WITH_HASH (tp_dump_node, hmap_node, hash_int(tp_id, 0),
|
|
|
|
|
tp_dump_map) {
|
|
|
|
|
if (tp_dump_node->tp->id == tp_id) {
|
|
|
|
|
return tp_dump_node;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
update_dpif_netlink_tp_dump_node(
|
|
|
|
|
const struct nl_ct_timeout_policy *nl_tp,
|
|
|
|
|
struct dpif_netlink_tp_dump_node *tp_dump_node)
|
|
|
|
|
{
|
|
|
|
|
dpif_netlink_set_ct_dpif_tp_attrs(nl_tp, tp_dump_node->tp);
|
|
|
|
|
for (int i = 0; i < DPIF_NL_TP_MAX; ++i) {
|
|
|
|
|
if (nl_tp->l3num == tp_protos[i].l3num &&
|
|
|
|
|
nl_tp->l4num == tp_protos[i].l4num) {
|
|
|
|
|
tp_dump_node->l3_l4_present |= 1 << i;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
void **statep)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_netlink_ct_timeout_policy_dump_state *dump_state;
|
|
|
|
|
|
|
|
|
|
*statep = dump_state = xzalloc(sizeof *dump_state);
|
|
|
|
|
int err = nl_ct_timeout_policy_dump_start(&dump_state->nl_dump_state);
|
|
|
|
|
if (err) {
|
|
|
|
|
free(dump_state);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
hmap_init(&dump_state->tp_dump_map);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
get_and_cleanup_tp_dump_node(struct hmap *hmap,
|
|
|
|
|
struct dpif_netlink_tp_dump_node *tp_dump_node,
|
|
|
|
|
struct ct_dpif_timeout_policy *tp)
|
|
|
|
|
{
|
|
|
|
|
hmap_remove(hmap, &tp_dump_node->hmap_node);
|
|
|
|
|
*tp = *tp_dump_node->tp;
|
|
|
|
|
free(tp_dump_node->tp);
|
|
|
|
|
free(tp_dump_node);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Produces the next generic timeout policy of the dump in '*tp'.  Kernel sub
 * policies arrive one protocol at a time, so they are accumulated in
 * 'tp_dump_map' until all L3/L4 pieces of one policy have been seen.
 * Returns 0 when a policy is stored in '*tp', EOF when the dump is
 * exhausted, or another positive errno value on error. */
static int
dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
                                         void *state,
                                         struct ct_dpif_timeout_policy *tp)
{
    struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state;
    struct dpif_netlink_tp_dump_node *tp_dump_node;
    int err;

    /* Dumps all the timeout policies in the kernel. */
    do {
        struct nl_ct_timeout_policy nl_tp;
        uint32_t tp_id;

        err = nl_ct_timeout_policy_dump_next(dump_state->nl_dump_state,
                                             &nl_tp);
        if (err) {
            break;
        }

        /* We are only interested in OVS-installed timeout policies, i.e.
         * those whose name carries the OVS prefix and a numeric id. */
        if (!ovs_scan(nl_tp.name, NL_TP_NAME_PREFIX"%"PRIu32, &tp_id)) {
            continue;
        }

        tp_dump_node = get_dpif_netlink_tp_dump_node_by_tp_id(
                            tp_id, &dump_state->tp_dump_map);
        if (!tp_dump_node) {
            /* First sub policy of this id: start a new accumulation node. */
            tp_dump_node = xzalloc(sizeof *tp_dump_node);
            tp_dump_node->tp = xzalloc(sizeof *tp_dump_node->tp);
            tp_dump_node->tp->id = tp_id;
            hmap_insert(&dump_state->tp_dump_map, &tp_dump_node->hmap_node,
                        hash_int(tp_id, 0));
        }

        update_dpif_netlink_tp_dump_node(&nl_tp, tp_dump_node);

        /* Returns one ct_dpif_timeout_policy if we have gathered all the
         * L3/L4 sub-pieces. */
        if (tp_dump_node->l3_l4_present == DPIF_NL_ALL_TP) {
            get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map,
                                         tp_dump_node, tp);
            break;
        }
    } while (true);

    /* At end of dump, flush out the incomplete timeout policies one call at
     * a time. */
    if (err == EOF) {
        if (!hmap_is_empty(&dump_state->tp_dump_map)) {
            struct hmap_node *hmap_node = hmap_first(&dump_state->tp_dump_map);
            tp_dump_node = CONTAINER_OF(hmap_node,
                                        struct dpif_netlink_tp_dump_node,
                                        hmap_node);
            get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map,
                                         tp_dump_node, tp);
            return 0;
        }
    }

    return err;
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
void *state)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state;
|
|
|
|
|
struct dpif_netlink_tp_dump_node *tp_dump_node;
|
|
|
|
|
|
|
|
|
|
int err = nl_ct_timeout_policy_dump_done(dump_state->nl_dump_state);
|
|
|
|
|
HMAP_FOR_EACH_POP (tp_dump_node, hmap_node, &dump_state->tp_dump_map) {
|
|
|
|
|
free(tp_dump_node->tp);
|
|
|
|
|
free(tp_dump_node);
|
|
|
|
|
}
|
|
|
|
|
hmap_destroy(&dump_state->tp_dump_map);
|
|
|
|
|
free(dump_state);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2017-02-23 11:27:54 -08:00
|
|
|
|
|
|
|
|
|
/* Meters */

/* Set of supported meter flags.  Anything outside this mask is rejected by
 * dpif_netlink_meter_set__(). */
#define DP_SUPPORTED_METER_FLAGS_MASK \
    (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)

/* Meter support was introduced in Linux 4.15. In some versions of
 * Linux 4.15, 4.16, and 4.17, there was a bug that never set the id
 * when the meter was created, so all meters essentially had an id of
 * zero. Check for that condition and disable meters on those kernels. */
static bool probe_broken_meters(struct dpif *);
|
|
|
|
|
|
2017-02-23 11:27:54 -08:00
|
|
|
|
static void
|
2017-11-17 02:15:47 -08:00
|
|
|
|
dpif_netlink_meter_init(struct dpif_netlink *dpif, struct ofpbuf *buf,
|
|
|
|
|
void *stub, size_t size, uint32_t command)
|
|
|
|
|
{
|
|
|
|
|
ofpbuf_use_stub(buf, stub, size);
|
|
|
|
|
|
|
|
|
|
nl_msg_put_genlmsghdr(buf, 0, ovs_meter_family, NLM_F_REQUEST | NLM_F_ECHO,
|
|
|
|
|
command, OVS_METER_VERSION);
|
|
|
|
|
|
|
|
|
|
struct ovs_header *ovs_header;
|
|
|
|
|
ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
|
|
|
|
|
ovs_header->dp_ifindex = dpif->dp_ifindex;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Execute meter 'request' in the kernel datapath. If the command
|
|
|
|
|
* fails, returns a positive errno value. Otherwise, stores the reply
|
|
|
|
|
* in '*replyp', parses the policy according to 'reply_policy' into the
|
|
|
|
|
* array of Netlink attribute in 'a', and returns 0. On success, the
|
|
|
|
|
* caller is responsible for calling ofpbuf_delete() on '*replyp'
|
|
|
|
|
* ('replyp' will contain pointers into 'a'). */
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_meter_transact(struct ofpbuf *request, struct ofpbuf **replyp,
|
|
|
|
|
const struct nl_policy *reply_policy,
|
|
|
|
|
struct nlattr **a, size_t size_a)
|
|
|
|
|
{
|
|
|
|
|
int error = nl_transact(NETLINK_GENERIC, request, replyp);
|
|
|
|
|
ofpbuf_uninit(request);
|
|
|
|
|
|
|
|
|
|
if (error) {
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct nlmsghdr *nlmsg = ofpbuf_try_pull(*replyp, sizeof *nlmsg);
|
|
|
|
|
struct genlmsghdr *genl = ofpbuf_try_pull(*replyp, sizeof *genl);
|
|
|
|
|
struct ovs_header *ovs_header = ofpbuf_try_pull(*replyp,
|
|
|
|
|
sizeof *ovs_header);
|
|
|
|
|
if (!nlmsg || !genl || !ovs_header
|
|
|
|
|
|| nlmsg->nlmsg_type != ovs_meter_family
|
|
|
|
|
|| !nl_policy_parse(*replyp, 0, reply_policy, a, size_a)) {
|
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
|
|
|
|
|
VLOG_DBG_RL(&rl,
|
|
|
|
|
"Kernel module response to meter tranaction is invalid");
|
|
|
|
|
return EINVAL;
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netlink_meter_get_features(const struct dpif *dpif_,
|
2017-02-23 11:27:54 -08:00
|
|
|
|
struct ofputil_meter_features *features)
|
|
|
|
|
{
|
2018-08-08 17:31:17 -07:00
|
|
|
|
if (probe_broken_meters(CONST_CAST(struct dpif *, dpif_))) {
|
|
|
|
|
features = NULL;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-17 02:15:47 -08:00
|
|
|
|
struct ofpbuf buf, *msg;
|
|
|
|
|
uint64_t stub[1024 / 8];
|
|
|
|
|
|
|
|
|
|
static const struct nl_policy ovs_meter_features_policy[] = {
|
|
|
|
|
[OVS_METER_ATTR_MAX_METERS] = { .type = NL_A_U32 },
|
|
|
|
|
[OVS_METER_ATTR_MAX_BANDS] = { .type = NL_A_U32 },
|
|
|
|
|
[OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true },
|
|
|
|
|
};
|
|
|
|
|
struct nlattr *a[ARRAY_SIZE(ovs_meter_features_policy)];
|
|
|
|
|
|
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
|
|
|
|
dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub,
|
|
|
|
|
OVS_METER_CMD_FEATURES);
|
|
|
|
|
if (dpif_netlink_meter_transact(&buf, &msg, ovs_meter_features_policy, a,
|
|
|
|
|
ARRAY_SIZE(ovs_meter_features_policy))) {
|
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
|
|
|
|
|
VLOG_INFO_RL(&rl,
|
|
|
|
|
"dpif_netlink_meter_transact OVS_METER_CMD_FEATURES failed");
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
features->max_meters = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_METERS]);
|
|
|
|
|
features->max_bands = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_BANDS]);
|
|
|
|
|
|
|
|
|
|
/* Bands is a nested attribute of zero or more nested
|
|
|
|
|
* band attributes. */
|
|
|
|
|
if (a[OVS_METER_ATTR_BANDS]) {
|
|
|
|
|
const struct nlattr *nla;
|
|
|
|
|
size_t left;
|
|
|
|
|
|
|
|
|
|
NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) {
|
|
|
|
|
const struct nlattr *band_nla;
|
|
|
|
|
size_t band_left;
|
|
|
|
|
|
|
|
|
|
NL_NESTED_FOR_EACH (band_nla, band_left, nla) {
|
|
|
|
|
if (nl_attr_type(band_nla) == OVS_BAND_ATTR_TYPE) {
|
|
|
|
|
if (nl_attr_get_size(band_nla) == sizeof(uint32_t)) {
|
|
|
|
|
switch (nl_attr_get_u32(band_nla)) {
|
|
|
|
|
case OVS_METER_BAND_TYPE_DROP:
|
|
|
|
|
features->band_types |= 1 << OFPMBT13_DROP;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK;
|
|
|
|
|
|
|
|
|
|
ofpbuf_delete(msg);
|
2017-02-23 11:27:54 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Installs or modifies meter 'meter_id' in the kernel datapath according to
 * 'config'.  Returns 0 on success; EBADF for unsupported flags, ENODEV for
 * an unsupported band type, or another positive errno value from the netlink
 * transaction. */
static int
dpif_netlink_meter_set__(struct dpif *dpif_, ofproto_meter_id meter_id,
                         struct ofputil_meter_config *config)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct ofpbuf buf, *msg;
    uint64_t stub[1024 / 8];

    static const struct nl_policy ovs_meter_set_response_policy[] = {
        [OVS_METER_ATTR_ID] = { .type = NL_A_U32 },
    };
    struct nlattr *a[ARRAY_SIZE(ovs_meter_set_response_policy)];

    /* Validate the whole config before touching the kernel. */
    if (config->flags & ~DP_SUPPORTED_METER_FLAGS_MASK) {
        return EBADF; /* Unsupported flags set */
    }

    for (size_t i = 0; i < config->n_bands; i++) {
        switch (config->bands[i].type) {
        case OFPMBT13_DROP:
            break;
        default:
            return ENODEV; /* Unsupported band type */
        }
    }

    dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, OVS_METER_CMD_SET);

    nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32);

    if (config->flags & OFPMF13_KBPS) {
        nl_msg_put_flag(&buf, OVS_METER_ATTR_KBPS);
    }

    size_t bands_offset = nl_msg_start_nested(&buf, OVS_METER_ATTR_BANDS);
    /* Bands: each band is itself a nested attribute. */
    for (size_t i = 0; i < config->n_bands; ++i) {
        struct ofputil_meter_band * band = &config->bands[i];
        uint32_t band_type;

        size_t band_offset = nl_msg_start_nested(&buf, OVS_BAND_ATTR_UNSPEC);

        switch (band->type) {
        case OFPMBT13_DROP:
            band_type = OVS_METER_BAND_TYPE_DROP;
            break;
        default:
            band_type = OVS_METER_BAND_TYPE_UNSPEC;
        }
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_TYPE, band_type);
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_RATE, band->rate);
        /* Without OFPMF13_BURST, the burst size defaults to the rate. */
        nl_msg_put_u32(&buf, OVS_BAND_ATTR_BURST,
                       config->flags & OFPMF13_BURST ?
                       band->burst_size : band->rate);
        nl_msg_end_nested(&buf, band_offset);
    }
    nl_msg_end_nested(&buf, bands_offset);

    int error = dpif_netlink_meter_transact(&buf, &msg,
                                            ovs_meter_set_response_policy, a,
                                    ARRAY_SIZE(ovs_meter_set_response_policy));
    if (error) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_INFO_RL(&rl,
                     "dpif_netlink_meter_transact OVS_METER_CMD_SET failed");
        return error;
    }

    /* A mismatched id indicates the broken-meter kernel bug; log but still
     * report success. */
    if (nl_attr_get_u32(a[OVS_METER_ATTR_ID]) != meter_id.uint32) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_INFO_RL(&rl,
                     "Kernel returned a different meter id than requested");
    }
    ofpbuf_delete(msg);
    return 0;
}
|
|
|
|
|
|
2018-08-17 12:48:54 -07:00
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_meter_set(struct dpif *dpif_, ofproto_meter_id meter_id,
|
|
|
|
|
struct ofputil_meter_config *config)
|
|
|
|
|
{
|
|
|
|
|
if (probe_broken_meters(dpif_)) {
|
|
|
|
|
return ENOMEM;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return dpif_netlink_meter_set__(dpif_, meter_id, config);
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-17 02:15:47 -08:00
|
|
|
|
/* Retrieve statistics and/or delete meter 'meter_id'. Statistics are
|
|
|
|
|
* stored in 'stats', if it is not null. If 'command' is
|
|
|
|
|
* OVS_METER_CMD_DEL, the meter is deleted and statistics are optionally
|
|
|
|
|
* retrieved. If 'command' is OVS_METER_CMD_GET, then statistics are
|
|
|
|
|
* simply retrieved. */
|
2017-02-23 11:27:54 -08:00
|
|
|
|
static int
|
2017-11-17 02:15:47 -08:00
|
|
|
|
dpif_netlink_meter_get_stats(const struct dpif *dpif_,
|
|
|
|
|
ofproto_meter_id meter_id,
|
|
|
|
|
struct ofputil_meter_stats *stats,
|
|
|
|
|
uint16_t max_bands,
|
|
|
|
|
enum ovs_meter_cmd command)
|
2017-02-23 11:27:54 -08:00
|
|
|
|
{
|
2017-11-17 02:15:47 -08:00
|
|
|
|
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
|
|
|
|
|
struct ofpbuf buf, *msg;
|
|
|
|
|
uint64_t stub[1024 / 8];
|
|
|
|
|
|
|
|
|
|
static const struct nl_policy ovs_meter_stats_policy[] = {
|
|
|
|
|
[OVS_METER_ATTR_ID] = { .type = NL_A_U32, .optional = true},
|
|
|
|
|
[OVS_METER_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
|
|
|
|
|
.optional = true},
|
|
|
|
|
[OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true },
|
|
|
|
|
};
|
|
|
|
|
struct nlattr *a[ARRAY_SIZE(ovs_meter_stats_policy)];
|
|
|
|
|
|
|
|
|
|
dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, command);
|
|
|
|
|
|
|
|
|
|
nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32);
|
|
|
|
|
|
|
|
|
|
int error = dpif_netlink_meter_transact(&buf, &msg,
|
|
|
|
|
ovs_meter_stats_policy, a,
|
|
|
|
|
ARRAY_SIZE(ovs_meter_stats_policy));
|
|
|
|
|
if (error) {
|
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
|
|
|
|
|
VLOG_INFO_RL(&rl, "dpif_netlink_meter_transact %s failed",
|
|
|
|
|
command == OVS_METER_CMD_GET ? "get" : "del");
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (stats
|
|
|
|
|
&& a[OVS_METER_ATTR_ID]
|
|
|
|
|
&& a[OVS_METER_ATTR_STATS]
|
|
|
|
|
&& nl_attr_get_u32(a[OVS_METER_ATTR_ID]) == meter_id.uint32) {
|
|
|
|
|
/* return stats */
|
|
|
|
|
const struct ovs_flow_stats *stat;
|
|
|
|
|
const struct nlattr *nla;
|
|
|
|
|
size_t left;
|
|
|
|
|
|
|
|
|
|
stat = nl_attr_get(a[OVS_METER_ATTR_STATS]);
|
|
|
|
|
stats->packet_in_count = get_32aligned_u64(&stat->n_packets);
|
|
|
|
|
stats->byte_in_count = get_32aligned_u64(&stat->n_bytes);
|
|
|
|
|
|
|
|
|
|
if (a[OVS_METER_ATTR_BANDS]) {
|
|
|
|
|
size_t n_bands = 0;
|
|
|
|
|
NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) {
|
|
|
|
|
const struct nlattr *band_nla;
|
|
|
|
|
band_nla = nl_attr_find_nested(nla, OVS_BAND_ATTR_STATS);
|
|
|
|
|
if (band_nla && nl_attr_get_size(band_nla) \
|
|
|
|
|
== sizeof(struct ovs_flow_stats)) {
|
|
|
|
|
stat = nl_attr_get(band_nla);
|
|
|
|
|
|
|
|
|
|
if (n_bands < max_bands) {
|
|
|
|
|
stats->bands[n_bands].packet_count
|
|
|
|
|
= get_32aligned_u64(&stat->n_packets);
|
|
|
|
|
stats->bands[n_bands].byte_count
|
|
|
|
|
= get_32aligned_u64(&stat->n_bytes);
|
|
|
|
|
++n_bands;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
stats->bands[n_bands].packet_count = 0;
|
|
|
|
|
stats->bands[n_bands].byte_count = 0;
|
|
|
|
|
++n_bands;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
stats->n_bands = n_bands;
|
|
|
|
|
} else {
|
|
|
|
|
/* For a non-existent meter, return 0 stats. */
|
|
|
|
|
stats->n_bands = 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ofpbuf_delete(msg);
|
|
|
|
|
return error;
|
2017-02-23 11:27:54 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2017-11-17 02:15:47 -08:00
|
|
|
|
dpif_netlink_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
|
|
|
|
|
struct ofputil_meter_stats *stats, uint16_t max_bands)
|
2017-02-23 11:27:54 -08:00
|
|
|
|
{
|
2017-11-17 02:15:47 -08:00
|
|
|
|
return dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands,
|
|
|
|
|
OVS_METER_CMD_GET);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netlink_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
|
|
|
|
|
struct ofputil_meter_stats *stats, uint16_t max_bands)
|
|
|
|
|
{
|
|
|
|
|
return dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands,
|
|
|
|
|
OVS_METER_CMD_DEL);
|
2017-02-23 11:27:54 -08:00
|
|
|
|
}
|
|
|
|
|
|
2018-08-08 17:31:17 -07:00
|
|
|
|
/* Probes 'dpif' for the Linux 4.15-4.17 broken-meter bug by installing two
 * meters and checking that both can be retrieved by their ids.  Returns true
 * if the kernel's meter implementation is broken.  Note that this installs
 * (and on success removes) real meters in the datapath. */
static bool
probe_broken_meters__(struct dpif *dpif)
{
    /* This test is destructive if a probe occurs while ovs-vswitchd is
     * running (e.g., an ovs-dpctl meter command is called), so choose a
     * random high meter id to make this less likely to occur. */
    ofproto_meter_id id1 = { 54545401 };
    ofproto_meter_id id2 = { 54545402 };
    struct ofputil_meter_band band = {OFPMBT13_DROP, 0, 1, 0};
    struct ofputil_meter_config config1 = { 1, OFPMF13_KBPS, 1, &band};
    struct ofputil_meter_config config2 = { 2, OFPMF13_KBPS, 1, &band};

    /* Try adding two meters and make sure that they both come back with
     * the proper meter id.  Use the "__" version so that we don't cause
     * a recursive deadlock. */
    dpif_netlink_meter_set__(dpif, id1, &config1);
    dpif_netlink_meter_set__(dpif, id2, &config2);

    /* On broken kernels every meter gets id 0, so a get by the real id
     * fails. */
    if (dpif_netlink_meter_get(dpif, id1, NULL, 0)
        || dpif_netlink_meter_get(dpif, id2, NULL, 0)) {
        VLOG_INFO("The kernel module has a broken meter implementation.");
        return true;
    }

    dpif_netlink_meter_del(dpif, id1, NULL, 0);
    dpif_netlink_meter_del(dpif, id2, NULL, 0);

    return false;
}
|
|
|
|
|
|
|
|
|
|
/* Returns true if 'dpif''s kernel has the broken meter implementation.  The
 * probe runs at most once per process; subsequent calls return the cached
 * result. */
static bool
probe_broken_meters(struct dpif *dpif)
{
    /* This is a once-only test because currently OVS only has at most a single
     * Netlink capable datapath on any given platform. */
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    /* Written only inside the once-block; ovsthread_once_done() publishes the
     * value before other threads can skip the block. */
    static bool broken_meters = false;
    if (ovsthread_once_start(&once)) {
        broken_meters = probe_broken_meters__(dpif);
        ovsthread_once_done(&once);
    }
    return broken_meters;
}
|
2017-02-23 11:27:54 -08:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
/* Registration table for the Linux kernel ("system") datapath type,
 * implemented over Generic Netlink.  NULL entries are optional dpif-provider
 * hooks that this implementation does not supply. */
const struct dpif_class dpif_netlink_class = {
    "system",
    false,                          /* cleanup_required */
    NULL,                           /* init */
    dpif_netlink_enumerate,
    NULL,                           /* NOTE(review): slot between enumerate
                                     * and open — confirm member name against
                                     * dpif-provider.h. */
    dpif_netlink_open,
    dpif_netlink_close,
    dpif_netlink_destroy,
    dpif_netlink_run,
    NULL,                           /* wait */
    dpif_netlink_get_stats,
    dpif_netlink_set_features,
    dpif_netlink_port_add,
    dpif_netlink_port_del,
    NULL,                           /* port_set_config */
    dpif_netlink_port_query_by_number,
    dpif_netlink_port_query_by_name,
    dpif_netlink_port_get_pid,
    dpif_netlink_port_dump_start,
    dpif_netlink_port_dump_next,
    dpif_netlink_port_dump_done,
    dpif_netlink_port_poll,
    dpif_netlink_port_poll_wait,
    dpif_netlink_flow_flush,
    dpif_netlink_flow_dump_create,
    dpif_netlink_flow_dump_destroy,
    dpif_netlink_flow_dump_thread_create,
    dpif_netlink_flow_dump_thread_destroy,
    dpif_netlink_flow_dump_next,
    dpif_netlink_operate,
    dpif_netlink_recv_set,
    dpif_netlink_handlers_set,
    NULL,                           /* set_config */
    dpif_netlink_queue_to_priority,
    dpif_netlink_recv,
    dpif_netlink_recv_wait,
    dpif_netlink_recv_purge,
    NULL,                           /* register_dp_purge_cb */
    NULL,                           /* register_upcall_cb */
    NULL,                           /* enable_upcall */
    NULL,                           /* disable_upcall */
    dpif_netlink_get_datapath_version, /* get_datapath_version */
    dpif_netlink_ct_dump_start,
    dpif_netlink_ct_dump_next,
    dpif_netlink_ct_dump_done,
    dpif_netlink_ct_flush,
    NULL,                           /* ct_set_maxconns */
    NULL,                           /* ct_get_maxconns */
    NULL,                           /* ct_get_nconns */
    NULL,                           /* ct_set_tcp_seq_chk */
    NULL,                           /* ct_get_tcp_seq_chk */
    dpif_netlink_ct_set_limits,
    dpif_netlink_ct_get_limits,
    dpif_netlink_ct_del_limits,
    dpif_netlink_ct_set_timeout_policy,
    dpif_netlink_ct_get_timeout_policy,
    dpif_netlink_ct_del_timeout_policy,
    dpif_netlink_ct_timeout_policy_dump_start,
    dpif_netlink_ct_timeout_policy_dump_next,
    dpif_netlink_ct_timeout_policy_dump_done,
    dpif_netlink_ct_get_timeout_policy_name,
    NULL,                           /* ipf_set_enabled */
    NULL,                           /* ipf_set_min_frag */
    NULL,                           /* ipf_set_max_nfrags */
    NULL,                           /* ipf_get_status */
    NULL,                           /* ipf_dump_start */
    NULL,                           /* ipf_dump_next */
    NULL,                           /* ipf_dump_done */
    dpif_netlink_meter_get_features,
    dpif_netlink_meter_set,
    dpif_netlink_meter_get,
    dpif_netlink_meter_del,
};
|
2014-09-18 04:17:54 -07:00
|
|
|
|
|
2009-06-17 14:35:35 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_init(void)
|
2009-06-17 14:35:35 -07:00
|
|
|
|
{
|
2013-04-23 14:35:29 -07:00
|
|
|
|
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
|
|
|
|
|
static int error;
|
2011-01-26 13:41:54 -08:00
|
|
|
|
|
2013-04-23 14:35:29 -07:00
|
|
|
|
if (ovsthread_once_start(&once)) {
|
2011-08-18 10:35:40 -07:00
|
|
|
|
error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
|
|
|
|
|
&ovs_datapath_family);
|
2011-01-28 14:00:51 -08:00
|
|
|
|
if (error) {
|
2018-03-29 23:05:31 -03:00
|
|
|
|
VLOG_INFO("Generic Netlink family '%s' does not exist. "
|
2016-05-17 14:28:39 +01:00
|
|
|
|
"The Open vSwitch kernel module is probably not loaded.",
|
|
|
|
|
OVS_DATAPATH_FAMILY);
|
2011-01-28 14:00:51 -08:00
|
|
|
|
}
|
2011-01-28 13:59:03 -08:00
|
|
|
|
if (!error) {
|
2011-08-18 10:35:40 -07:00
|
|
|
|
error = nl_lookup_genl_family(OVS_VPORT_FAMILY, &ovs_vport_family);
|
2011-01-28 13:59:03 -08:00
|
|
|
|
}
|
2011-01-28 14:00:51 -08:00
|
|
|
|
if (!error) {
|
2011-08-18 10:35:40 -07:00
|
|
|
|
error = nl_lookup_genl_family(OVS_FLOW_FAMILY, &ovs_flow_family);
|
2011-01-28 14:00:51 -08:00
|
|
|
|
}
|
2011-01-28 13:55:04 -08:00
|
|
|
|
if (!error) {
|
2011-08-18 10:35:40 -07:00
|
|
|
|
error = nl_lookup_genl_family(OVS_PACKET_FAMILY,
|
|
|
|
|
&ovs_packet_family);
|
2011-01-28 13:55:04 -08:00
|
|
|
|
}
|
2011-08-24 16:21:10 -07:00
|
|
|
|
if (!error) {
|
|
|
|
|
error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
|
2013-08-26 23:53:17 -07:00
|
|
|
|
&ovs_vport_mcgroup);
|
2011-08-24 16:21:10 -07:00
|
|
|
|
}
|
2017-11-17 02:15:47 -08:00
|
|
|
|
if (!error) {
|
|
|
|
|
if (nl_lookup_genl_family(OVS_METER_FAMILY, &ovs_meter_family)) {
|
|
|
|
|
VLOG_INFO("The kernel module does not support meters.");
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-08-17 02:05:09 -07:00
|
|
|
|
if (nl_lookup_genl_family(OVS_CT_LIMIT_FAMILY,
|
|
|
|
|
&ovs_ct_limit_family) < 0) {
|
|
|
|
|
VLOG_INFO("Generic Netlink family '%s' does not exist. "
|
|
|
|
|
"Please update the Open vSwitch kernel module to enable "
|
|
|
|
|
"the conntrack limit feature.", OVS_CT_LIMIT_FAMILY);
|
|
|
|
|
}
|
2013-04-23 14:35:29 -07:00
|
|
|
|
|
2017-05-18 16:10:33 -04:00
|
|
|
|
ovs_tunnels_out_of_tree = dpif_netlink_rtnl_probe_oot_tunnels();
|
|
|
|
|
|
2013-04-23 14:35:29 -07:00
|
|
|
|
ovsthread_once_done(&once);
|
2011-01-26 13:41:54 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return error;
|
2009-06-17 14:35:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
2011-01-26 12:28:59 -08:00
|
|
|
|
/* Returns true if the kernel vport named 'name' exists and is an internal
 * device, false otherwise.
 *
 * On lookup failure, dpif_netlink_vport_transact() leaves 'reply' reset to
 * empty values, so 'reply.type' is not OVS_VPORT_TYPE_INTERNAL and the
 * function correctly returns false. */
bool
dpif_netlink_is_internal_device(const char *name)
{
    struct dpif_netlink_vport reply;
    struct ofpbuf *buf;
    int error;

    error = dpif_netlink_vport_get(name, &reply, &buf);
    if (!error) {
        ofpbuf_delete(buf);
    } else if (error != ENODEV && error != ENOENT) {
        /* ENODEV/ENOENT just mean "no such device"; anything else is
         * unexpected and worth a (rate-limited) warning. */
        VLOG_WARN_RL(&error_rl, "%s: vport query failed (%s)",
                     name, ovs_strerror(error));
    }

    return reply.type == OVS_VPORT_TYPE_INTERNAL;
}
|
2016-07-01 13:49:34 -07:00
|
|
|
|
|
2011-08-18 10:35:40 -07:00
|
|
|
|
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
|
2011-01-26 12:28:59 -08:00
|
|
|
|
* by Netlink attributes, into 'vport'. Returns 0 if successful, otherwise a
|
|
|
|
|
* positive errno value.
|
|
|
|
|
*
|
|
|
|
|
* 'vport' will contain pointers into 'buf', so the caller should not free
|
|
|
|
|
* 'buf' while 'vport' is still in use. */
|
|
|
|
|
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'vport'.  Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'vport' will contain pointers into 'buf', so the caller should not free
 * 'buf' while 'vport' is still in use. */
static int
dpif_netlink_vport_from_ofpbuf(struct dpif_netlink_vport *vport,
                               const struct ofpbuf *buf)
{
    /* PORT_NO, TYPE, NAME, and UPCALL_PID are required by this policy; the
     * rest are optional.  UPCALL_PID is NL_A_UNSPEC because it carries a
     * variable-length array of 32-bit PIDs. */
    static const struct nl_policy ovs_vport_policy[] = {
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NL_A_U32 },
        [OVS_VPORT_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NL_A_UNSPEC },
        [OVS_VPORT_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_vport_stats),
                                   .optional = true },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_VPORT_ATTR_NETNSID] = { .type = NL_A_U32, .optional = true },
    };

    dpif_netlink_vport_init(vport);

    /* Peel off the Netlink, Generic Netlink, and OVS headers. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_vport_family
        || !nl_policy_parse(&b, 0, ovs_vport_policy, a,
                            ARRAY_SIZE(ovs_vport_policy))) {
        return EINVAL;
    }

    vport->cmd = genl->cmd;
    vport->dp_ifindex = ovs_header->dp_ifindex;
    vport->port_no = nl_attr_get_odp_port(a[OVS_VPORT_ATTR_PORT_NO]);
    vport->type = nl_attr_get_u32(a[OVS_VPORT_ATTR_TYPE]);
    vport->name = nl_attr_get_string(a[OVS_VPORT_ATTR_NAME]);
    if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
        /* Attribute payload is an array of PIDs; derive the count from the
         * payload size. */
        vport->n_upcall_pids = nl_attr_get_size(a[OVS_VPORT_ATTR_UPCALL_PID])
                               / (sizeof *vport->upcall_pids);
        vport->upcall_pids = nl_attr_get(a[OVS_VPORT_ATTR_UPCALL_PID]);
    }
    if (a[OVS_VPORT_ATTR_STATS]) {
        vport->stats = nl_attr_get(a[OVS_VPORT_ATTR_STATS]);
    }
    if (a[OVS_VPORT_ATTR_OPTIONS]) {
        vport->options = nl_attr_get(a[OVS_VPORT_ATTR_OPTIONS]);
        vport->options_len = nl_attr_get_size(a[OVS_VPORT_ATTR_OPTIONS]);
    }
    if (a[OVS_VPORT_ATTR_NETNSID]) {
        netnsid_set(&vport->netnsid,
                    nl_attr_get_u32(a[OVS_VPORT_ATTR_NETNSID]));
    } else {
        /* No NETNSID attribute means the vport is in the local netns. */
        netnsid_set_local(&vport->netnsid);
    }
    return 0;
}
|
|
|
|
|
|
2011-08-18 10:35:40 -07:00
|
|
|
|
/* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
|
2011-01-26 12:28:59 -08:00
|
|
|
|
* followed by Netlink attributes corresponding to 'vport'. */
|
|
|
|
|
/* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
 * followed by Netlink attributes corresponding to 'vport'.  Only fields of
 * 'vport' set to non-"empty" values (see dpif_netlink_vport_init()) are
 * serialized. */
static void
dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *vport,
                             struct ofpbuf *buf)
{
    struct ovs_header *ovs_header;

    /* NLM_F_ECHO asks the kernel to send the applied result back to us. */
    nl_msg_put_genlmsghdr(buf, 0, ovs_vport_family, NLM_F_REQUEST | NLM_F_ECHO,
                          vport->cmd, OVS_VPORT_VERSION);

    ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
    ovs_header->dp_ifindex = vport->dp_ifindex;

    if (vport->port_no != ODPP_NONE) {
        nl_msg_put_odp_port(buf, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
    }

    if (vport->type != OVS_VPORT_TYPE_UNSPEC) {
        nl_msg_put_u32(buf, OVS_VPORT_ATTR_TYPE, vport->type);
    }

    if (vport->name) {
        nl_msg_put_string(buf, OVS_VPORT_ATTR_NAME, vport->name);
    }

    if (vport->upcall_pids) {
        /* Serialized as a flat array of 32-bit PIDs. */
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_UPCALL_PID,
                          vport->upcall_pids,
                          vport->n_upcall_pids * sizeof *vport->upcall_pids);
    }

    if (vport->stats) {
        nl_msg_put_unspec(buf, OVS_VPORT_ATTR_STATS,
                          vport->stats, sizeof *vport->stats);
    }

    if (vport->options) {
        nl_msg_put_nested(buf, OVS_VPORT_ATTR_OPTIONS,
                          vport->options, vport->options_len);
    }
}
|
|
|
|
|
|
|
|
|
|
/* Clears 'vport' to "empty" values. */
|
|
|
|
|
/* Clears 'vport' to "empty" values: all fields zeroed (pointers NULL) and
 * the port number set to ODPP_NONE so serialization skips it. */
void
dpif_netlink_vport_init(struct dpif_netlink_vport *vport)
{
    memset(vport, 0, sizeof *vport);
    vport->port_no = ODPP_NONE;
}
|
|
|
|
|
|
|
|
|
|
/* Executes 'request' in the kernel datapath. If the command fails, returns a
|
|
|
|
|
* positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
|
|
|
|
|
* without doing anything else. If 'reply' and 'bufp' are nonnull, then the
|
2011-08-18 10:35:40 -07:00
|
|
|
|
* result of the command is expected to be an ovs_vport also, which is decoded
|
2011-01-26 12:28:59 -08:00
|
|
|
|
* and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
|
|
|
|
|
* reply is no longer needed ('reply' will contain pointers into '*bufp'). */
|
|
|
|
|
/* Executes 'request' in the kernel datapath.  If the command fails, returns a
 * positive errno value.  Otherwise, if 'reply' and 'bufp' are null, returns 0
 * without doing anything else.  If 'reply' and 'bufp' are nonnull, then the
 * result of the command is expected to be an ovs_vport also, which is decoded
 * and stored in '*reply' and '*bufp'.  The caller must free '*bufp' when the
 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
int
dpif_netlink_vport_transact(const struct dpif_netlink_vport *request,
                            struct dpif_netlink_vport *reply,
                            struct ofpbuf **bufp)
{
    struct ofpbuf *request_buf;
    int error;

    /* 'reply' and 'bufp' must be supplied together or not at all. */
    ovs_assert((reply != NULL) == (bufp != NULL));

    /* Make sure the Generic Netlink family numbers are resolved first. */
    error = dpif_netlink_init();
    if (error) {
        if (reply) {
            *bufp = NULL;
            dpif_netlink_vport_init(reply);
        }
        return error;
    }

    request_buf = ofpbuf_new(1024);
    dpif_netlink_vport_to_ofpbuf(request, request_buf);
    error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
    ofpbuf_delete(request_buf);

    if (reply) {
        if (!error) {
            error = dpif_netlink_vport_from_ofpbuf(reply, *bufp);
        }
        if (error) {
            /* On any failure, hand back an empty reply and no buffer. */
            dpif_netlink_vport_init(reply);
            ofpbuf_delete(*bufp);
            *bufp = NULL;
        }
    }
    return error;
}
|
|
|
|
|
|
|
|
|
|
/* Obtains information about the kernel vport named 'name' and stores it into
|
|
|
|
|
* '*reply' and '*bufp'. The caller must free '*bufp' when the reply is no
|
|
|
|
|
* longer needed ('reply' will contain pointers into '*bufp'). */
|
|
|
|
|
int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_vport_get(const char *name, struct dpif_netlink_vport *reply,
|
|
|
|
|
struct ofpbuf **bufp)
|
2011-01-26 12:28:59 -08:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_vport request;
|
2011-01-26 12:28:59 -08:00
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_vport_init(&request);
|
2011-08-18 10:35:40 -07:00
|
|
|
|
request.cmd = OVS_VPORT_CMD_GET;
|
2011-01-26 12:28:59 -08:00
|
|
|
|
request.name = name;
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
return dpif_netlink_vport_transact(&request, reply, bufp);
|
2011-01-26 12:28:59 -08:00
|
|
|
|
}
|
2014-09-18 04:17:54 -07:00
|
|
|
|
|
2011-08-18 10:35:40 -07:00
|
|
|
|
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
|
2011-01-28 13:55:04 -08:00
|
|
|
|
* by Netlink attributes, into 'dp'. Returns 0 if successful, otherwise a
|
|
|
|
|
* positive errno value.
|
2011-01-26 15:42:00 -08:00
|
|
|
|
*
|
|
|
|
|
* 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
|
|
|
|
|
* while 'dp' is still in use. */
|
|
|
|
|
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'dp'.  Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'dp' will contain pointers into 'buf', so the caller should not free 'buf'
 * while 'dp' is still in use. */
static int
dpif_netlink_dp_from_ofpbuf(struct dpif_netlink_dp *dp,
                            const struct ofpbuf *buf)
{
    /* NAME is required; stats, megaflow stats, and user features are
     * optional. */
    static const struct nl_policy ovs_datapath_policy[] = {
        [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .max_len = IFNAMSIZ },
        [OVS_DP_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_dp_stats),
                                .optional = true },
        [OVS_DP_ATTR_MEGAFLOW_STATS] = {
                                NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
                                .optional = true },
        [OVS_DP_ATTR_USER_FEATURES] = {
                                .type = NL_A_U32,
                                .optional = true },
    };

    dpif_netlink_dp_init(dp);

    /* Peel off the Netlink, Generic Netlink, and OVS headers. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_datapath_family
        || !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
                            ARRAY_SIZE(ovs_datapath_policy))) {
        return EINVAL;
    }

    dp->cmd = genl->cmd;
    dp->dp_ifindex = ovs_header->dp_ifindex;
    dp->name = nl_attr_get_string(a[OVS_DP_ATTR_NAME]);
    if (a[OVS_DP_ATTR_STATS]) {
        dp->stats = nl_attr_get(a[OVS_DP_ATTR_STATS]);
    }

    if (a[OVS_DP_ATTR_MEGAFLOW_STATS]) {
        dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
    }

    if (a[OVS_DP_ATTR_USER_FEATURES]) {
        dp->user_features = nl_attr_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
    }

    return 0;
}
|
|
|
|
|
|
2011-01-28 13:55:04 -08:00
|
|
|
|
/* Appends to 'buf' the Generic Netlink message described by 'dp'. */
|
2011-01-26 15:42:00 -08:00
|
|
|
|
/* Appends to 'buf' the Generic Netlink message described by 'dp'.  Only
 * fields of 'dp' with nonzero/nonnull values are serialized as attributes. */
static void
dpif_netlink_dp_to_ofpbuf(const struct dpif_netlink_dp *dp, struct ofpbuf *buf)
{
    struct ovs_header *ovs_header;

    /* NLM_F_ECHO asks the kernel to send the applied result back to us. */
    nl_msg_put_genlmsghdr(buf, 0, ovs_datapath_family,
                          NLM_F_REQUEST | NLM_F_ECHO, dp->cmd,
                          OVS_DATAPATH_VERSION);

    ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
    ovs_header->dp_ifindex = dp->dp_ifindex;

    if (dp->name) {
        nl_msg_put_string(buf, OVS_DP_ATTR_NAME, dp->name);
    }

    if (dp->upcall_pid) {
        nl_msg_put_u32(buf, OVS_DP_ATTR_UPCALL_PID, *dp->upcall_pid);
    }

    if (dp->user_features) {
        nl_msg_put_u32(buf, OVS_DP_ATTR_USER_FEATURES, dp->user_features);
    }

    /* Skip OVS_DP_ATTR_STATS since we never have a reason to serialize it. */
}
|
|
|
|
|
|
|
|
|
|
/* Clears 'dp' to "empty" values. */
|
2011-05-04 13:58:10 -07:00
|
|
|
|
/* Clears 'dp' to "empty" values: all fields zeroed, pointers NULL. */
static void
dpif_netlink_dp_init(struct dpif_netlink_dp *dp)
{
    memset(dp, 0, sizeof *dp);
}
|
|
|
|
|
|
2011-01-28 13:55:04 -08:00
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_dp_dump_start(struct nl_dump *dump)
|
2011-01-28 13:55:04 -08:00
|
|
|
|
{
|
2014-09-18 04:17:54 -07:00
|
|
|
|
struct dpif_netlink_dp request;
|
2011-01-28 13:55:04 -08:00
|
|
|
|
struct ofpbuf *buf;
|
|
|
|
|
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_dp_init(&request);
|
2011-08-18 10:35:40 -07:00
|
|
|
|
request.cmd = OVS_DP_CMD_GET;
|
2011-01-28 13:55:04 -08:00
|
|
|
|
|
|
|
|
|
buf = ofpbuf_new(1024);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_dp_to_ofpbuf(&request, buf);
|
2013-04-29 13:57:50 -07:00
|
|
|
|
nl_dump_start(dump, NETLINK_GENERIC, buf);
|
2011-01-28 13:55:04 -08:00
|
|
|
|
ofpbuf_delete(buf);
|
|
|
|
|
}
|
|
|
|
|
|
2011-01-26 15:42:00 -08:00
|
|
|
|
/* Executes 'request' in the kernel datapath. If the command fails, returns a
|
|
|
|
|
* positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
|
|
|
|
|
* without doing anything else. If 'reply' and 'bufp' are nonnull, then the
|
2011-01-28 13:55:04 -08:00
|
|
|
|
* result of the command is expected to be of the same form, which is decoded
|
|
|
|
|
* and stored in '*reply' and '*bufp'. The caller must free '*bufp' when the
|
|
|
|
|
* reply is no longer needed ('reply' will contain pointers into '*bufp'). */
|
2011-05-04 13:58:10 -07:00
|
|
|
|
/* Executes 'request' in the kernel datapath.  If the command fails, returns a
 * positive errno value.  Otherwise, if 'reply' and 'bufp' are null, returns 0
 * without doing anything else.  If 'reply' and 'bufp' are nonnull, then the
 * result of the command is expected to be of the same form, which is decoded
 * and stored in '*reply' and '*bufp'.  The caller must free '*bufp' when the
 * reply is no longer needed ('reply' will contain pointers into '*bufp'). */
static int
dpif_netlink_dp_transact(const struct dpif_netlink_dp *request,
                         struct dpif_netlink_dp *reply, struct ofpbuf **bufp)
{
    struct ofpbuf *request_buf;
    int error;

    /* 'reply' and 'bufp' must be supplied together or not at all. */
    ovs_assert((reply != NULL) == (bufp != NULL));

    request_buf = ofpbuf_new(1024);
    dpif_netlink_dp_to_ofpbuf(request, request_buf);
    error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
    ofpbuf_delete(request_buf);

    if (reply) {
        dpif_netlink_dp_init(reply);
        if (!error) {
            error = dpif_netlink_dp_from_ofpbuf(reply, *bufp);
        }
        if (error) {
            /* On failure, hand back no buffer ('reply' is already empty). */
            ofpbuf_delete(*bufp);
            *bufp = NULL;
        }
    }
    return error;
}
|
|
|
|
|
|
|
|
|
|
/* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
|
|
|
|
|
* The caller must free '*bufp' when the reply is no longer needed ('reply'
|
|
|
|
|
* will contain pointers into '*bufp'). */
|
2011-05-04 13:58:10 -07:00
|
|
|
|
/* Obtains information about 'dpif_' and stores it into '*reply' and '*bufp'.
 * The caller must free '*bufp' when the reply is no longer needed ('reply'
 * will contain pointers into '*bufp'). */
static int
dpif_netlink_dp_get(const struct dpif *dpif_, struct dpif_netlink_dp *reply,
                    struct ofpbuf **bufp)
{
    struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
    struct dpif_netlink_dp request;

    /* GET request targeting this dpif's datapath by ifindex. */
    dpif_netlink_dp_init(&request);
    request.cmd = OVS_DP_CMD_GET;
    request.dp_ifindex = dpif->dp_ifindex;

    return dpif_netlink_dp_transact(&request, reply, bufp);
}
|
2014-09-18 04:17:54 -07:00
|
|
|
|
|
2011-08-18 10:35:40 -07:00
|
|
|
|
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
|
2011-01-28 14:00:51 -08:00
|
|
|
|
* by Netlink attributes, into 'flow'. Returns 0 if successful, otherwise a
|
2011-01-26 15:42:00 -08:00
|
|
|
|
* positive errno value.
|
|
|
|
|
*
|
|
|
|
|
* 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
|
|
|
|
|
* while 'flow' is still in use. */
|
|
|
|
|
/* Parses the contents of 'buf', which contains a "struct ovs_header" followed
 * by Netlink attributes, into 'flow'.  Returns 0 if successful, otherwise a
 * positive errno value.
 *
 * 'flow' will contain pointers into 'buf', so the caller should not free 'buf'
 * while 'flow' is still in use. */
static int
dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
                              const struct ofpbuf *buf)
{
    /* All attributes are individually optional, but a valid message must
     * carry at least one of KEY or UFID (checked after parsing). */
    static const struct nl_policy ovs_flow_policy[__OVS_FLOW_ATTR_MAX] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
        [OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
                                  .optional = true },
        [OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
        [OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
        [OVS_FLOW_ATTR_UFID] = { .type = NL_A_U128, .optional = true },
        /* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
        /* The kernel never uses OVS_FLOW_ATTR_PROBE. */
        /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
    };

    dpif_netlink_flow_init(flow);

    /* Peel off the Netlink, Generic Netlink, and OVS headers. */
    struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
    struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
    struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
    struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);

    struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
    if (!nlmsg || !genl || !ovs_header
        || nlmsg->nlmsg_type != ovs_flow_family
        || !nl_policy_parse(&b, 0, ovs_flow_policy, a,
                            ARRAY_SIZE(ovs_flow_policy))) {
        return EINVAL;
    }
    /* A flow must be identified by a key, a UFID, or both. */
    if (!a[OVS_FLOW_ATTR_KEY] && !a[OVS_FLOW_ATTR_UFID]) {
        return EINVAL;
    }

    flow->nlmsg_flags = nlmsg->nlmsg_flags;
    flow->dp_ifindex = ovs_header->dp_ifindex;
    if (a[OVS_FLOW_ATTR_KEY]) {
        flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
        flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
    }

    if (a[OVS_FLOW_ATTR_UFID]) {
        flow->ufid = nl_attr_get_u128(a[OVS_FLOW_ATTR_UFID]);
        flow->ufid_present = true;
    }
    if (a[OVS_FLOW_ATTR_MASK]) {
        flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
        flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
    }
    if (a[OVS_FLOW_ATTR_ACTIONS]) {
        flow->actions = nl_attr_get(a[OVS_FLOW_ATTR_ACTIONS]);
        flow->actions_len = nl_attr_get_size(a[OVS_FLOW_ATTR_ACTIONS]);
    }
    if (a[OVS_FLOW_ATTR_STATS]) {
        flow->stats = nl_attr_get(a[OVS_FLOW_ATTR_STATS]);
    }
    if (a[OVS_FLOW_ATTR_TCP_FLAGS]) {
        flow->tcp_flags = nl_attr_get(a[OVS_FLOW_ATTR_TCP_FLAGS]);
    }
    if (a[OVS_FLOW_ATTR_USED]) {
        flow->used = nl_attr_get(a[OVS_FLOW_ATTR_USED]);
    }
    return 0;
}
|
|
|
|
|
|
2017-06-02 16:16:17 +00:00
|
|
|
|
|
|
|
|
|
/*
|
2017-07-18 15:32:44 -07:00
|
|
|
|
* If PACKET_TYPE attribute is present in 'data', it filters PACKET_TYPE out.
|
|
|
|
|
* If the flow is not Ethernet, the OVS_KEY_ATTR_PACKET_TYPE is converted to
|
|
|
|
|
* OVS_KEY_ATTR_ETHERTYPE. Puts 'data' to 'buf'.
|
2017-06-02 16:16:17 +00:00
|
|
|
|
*/
|
|
|
|
|
/*
 * If PACKET_TYPE attribute is present in 'data', it filters PACKET_TYPE out.
 * If the flow is not Ethernet, the OVS_KEY_ATTR_PACKET_TYPE is converted to
 * OVS_KEY_ATTR_ETHERTYPE.  Puts 'data' into 'buf' as a nested attribute of
 * type 'type'.
 */
static void
put_exclude_packet_type(struct ofpbuf *buf, uint16_t type,
                        const struct nlattr *data, uint16_t data_len)
{
    const struct nlattr *packet_type;

    packet_type = nl_attr_find__(data, data_len, OVS_KEY_ATTR_PACKET_TYPE);

    if (packet_type) {
        /* exclude PACKET_TYPE Netlink attribute. */
        ovs_assert(NLA_ALIGN(packet_type->nla_len) == NL_A_U32_SIZE);
        size_t packet_type_len = NL_A_U32_SIZE;
        /* Split 'data' into the bytes before and after the PACKET_TYPE
         * attribute so it can be skipped during the copy. */
        size_t first_chunk_size = (uint8_t *)packet_type - (uint8_t *)data;
        size_t second_chunk_size = data_len - first_chunk_size
                                   - packet_type_len;
        struct nlattr *next_attr = nl_attr_next(packet_type);
        size_t ofs;

        ofs = nl_msg_start_nested(buf, type);
        nl_msg_put(buf, data, first_chunk_size);
        nl_msg_put(buf, next_attr, second_chunk_size);
        if (!nl_attr_find__(data, data_len, OVS_KEY_ATTR_ETHERNET)) {
            /* Non-Ethernet flow: carry the packet type's namespace type as
             * an ETHERTYPE attribute, overwriting one already copied into
             * the nested attribute if present, otherwise appending it. */
            ovs_be16 pt = pt_ns_type_be(nl_attr_get_be32(packet_type));
            const struct nlattr *nla;

            nla = nl_attr_find(buf, ofs + NLA_HDRLEN, OVS_KEY_ATTR_ETHERTYPE);
            if (nla) {
                ovs_be16 *ethertype;

                ethertype = CONST_CAST(ovs_be16 *, nl_attr_get(nla));
                *ethertype = pt;
            } else {
                nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, pt);
            }
        }
        nl_msg_end_nested(buf, ofs);
    } else {
        /* No PACKET_TYPE present: copy 'data' through unchanged. */
        nl_msg_put_unspec(buf, type, data, data_len);
    }
}
|
|
|
|
|
|
2011-08-18 10:35:40 -07:00
|
|
|
|
/* Appends to 'buf' (which must initially be empty) a "struct ovs_header"
 * followed by Netlink attributes corresponding to 'flow'.
 *
 * The resulting message is a Generic Netlink request in the ovs_flow family,
 * carrying 'flow->cmd'; 'flow->nlmsg_flags' is OR'd into the standard
 * NLM_F_REQUEST flags. */
static void
dpif_netlink_flow_to_ofpbuf(const struct dpif_netlink_flow *flow,
                            struct ofpbuf *buf)
{
    struct ovs_header *ovs_header;

    nl_msg_put_genlmsghdr(buf, 0, ovs_flow_family,
                          NLM_F_REQUEST | flow->nlmsg_flags,
                          flow->cmd, OVS_FLOW_VERSION);

    ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
    ovs_header->dp_ifindex = flow->dp_ifindex;

    if (flow->ufid_present) {
        nl_msg_put_u128(buf, OVS_FLOW_ATTR_UFID, flow->ufid);
    }
    if (flow->ufid_terse) {
        /* Ask the datapath to omit key, mask, and actions from its reply. */
        nl_msg_put_u32(buf, OVS_FLOW_ATTR_UFID_FLAGS,
                       OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK
                       | OVS_UFID_F_OMIT_ACTIONS);
    }
    /* Key/mask/actions are only needed when the flow is not identified
     * tersely by UFID alone. */
    if (!flow->ufid_terse || !flow->ufid_present) {
        if (flow->key_len) {
            /* put_exclude_packet_type() filters OVS_KEY_ATTR_PACKET_TYPE
             * out of the key before it goes onto the wire. */
            put_exclude_packet_type(buf, OVS_FLOW_ATTR_KEY, flow->key,
                                           flow->key_len);
        }
        if (flow->mask_len) {
            put_exclude_packet_type(buf, OVS_FLOW_ATTR_MASK, flow->mask,
                                           flow->mask_len);
        }
        if (flow->actions || flow->actions_len) {
            nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
                              flow->actions, flow->actions_len);
        }
    }

    /* We never need to send these to the kernel. */
    ovs_assert(!flow->stats);
    ovs_assert(!flow->tcp_flags);
    ovs_assert(!flow->used);

    if (flow->clear) {
        nl_msg_put_flag(buf, OVS_FLOW_ATTR_CLEAR);
    }
    if (flow->probe) {
        nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
    }
}
|
|
|
|
|
|
|
|
|
|
/* Clears 'flow' to "empty" values. */
|
2011-05-04 13:58:10 -07:00
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_init(struct dpif_netlink_flow *flow)
|
2011-01-26 15:42:00 -08:00
|
|
|
|
{
|
|
|
|
|
memset(flow, 0, sizeof *flow);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Executes 'request' in the kernel datapath. If the command fails, returns a
|
|
|
|
|
* positive errno value. Otherwise, if 'reply' and 'bufp' are null, returns 0
|
|
|
|
|
* without doing anything else. If 'reply' and 'bufp' are nonnull, then the
|
2011-01-28 14:00:51 -08:00
|
|
|
|
* result of the command is expected to be a flow also, which is decoded and
|
|
|
|
|
* stored in '*reply' and '*bufp'. The caller must free '*bufp' when the reply
|
|
|
|
|
* is no longer needed ('reply' will contain pointers into '*bufp'). */
|
2011-05-04 13:58:10 -07:00
|
|
|
|
static int
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_transact(struct dpif_netlink_flow *request,
|
|
|
|
|
struct dpif_netlink_flow *reply,
|
|
|
|
|
struct ofpbuf **bufp)
|
2011-01-26 15:42:00 -08:00
|
|
|
|
{
|
2011-01-28 14:00:51 -08:00
|
|
|
|
struct ofpbuf *request_buf;
|
2011-01-26 15:42:00 -08:00
|
|
|
|
int error;
|
|
|
|
|
|
2012-11-06 13:14:55 -08:00
|
|
|
|
ovs_assert((reply != NULL) == (bufp != NULL));
|
2011-01-26 15:42:00 -08:00
|
|
|
|
|
2011-09-27 16:07:23 -07:00
|
|
|
|
if (reply) {
|
|
|
|
|
request->nlmsg_flags |= NLM_F_ECHO;
|
|
|
|
|
}
|
|
|
|
|
|
2011-01-28 14:00:51 -08:00
|
|
|
|
request_buf = ofpbuf_new(1024);
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_to_ofpbuf(request, request_buf);
|
2013-04-29 13:57:50 -07:00
|
|
|
|
error = nl_transact(NETLINK_GENERIC, request_buf, bufp);
|
2011-01-28 14:00:51 -08:00
|
|
|
|
ofpbuf_delete(request_buf);
|
2011-01-26 15:42:00 -08:00
|
|
|
|
|
2011-01-28 14:00:51 -08:00
|
|
|
|
if (reply) {
|
|
|
|
|
if (!error) {
|
2014-09-18 04:17:54 -07:00
|
|
|
|
error = dpif_netlink_flow_from_ofpbuf(reply, *bufp);
|
2011-01-28 14:00:51 -08:00
|
|
|
|
}
|
2011-01-26 15:42:00 -08:00
|
|
|
|
if (error) {
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_init(reply);
|
2011-01-28 14:00:51 -08:00
|
|
|
|
ofpbuf_delete(*bufp);
|
|
|
|
|
*bufp = NULL;
|
2011-01-26 15:42:00 -08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *flow,
|
|
|
|
|
struct dpif_flow_stats *stats)
|
2011-01-26 15:42:00 -08:00
|
|
|
|
{
|
|
|
|
|
if (flow->stats) {
|
2014-06-13 15:28:29 -07:00
|
|
|
|
stats->n_packets = get_32aligned_u64(&flow->stats->n_packets);
|
|
|
|
|
stats->n_bytes = get_32aligned_u64(&flow->stats->n_bytes);
|
2011-01-26 15:42:00 -08:00
|
|
|
|
} else {
|
|
|
|
|
stats->n_packets = 0;
|
|
|
|
|
stats->n_bytes = 0;
|
|
|
|
|
}
|
2011-10-04 15:25:14 -07:00
|
|
|
|
stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
|
2011-01-26 15:42:00 -08:00
|
|
|
|
stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
|
|
|
|
|
}
|
2016-07-01 13:49:34 -07:00
|
|
|
|
|
2012-06-01 17:40:31 -04:00
|
|
|
|
/* Logs information about a packet that was recently lost in 'ch' (in
|
|
|
|
|
* 'dpif_'). */
|
|
|
|
|
static void
|
2014-09-18 04:17:54 -07:00
|
|
|
|
report_loss(struct dpif_netlink *dpif, struct dpif_channel *ch, uint32_t ch_idx,
|
2014-02-26 10:10:29 -08:00
|
|
|
|
uint32_t handler_id)
|
2012-06-01 17:40:31 -04:00
|
|
|
|
{
|
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
|
|
|
|
|
struct ds s;
|
|
|
|
|
|
2013-01-25 14:29:41 -08:00
|
|
|
|
if (VLOG_DROP_WARN(&rl)) {
|
2012-06-01 17:40:31 -04:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ds_init(&s);
|
|
|
|
|
if (ch->last_poll != LLONG_MIN) {
|
|
|
|
|
ds_put_format(&s, " (last polled %lld ms ago)",
|
|
|
|
|
time_msec() - ch->last_poll);
|
|
|
|
|
}
|
|
|
|
|
|
2014-02-26 10:10:29 -08:00
|
|
|
|
VLOG_WARN("%s: lost packet on port channel %u of handler %u",
|
2014-04-17 16:33:17 -07:00
|
|
|
|
dpif_name(&dpif->dpif), ch_idx, handler_id);
|
2012-06-01 17:40:31 -04:00
|
|
|
|
ds_destroy(&s);
|
|
|
|
|
}
|