2009-06-19 14:09:39 -07:00
|
|
|
|
/*
|
2013-12-24 16:08:57 -08:00
|
|
|
|
* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
|
2009-06-19 14:09:39 -07:00
|
|
|
|
*
|
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
|
* You may obtain a copy of the License at:
|
|
|
|
|
*
|
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
*
|
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
|
* limitations under the License.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include <config.h>
|
netdev-dpdk: Fix race condition with DPDK mempools in non pmd threads
DPDK mempools rely on rte_lcore_id() to implement a thread-local cache.
Our non pmd threads had rte_lcore_id() == 0. This allowed concurrent access to
the "thread-local" cache, causing crashes.
This commit resolves the issue with the following changes:
- Every non pmd thread has the same lcore_id (0, for management reasons), which
is not shared with any pmd thread (lcore_id for pmd threads now start from 1)
- DPDK mbufs must be allocated/freed in pmd threads. When there is the need to
use mempools in non pmd threads, like in dpdk_do_tx_copy(), a mutex must be
held.
- The previous change does not allow us anymore to pass DPDK mbufs to handler
threads: therefore this commit partially revert 143859ec63d45e. Now packets
are copied for upcall processing. We can remove the extra memcpy by
processing upcalls in the pmd thread itself.
With the introduction of the extra locking, the packet throughput will be lower
in the following cases:
- When using internal (tap) devices with DPDK devices on the same datapath.
Anyway, to support internal devices efficiently, we needed DPDK KNI devices,
which will be proper pmd devices and will not need this locking.
- When packets are processed in the slow path by non pmd threads. This overhead
can be avoided by handling the upcalls directly in pmd threads (a change that
has already been proposed by Ryan Wilson)
Also, the following two fixes have been introduced:
- In dpdk_free_buf() use rte_pktmbuf_free_seg() instead of rte_mempool_put().
This allows OVS to run properly with CONFIG_RTE_LIBRTE_MBUF_DEBUG DPDK option
- Do not bulk free mbufs in a transmission queue. They may belong to different
mempools
Signed-off-by: Daniele Di Proietto <ddiproietto@vmware.com>
Acked-by: Pravin B Shelar <pshelar@nicira.com>
2014-07-17 14:29:36 -07:00
|
|
|
|
#include "dpif-netdev.h"
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
|
|
|
|
#include <ctype.h>
|
|
|
|
|
#include <errno.h>
|
|
|
|
|
#include <fcntl.h>
|
|
|
|
|
#include <inttypes.h>
|
|
|
|
|
#include <netinet/in.h>
|
2010-05-26 10:05:19 -07:00
|
|
|
|
#include <sys/socket.h>
|
2010-02-12 12:51:36 -08:00
|
|
|
|
#include <net/if.h>
|
2010-12-10 10:40:58 -08:00
|
|
|
|
#include <stdint.h>
|
2009-06-19 14:09:39 -07:00
|
|
|
|
#include <stdlib.h>
|
|
|
|
|
#include <string.h>
|
|
|
|
|
#include <sys/ioctl.h>
|
|
|
|
|
#include <sys/stat.h>
|
|
|
|
|
#include <unistd.h>
|
|
|
|
|
|
2013-11-04 06:23:54 -08:00
|
|
|
|
#include "classifier.h"
|
2014-05-20 13:21:09 -07:00
|
|
|
|
#include "cmap.h"
|
2009-06-19 14:09:39 -07:00
|
|
|
|
#include "csum.h"
|
2010-11-29 12:21:08 -08:00
|
|
|
|
#include "dpif.h"
|
2009-06-19 14:09:39 -07:00
|
|
|
|
#include "dpif-provider.h"
|
2010-11-29 12:21:08 -08:00
|
|
|
|
#include "dummy.h"
|
2011-01-23 18:44:44 -08:00
|
|
|
|
#include "dynamic-string.h"
|
lib/classifier: Lockless lookups.
Now that all the relevant classifier structures use RCU and internal
mutual exclusion for modifications, we can remove the fat-rwlock and
thus make the classifier lookups lockless.
As the readers are operating concurrently with the writers, a
concurrent reader may or may not see a new rule being added by a
writer, depending on how the concurrent events overlap with each
other. Overall, this is no different from the former locked behavior,
but there the visibility of the new rule only depended on the timing
of the locking functions.
A new rule is first added to the segment indices, so the readers may
find the rule in the indices before the rule is visible in the
subtables 'rules' map. This may result in us losing the opportunity
to quit lookups earlier, resulting in sub-optimal wildcarding. This
will be fixed by forthcoming revalidation always scheduled after flow
table changes.
Similar behavior may happen due to us removing the overlapping rule
(if any) from the indices only after the corresponding new rule has
been added.
The subtable's max priority is updated only after a rule is inserted
to the maps, so the concurrent readers may not see the rule, as the
updated priority ordered subtable list will only be visible after the
subtable's max priority is updated.
Similarly, the classifier's partitions are updated by the caller after
the rule is inserted to the maps, so the readers may keep skipping the
subtable until they see the updated partitions.
Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Acked-by: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
2014-07-11 02:29:08 -07:00
|
|
|
|
#include "fat-rwlock.h"
|
2009-06-19 14:09:39 -07:00
|
|
|
|
#include "flow.h"
|
2014-07-04 06:38:47 -07:00
|
|
|
|
#include "cmap.h"
|
2013-12-27 17:00:30 -08:00
|
|
|
|
#include "latch.h"
|
2009-06-19 14:09:39 -07:00
|
|
|
|
#include "list.h"
|
2013-12-11 11:07:01 -08:00
|
|
|
|
#include "meta-flow.h"
|
2009-06-19 14:09:39 -07:00
|
|
|
|
#include "netdev.h"
|
2014-03-20 22:07:44 -07:00
|
|
|
|
#include "netdev-dpdk.h"
|
2012-12-16 17:08:50 -08:00
|
|
|
|
#include "netdev-vport.h"
|
2010-12-10 10:40:58 -08:00
|
|
|
|
#include "netlink.h"
|
2013-05-29 15:06:38 +09:00
|
|
|
|
#include "odp-execute.h"
|
2009-06-19 14:09:39 -07:00
|
|
|
|
#include "odp-util.h"
|
|
|
|
|
#include "ofp-print.h"
|
|
|
|
|
#include "ofpbuf.h"
|
2014-03-05 22:41:30 -08:00
|
|
|
|
#include "ovs-rcu.h"
|
2014-06-23 11:43:57 -07:00
|
|
|
|
#include "packet-dpif.h"
|
2009-06-19 14:09:39 -07:00
|
|
|
|
#include "packets.h"
|
|
|
|
|
#include "poll-loop.h"
|
2011-10-11 11:07:14 -07:00
|
|
|
|
#include "random.h"
|
2013-08-07 13:29:54 -07:00
|
|
|
|
#include "seq.h"
|
2010-11-24 12:35:22 -08:00
|
|
|
|
#include "shash.h"
|
2012-01-19 10:24:46 -08:00
|
|
|
|
#include "sset.h"
|
2009-06-19 14:09:39 -07:00
|
|
|
|
#include "timeval.h"
|
2013-07-29 15:11:49 -07:00
|
|
|
|
#include "unixctl.h"
|
2009-06-19 14:09:39 -07:00
|
|
|
|
#include "util.h"
|
|
|
|
|
#include "vlog.h"
|
2010-07-16 11:02:49 -07:00
|
|
|
|
|
2010-10-19 14:47:01 -07:00
|
|
|
|
VLOG_DEFINE_THIS_MODULE(dpif_netdev);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2013-11-04 06:23:54 -08:00
|
|
|
|
/* By default, choose a priority in the middle. */
|
|
|
|
|
#define NETDEV_RULE_PRIORITY 0x8000
|
|
|
|
|
|
2014-06-23 12:36:11 -07:00
|
|
|
|
#define FLOW_DUMP_MAX_BATCH 50
|
2014-03-05 15:27:31 -08:00
|
|
|
|
/* Use per thread recirc_depth to prevent recirculation loop. */
|
|
|
|
|
#define MAX_RECIRC_DEPTH 5
|
|
|
|
|
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)
|
2014-03-20 10:57:41 -07:00
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
/* Configuration parameters. */
|
|
|
|
|
enum { MAX_FLOWS = 65536 }; /* Maximum number of flows in flow table. */
|
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
/* Protects against changes to 'dp_netdevs'. */
|
|
|
|
|
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
|
|
|
|
|
|
|
|
|
|
/* Contains all 'struct dp_netdev's. */
|
|
|
|
|
static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex)
|
|
|
|
|
= SHASH_INITIALIZER(&dp_netdevs);
|
|
|
|
|
|
2014-07-26 15:39:58 -07:00
|
|
|
|
static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);
|
2014-07-26 06:51:55 +00:00
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
/* Datapath based on the network device interface from netdev.h.
 *
 *
 * Thread-safety
 * =============
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 *
 * Acquisition order is, from outermost to innermost:
 *
 *    dp_netdev_mutex (global)
 *    port_mutex
 *    flow_mutex
 */
struct dp_netdev {
    const struct dpif_class *const class;   /* Dpif class this datapath was
                                             * created with ("netdev" or a
                                             * dummy variant). */
    const char *const name;                 /* Key in the 'dp_netdevs' shash. */
    struct dpif *dpif;                      /* Set in dpif_netdev_open(). */
    struct ovs_refcount ref_cnt;            /* Open dpifs hold references. */
    atomic_flag destroyed;                  /* Set once when dp is destroyed. */

    /* Flows.
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'cls' must be made while still holding the 'flow_mutex'.
     */
    struct ovs_mutex flow_mutex;
    struct classifier cls;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* Statistics.
     *
     * ovsthread_stats is internally synchronized. */
    struct ovsthread_stats stats; /* Contains 'struct dp_netdev_stats *'. */

    /* Ports.
     *
     * Protected by RCU.  Take the mutex to add or remove ports. */
    struct ovs_mutex port_mutex;
    struct cmap ports;
    struct seq *port_seq;       /* Incremented whenever a port changes. */

    /* Protects access to ofproto-dpif-upcall interface during revalidator
     * thread synchronization. */
    struct fat_rwlock upcall_rwlock;
    upcall_callback *upcall_cb;  /* Callback function for executing upcalls. */
    void *upcall_aux;

    /* Forwarding threads. */
    struct latch exit_latch;            /* Signals pmd threads to exit. */
    struct pmd_thread *pmd_threads;     /* Array of 'n_pmd_threads' elements. */
    size_t n_pmd_threads;
    int pmd_count;                      /* NOTE(review): relationship to
                                         * 'n_pmd_threads' not visible in this
                                         * chunk — confirm before relying. */
};
|
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
|
2014-05-20 13:21:09 -07:00
|
|
|
|
odp_port_t);
|
2013-12-24 16:08:57 -08:00
|
|
|
|
|
2014-03-19 07:47:12 -07:00
|
|
|
|
/* Kinds of datapath-level statistics; indexes into the 'n' array of
 * 'struct dp_netdev_stats'. */
enum dp_stat_type {
    DP_STAT_HIT,                /* Packets that matched in the flow table. */
    DP_STAT_MISS,               /* Packets that did not match. */
    DP_STAT_LOST,               /* Packets not passed up to the client. */
    DP_N_STATS                  /* Number of counters (array size). */
};
|
|
|
|
|
|
|
|
|
|
/* Contained by struct dp_netdev's 'stats' member.
 *
 * One instance exists per ovsthread_stats bucket, so counters are updated
 * without global contention and must be summed across buckets to read. */
struct dp_netdev_stats {
    struct ovs_mutex mutex;     /* Protects 'n'. */

    /* Indexed by DP_STAT_*, protected by 'mutex'. */
    unsigned long long int n[DP_N_STATS] OVS_GUARDED;
};
|
|
|
|
|
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    struct cmap_node node;      /* Node in dp_netdev's 'ports'. */
    odp_port_t port_no;         /* Datapath port number. */
    struct netdev *netdev;      /* Underlying network device. */
    struct netdev_saved_flags *sf;  /* Saved netdev flags, restored on close. */
    struct netdev_rxq **rxq;    /* Array of rx queues; length presumably one
                                 * per netdev rxq — confirm with callers. */
    struct ovs_refcount ref_cnt;
    char *type;                 /* Port type as requested by user. */
};
|
|
|
|
|
|
2014-06-23 11:43:59 -07:00
|
|
|
|
|
|
|
|
|
/* Stores a miniflow */

/* There are fields in the flow structure that we never use. Therefore we can
 * save a few words of memory */
/* Inline buffer size, in 32-bit words: the full flow minus the miniflow's
 * own inline words and the unused 'regs' and 'metadata' fields. */
#define NETDEV_KEY_BUF_SIZE_U32 (FLOW_U32S - MINI_N_INLINE \
                                 - FLOW_U32_SIZE(regs) \
                                 - FLOW_U32_SIZE(metadata) \
                                )
struct netdev_flow_key {
    struct miniflow flow;       /* Header; its values land in 'buf' below. */
    uint32_t buf[NETDEV_KEY_BUF_SIZE_U32];
};
|
2014-06-23 11:43:59 -07:00
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
/* A flow in dp_netdev's 'flow_table'.
 *
 *
 * Thread-safety
 * =============
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its dp_netdev's classifier.  The text below calls this classifier 'cls'.
 *
 * Motivation
 * ----------
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 *
 * Rules
 * -----
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    /* Packet classification. */
    const struct cls_rule cr;   /* In owning dp_netdev's 'cls'. */

    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev's 'flow_table'. */
    const struct flow flow;      /* The flow that created this entry. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    /* Statistics.
     *
     * Reading or writing these members requires 'mutex'. */
    struct ovsthread_stats stats; /* Contains "struct dp_netdev_flow_stats". */

    /* Actions.  RCU-protected pointer: read with
     * dp_netdev_flow_get_actions(), replaced atomically by writers. */
    OVSRCU_TYPE(struct dp_netdev_actions *) actions;
};
|
|
|
|
|
|
2014-08-11 17:25:50 -07:00
|
|
|
|
static void dp_netdev_flow_unref(struct dp_netdev_flow *);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
|
2014-01-22 16:03:10 -08:00
|
|
|
|
/* Contained by struct dp_netdev_flow's 'stats' member.
 *
 * Per-bucket statistics for a single flow; all fields guarded by 'mutex'. */
struct dp_netdev_flow_stats {
    struct ovs_mutex mutex;         /* Guards all the other members. */

    long long int used OVS_GUARDED; /* Last used time, in monotonic msecs. */
    long long int packet_count OVS_GUARDED; /* Number of packets matched. */
    long long int byte_count OVS_GUARDED;   /* Number of bytes matched. */
    uint16_t tcp_flags OVS_GUARDED; /* Bitwise-OR of seen tcp_flags values. */
};
|
|
|
|
|
|
2014-01-08 14:37:13 -08:00
|
|
|
|
/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 *
 * Thread-safety
 * =============
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    struct nlattr *actions;     /* Sequence of OVS_ACTION_ATTR_* attributes. */
    unsigned int size;          /* Size of 'actions', in bytes. */
};
|
|
|
|
|
|
|
|
|
|
struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
|
|
|
|
|
size_t);
|
2014-03-05 22:41:30 -08:00
|
|
|
|
struct dp_netdev_actions *dp_netdev_flow_get_actions(
|
|
|
|
|
const struct dp_netdev_flow *);
|
|
|
|
|
static void dp_netdev_actions_free(struct dp_netdev_actions *);
|
2014-01-08 14:37:13 -08:00
|
|
|
|
|
2014-03-20 10:57:41 -07:00
|
|
|
|
/* PMD: Poll modes drivers.  PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev can
 * not implement rx-wait for these devices.  dpif-netdev needs to poll
 * these device to check for recv buffer.  pmd-thread does polling for
 * devices assigned to itself thread.
 *
 * DPDK used PMD for accessing NIC.
 *
 * A thread that receives packets from PMD ports, looks them up in the flow
 * table, and executes the actions it finds.
 **/
struct pmd_thread {
    struct dp_netdev *dp;       /* Owning datapath. */
    pthread_t thread;
    int id;                     /* Index into dp->pmd_threads; presumably also
                                 * the DPDK lcore binding — TODO confirm. */
    atomic_uint change_seq;     /* Bumped to tell the thread to reload its
                                 * configuration — confirm against writers. */
};
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
/* Interface to netdev-based datapath.
 *
 * One instance per open dpif; several dpifs may share one 'dp'. */
struct dpif_netdev {
    struct dpif dpif;           /* Base class; must be first (CONTAINER_OF). */
    struct dp_netdev *dp;       /* Underlying shared datapath. */
    uint64_t last_port_seq;     /* Last observed value of dp->port_seq. */
};
|
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
|
2014-05-20 13:21:09 -07:00
|
|
|
|
struct dp_netdev_port **portp);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
|
2014-05-20 13:21:09 -07:00
|
|
|
|
struct dp_netdev_port **portp);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
static void dp_netdev_free(struct dp_netdev *)
|
|
|
|
|
OVS_REQUIRES(dp_netdev_mutex);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
static void dp_netdev_flow_flush(struct dp_netdev *);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
static int do_add_port(struct dp_netdev *dp, const char *devname,
|
|
|
|
|
const char *type, odp_port_t port_no)
|
2014-05-20 13:21:09 -07:00
|
|
|
|
OVS_REQUIRES(dp->port_mutex);
|
2014-05-22 09:36:00 -07:00
|
|
|
|
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
|
2014-05-20 13:21:09 -07:00
|
|
|
|
OVS_REQUIRES(dp->port_mutex);
|
2010-11-29 12:21:08 -08:00
|
|
|
|
static int dpif_netdev_open(const struct dpif_class *, const char *name,
|
|
|
|
|
bool create, struct dpif **);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
static void dp_netdev_execute_actions(struct dp_netdev *dp,
|
2014-06-23 11:43:59 -07:00
|
|
|
|
struct dpif_packet **, int c,
|
|
|
|
|
bool may_steal, struct pkt_metadata *,
|
2011-10-21 14:38:54 -07:00
|
|
|
|
const struct nlattr *actions,
|
2014-03-20 10:57:41 -07:00
|
|
|
|
size_t actions_len);
|
2014-06-23 11:43:57 -07:00
|
|
|
|
static void dp_netdev_port_input(struct dp_netdev *dp,
|
2014-06-23 11:43:59 -07:00
|
|
|
|
struct dpif_packet **packets, int cnt,
|
|
|
|
|
odp_port_t port_no);
|
2014-03-20 10:57:41 -07:00
|
|
|
|
|
|
|
|
|
static void dp_netdev_set_pmd_threads(struct dp_netdev *, int n);
|
2014-07-26 06:51:55 +00:00
|
|
|
|
static void dp_netdev_disable_upcall(struct dp_netdev *);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
|
|
|
|
static struct dpif_netdev *
|
|
|
|
|
dpif_netdev_cast(const struct dpif *dpif)
|
|
|
|
|
{
|
2012-11-06 13:14:55 -08:00
|
|
|
|
ovs_assert(dpif->dpif_class->open == dpif_netdev_open);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct dp_netdev *
|
|
|
|
|
get_dp_netdev(const struct dpif *dpif)
|
|
|
|
|
{
|
|
|
|
|
return dpif_netdev_cast(dpif)->dp;
|
|
|
|
|
}
|
|
|
|
|
|
2012-05-09 12:17:15 +02:00
|
|
|
|
static int
|
2014-06-12 16:37:33 -07:00
|
|
|
|
dpif_netdev_enumerate(struct sset *all_dps,
|
|
|
|
|
const struct dpif_class *dpif_class)
|
2012-05-09 12:17:15 +02:00
|
|
|
|
{
|
|
|
|
|
struct shash_node *node;
|
|
|
|
|
|
2013-07-30 15:31:48 -07:00
|
|
|
|
ovs_mutex_lock(&dp_netdev_mutex);
|
2012-05-09 12:17:15 +02:00
|
|
|
|
SHASH_FOR_EACH(node, &dp_netdevs) {
|
2014-06-12 16:37:33 -07:00
|
|
|
|
struct dp_netdev *dp = node->data;
|
|
|
|
|
if (dpif_class != dp->class) {
|
|
|
|
|
/* 'dp_netdevs' contains both "netdev" and "dummy" dpifs.
|
|
|
|
|
* If the class doesn't match, skip this dpif. */
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2012-05-09 12:17:15 +02:00
|
|
|
|
sset_add(all_dps, node->name);
|
|
|
|
|
}
|
2013-07-30 15:31:48 -07:00
|
|
|
|
ovs_mutex_unlock(&dp_netdev_mutex);
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
2012-05-09 12:17:15 +02:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2013-01-08 14:37:23 -08:00
|
|
|
|
static bool
|
|
|
|
|
dpif_netdev_class_is_dummy(const struct dpif_class *class)
|
|
|
|
|
{
|
|
|
|
|
return class != &dpif_netdev_class;
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-14 15:50:20 -08:00
|
|
|
|
/* Maps the user-requested port type 'type' to the netdev type to open.
 *
 * Non-"internal" types pass through unchanged; "internal" becomes "dummy"
 * for dummy dpif classes and "tap" for the real one. */
static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
    if (strcmp(type, "internal")) {
        return type;
    }

    return dpif_netdev_class_is_dummy(class) ? "dummy" : "tap";
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
static struct dpif *
|
|
|
|
|
create_dpif_netdev(struct dp_netdev *dp)
|
|
|
|
|
{
|
2010-11-24 12:35:22 -08:00
|
|
|
|
uint16_t netflow_id = hash_string(dp->name, 0);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
struct dpif_netdev *dpif;
|
|
|
|
|
|
2013-12-27 19:41:10 -08:00
|
|
|
|
ovs_refcount_ref(&dp->ref_cnt);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
|
|
|
|
dpif = xmalloc(sizeof *dpif);
|
2010-11-29 12:21:08 -08:00
|
|
|
|
dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
dpif->dp = dp;
|
2013-08-07 13:29:54 -07:00
|
|
|
|
dpif->last_port_seq = seq_read(dp->port_seq);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
|
|
|
|
return &dpif->dpif;
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-19 16:58:44 -07:00
|
|
|
|
/* Choose an unused, non-zero port number and return it on success.
|
|
|
|
|
* Return ODPP_NONE on failure. */
|
|
|
|
|
static odp_port_t
|
2012-10-13 17:45:00 -07:00
|
|
|
|
choose_port(struct dp_netdev *dp, const char *name)
|
2014-05-20 13:21:09 -07:00
|
|
|
|
OVS_REQUIRES(dp->port_mutex)
|
2012-10-13 17:45:00 -07:00
|
|
|
|
{
|
2013-06-19 16:58:44 -07:00
|
|
|
|
uint32_t port_no;
|
2012-10-13 17:45:00 -07:00
|
|
|
|
|
|
|
|
|
if (dp->class != &dpif_netdev_class) {
|
|
|
|
|
const char *p;
|
|
|
|
|
int start_no = 0;
|
|
|
|
|
|
|
|
|
|
/* If the port name begins with "br", start the number search at
|
|
|
|
|
* 100 to make writing tests easier. */
|
|
|
|
|
if (!strncmp(name, "br", 2)) {
|
|
|
|
|
start_no = 100;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* If the port name contains a number, try to assign that port number.
|
|
|
|
|
* This can make writing unit tests easier because port numbers are
|
|
|
|
|
* predictable. */
|
|
|
|
|
for (p = name; *p != '\0'; p++) {
|
|
|
|
|
if (isdigit((unsigned char) *p)) {
|
|
|
|
|
port_no = start_no + strtol(p, NULL, 10);
|
2013-12-24 16:08:57 -08:00
|
|
|
|
if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE)
|
|
|
|
|
&& !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
|
2013-06-19 16:58:44 -07:00
|
|
|
|
return u32_to_odp(port_no);
|
2012-10-13 17:45:00 -07:00
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-24 16:08:57 -08:00
|
|
|
|
for (port_no = 1; port_no <= UINT16_MAX; port_no++) {
|
|
|
|
|
if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) {
|
2013-06-19 16:58:44 -07:00
|
|
|
|
return u32_to_odp(port_no);
|
2012-10-13 17:45:00 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-19 16:58:44 -07:00
|
|
|
|
return ODPP_NONE;
|
2012-10-13 17:45:00 -07:00
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
/* Creates a new dp_netdev named 'name' with class 'class' and stores it in
 * '*dpp' on success, returning 0.  On failure returns a positive errno and
 * frees the partially constructed datapath.
 *
 * The new datapath is registered in 'dp_netdevs' (hence the required global
 * mutex) and gets a local "internal" port on ODPP_LOCAL. */
static int
create_dp_netdev(const char *name, const struct dpif_class *class,
                 struct dp_netdev **dpp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev *dp;
    int error;

    dp = xzalloc(sizeof *dp);
    /* Register early so the name is reserved; dp_netdev_free() removes it
     * again on the error path below. */
    shash_add(&dp_netdevs, name, dp);

    /* 'class' and 'name' are declared const in the struct, so they can only
     * be written here, through CONST_CAST. */
    *CONST_CAST(const struct dpif_class **, &dp->class) = class;
    *CONST_CAST(const char **, &dp->name) = xstrdup(name);
    ovs_refcount_init(&dp->ref_cnt);
    atomic_flag_clear(&dp->destroyed);

    ovs_mutex_init(&dp->flow_mutex);
    classifier_init(&dp->cls, NULL);
    cmap_init(&dp->flow_table);

    ovsthread_stats_init(&dp->stats);

    ovs_mutex_init(&dp->port_mutex);
    cmap_init(&dp->ports);
    dp->port_seq = seq_create();
    latch_init(&dp->exit_latch);
    fat_rwlock_init(&dp->upcall_rwlock);

    /* Disable upcalls by default. */
    dp_netdev_disable_upcall(dp);
    dp->upcall_aux = NULL;
    dp->upcall_cb = NULL;

    /* do_add_port() requires port_mutex. */
    ovs_mutex_lock(&dp->port_mutex);
    error = do_add_port(dp, name, "internal", ODPP_LOCAL);
    ovs_mutex_unlock(&dp->port_mutex);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    *dpp = dp;
    return 0;
}
|
|
|
|
|
|
|
|
|
|
static int
|
2010-11-29 12:21:08 -08:00
|
|
|
|
dpif_netdev_open(const struct dpif_class *class, const char *name,
|
2010-11-18 10:06:41 -08:00
|
|
|
|
bool create, struct dpif **dpifp)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2010-11-24 12:35:22 -08:00
|
|
|
|
struct dp_netdev *dp;
|
2013-07-23 16:56:26 -07:00
|
|
|
|
int error;
|
2010-11-24 12:35:22 -08:00
|
|
|
|
|
2013-07-30 15:31:48 -07:00
|
|
|
|
ovs_mutex_lock(&dp_netdev_mutex);
|
2010-11-24 12:35:22 -08:00
|
|
|
|
dp = shash_find_data(&dp_netdevs, name);
|
|
|
|
|
if (!dp) {
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
} else {
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = (dp->class != class ? EINVAL
|
|
|
|
|
: create ? EEXIST
|
|
|
|
|
: 0);
|
|
|
|
|
}
|
|
|
|
|
if (!error) {
|
|
|
|
|
*dpifp = create_dpif_netdev(dp);
|
2014-07-26 06:51:55 +00:00
|
|
|
|
dp->dpif = *dpifp;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
2013-07-30 15:31:48 -07:00
|
|
|
|
ovs_mutex_unlock(&dp_netdev_mutex);
|
2010-11-24 12:35:22 -08:00
|
|
|
|
|
2013-07-23 16:56:26 -07:00
|
|
|
|
return error;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
/* Requires dp_netdev_mutex so that we can't get a new reference to 'dp'
 * through the 'dp_netdevs' shash while freeing 'dp'.
 *
 * Tears down 'dp' in dependency order: stop pmd threads first, then flush
 * flows and delete ports, then destroy the now-empty containers. */
static void
dp_netdev_free(struct dp_netdev *dp)
    OVS_REQUIRES(dp_netdev_mutex)
{
    struct dp_netdev_port *port;
    struct dp_netdev_stats *bucket;
    int i;

    /* Unregister first so no new dpif can find this datapath. */
    shash_find_and_delete(&dp_netdevs, dp->name);

    /* Stop all pmd threads before freeing anything they might touch. */
    dp_netdev_set_pmd_threads(dp, 0);
    free(dp->pmd_threads);

    dp_netdev_flow_flush(dp);
    ovs_mutex_lock(&dp->port_mutex);
    /* NOTE(review): do_del_port() presumably removes 'port' from 'ports'
     * while we iterate — confirm CMAP_FOR_EACH tolerates removal of the
     * current node. */
    CMAP_FOR_EACH (port, node, &dp->ports) {
        do_del_port(dp, port);
    }
    ovs_mutex_unlock(&dp->port_mutex);

    OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) {
        ovs_mutex_destroy(&bucket->mutex);
        free_cacheline(bucket);
    }
    ovsthread_stats_destroy(&dp->stats);

    classifier_destroy(&dp->cls);
    cmap_destroy(&dp->flow_table);
    ovs_mutex_destroy(&dp->flow_mutex);
    seq_destroy(dp->port_seq);
    cmap_destroy(&dp->ports);
    fat_rwlock_destroy(&dp->upcall_rwlock);
    latch_destroy(&dp->exit_latch);
    free(CONST_CAST(char *, dp->name));
    free(dp);
}
|
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
static void
|
|
|
|
|
dp_netdev_unref(struct dp_netdev *dp)
|
|
|
|
|
{
|
|
|
|
|
if (dp) {
|
|
|
|
|
/* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
|
|
|
|
|
* get a new reference to 'dp' through the 'dp_netdevs' shash. */
|
|
|
|
|
ovs_mutex_lock(&dp_netdev_mutex);
|
2014-07-07 13:18:46 -07:00
|
|
|
|
if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
|
2014-01-08 15:58:11 -08:00
|
|
|
|
dp_netdev_free(dp);
|
|
|
|
|
}
|
|
|
|
|
ovs_mutex_unlock(&dp_netdev_mutex);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
/* dpif 'close' implementation: releases this dpif's reference to the
 * underlying dp_netdev, then frees the dpif wrapper itself. */
static void
dpif_netdev_close(struct dpif *dpif)
{
    dp_netdev_unref(get_dp_netdev(dpif));
    free(dpif);
}
|
|
|
|
|
|
|
|
|
|
static int
|
2010-02-08 13:22:41 -05:00
|
|
|
|
dpif_netdev_destroy(struct dpif *dpif)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
2013-12-27 19:41:10 -08:00
|
|
|
|
if (!atomic_flag_test_and_set(&dp->destroyed)) {
|
2014-07-07 13:18:46 -07:00
|
|
|
|
if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
|
2013-12-27 19:41:10 -08:00
|
|
|
|
/* Can't happen: 'dpif' still owns a reference to 'dp'. */
|
|
|
|
|
OVS_NOT_REACHED();
|
|
|
|
|
}
|
|
|
|
|
}
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2011-10-05 11:18:13 -07:00
|
|
|
|
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
2014-03-19 07:47:12 -07:00
|
|
|
|
struct dp_netdev_stats *bucket;
|
|
|
|
|
size_t i;
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
2014-07-04 06:38:47 -07:00
|
|
|
|
stats->n_flows = cmap_count(&dp->flow_table);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
|
2014-03-19 07:47:12 -07:00
|
|
|
|
stats->n_hit = stats->n_missed = stats->n_lost = 0;
|
|
|
|
|
OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) {
|
|
|
|
|
ovs_mutex_lock(&bucket->mutex);
|
|
|
|
|
stats->n_hit += bucket->n[DP_STAT_HIT];
|
|
|
|
|
stats->n_missed += bucket->n[DP_STAT_MISS];
|
|
|
|
|
stats->n_lost += bucket->n[DP_STAT_LOST];
|
|
|
|
|
ovs_mutex_unlock(&bucket->mutex);
|
|
|
|
|
}
|
2013-12-17 20:18:18 +01:00
|
|
|
|
stats->n_masks = UINT32_MAX;
|
2013-10-21 14:37:34 -07:00
|
|
|
|
stats->n_mask_hit = UINT64_MAX;
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2014-03-20 10:57:41 -07:00
|
|
|
|
static void
|
|
|
|
|
dp_netdev_reload_pmd_threads(struct dp_netdev *dp)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < dp->n_pmd_threads; i++) {
|
|
|
|
|
struct pmd_thread *f = &dp->pmd_threads[i];
|
|
|
|
|
int id;
|
|
|
|
|
|
|
|
|
|
atomic_add(&f->change_seq, 1, &id);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
static uint32_t
|
|
|
|
|
hash_port_no(odp_port_t port_no)
|
|
|
|
|
{
|
|
|
|
|
return hash_int(odp_to_u32(port_no), 0);
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
static int
|
2010-12-03 14:41:38 -08:00
|
|
|
|
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
|
2013-06-19 16:58:44 -07:00
|
|
|
|
odp_port_t port_no)
|
2014-05-20 13:21:09 -07:00
|
|
|
|
OVS_REQUIRES(dp->port_mutex)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2013-05-10 08:55:25 -07:00
|
|
|
|
struct netdev_saved_flags *sf;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
struct dp_netdev_port *port;
|
|
|
|
|
struct netdev *netdev;
|
2013-09-07 12:35:15 +03:00
|
|
|
|
enum netdev_flags flags;
|
2012-01-19 10:24:46 -08:00
|
|
|
|
const char *open_type;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
int error;
|
2014-03-20 20:52:06 -07:00
|
|
|
|
int i;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
|
|
|
|
/* XXX reject devices already in some dp_netdev. */
|
|
|
|
|
|
|
|
|
|
/* Open and validate network device. */
|
2012-11-14 15:50:20 -08:00
|
|
|
|
open_type = dpif_netdev_port_open_type(dp->class, type);
|
2012-01-19 10:24:46 -08:00
|
|
|
|
error = netdev_open(devname, open_type, &netdev);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
if (error) {
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
/* XXX reject non-Ethernet devices */
|
|
|
|
|
|
2013-09-07 12:35:15 +03:00
|
|
|
|
netdev_get_flags(netdev, &flags);
|
|
|
|
|
if (flags & NETDEV_LOOPBACK) {
|
|
|
|
|
VLOG_ERR("%s: cannot add a loopback device", devname);
|
|
|
|
|
netdev_close(netdev);
|
|
|
|
|
return EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
2014-03-20 10:57:41 -07:00
|
|
|
|
port = xzalloc(sizeof *port);
|
|
|
|
|
port->port_no = port_no;
|
|
|
|
|
port->netdev = netdev;
|
2014-03-20 20:52:06 -07:00
|
|
|
|
port->rxq = xmalloc(sizeof *port->rxq * netdev_n_rxq(netdev));
|
2014-03-20 10:57:41 -07:00
|
|
|
|
port->type = xstrdup(type);
|
2014-03-20 20:52:06 -07:00
|
|
|
|
for (i = 0; i < netdev_n_rxq(netdev); i++) {
|
|
|
|
|
error = netdev_rxq_open(netdev, &port->rxq[i], i);
|
|
|
|
|
if (error
|
|
|
|
|
&& !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) {
|
|
|
|
|
VLOG_ERR("%s: cannot receive packets on this network device (%s)",
|
|
|
|
|
devname, ovs_strerror(errno));
|
|
|
|
|
netdev_close(netdev);
|
|
|
|
|
return error;
|
|
|
|
|
}
|
2011-08-05 14:15:32 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-05-10 08:55:25 -07:00
|
|
|
|
error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
if (error) {
|
2014-03-20 20:52:06 -07:00
|
|
|
|
for (i = 0; i < netdev_n_rxq(netdev); i++) {
|
|
|
|
|
netdev_rxq_close(port->rxq[i]);
|
|
|
|
|
}
|
2009-06-19 14:09:39 -07:00
|
|
|
|
netdev_close(netdev);
|
2014-03-20 19:38:14 -07:00
|
|
|
|
free(port->rxq);
|
2014-03-20 10:57:41 -07:00
|
|
|
|
free(port);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
return error;
|
|
|
|
|
}
|
2013-05-10 08:55:25 -07:00
|
|
|
|
port->sf = sf;
|
2014-03-20 10:57:41 -07:00
|
|
|
|
|
|
|
|
|
if (netdev_is_pmd(netdev)) {
|
|
|
|
|
dp->pmd_count++;
|
netdev-dpdk: Fix race condition with DPDK mempools in non pmd threads
DPDK mempools rely on rte_lcore_id() to implement a thread-local cache.
Our non pmd threads had rte_lcore_id() == 0. This allowed concurrent access to
the "thread-local" cache, causing crashes.
This commit resolves the issue with the following changes:
- Every non pmd thread has the same lcore_id (0, for management reasons), which
is not shared with any pmd thread (lcore_id for pmd threads now start from 1)
- DPDK mbufs must be allocated/freed in pmd threads. When there is the need to
use mempools in non pmd threads, like in dpdk_do_tx_copy(), a mutex must be
held.
- The previous change does not allow us anymore to pass DPDK mbufs to handler
threads: therefore this commit partially revert 143859ec63d45e. Now packets
are copied for upcall processing. We can remove the extra memcpy by
processing upcalls in the pmd thread itself.
With the introduction of the extra locking, the packet throughput will be lower
in the following cases:
- When using internal (tap) devices with DPDK devices on the same datapath.
Anyway, to support internal devices efficiently, we needed DPDK KNI devices,
which will be proper pmd devices and will not need this locking.
- When packets are processed in the slow path by non pmd threads. This overhead
can be avoided by handling the upcalls directly in pmd threads (a change that
has already been proposed by Ryan Wilson)
Also, the following two fixes have been introduced:
- In dpdk_free_buf() use rte_pktmbuf_free_seg() instead of rte_mempool_put().
This allows OVS to run properly with CONFIG_RTE_LIBRTE_MBUF_DEBUG DPDK option
- Do not bulk free mbufs in a transmission queue. They may belong to different
mempools
Signed-off-by: Daniele Di Proietto <ddiproietto@vmware.com>
Acked-by: Pravin B Shelar <pshelar@nicira.com>
2014-07-17 14:29:36 -07:00
|
|
|
|
dp_netdev_set_pmd_threads(dp, NR_PMD_THREADS);
|
2014-03-20 10:57:41 -07:00
|
|
|
|
dp_netdev_reload_pmd_threads(dp);
|
|
|
|
|
}
|
|
|
|
|
ovs_refcount_init(&port->ref_cnt);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
cmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
|
2013-08-07 13:29:54 -07:00
|
|
|
|
seq_change(dp->port_seq);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2012-01-12 15:23:23 -08:00
|
|
|
|
static int
|
|
|
|
|
dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
|
2013-06-19 16:58:44 -07:00
|
|
|
|
odp_port_t *port_nop)
|
2012-01-12 15:23:23 -08:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
2013-05-01 11:05:28 -07:00
|
|
|
|
char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
|
|
|
|
|
const char *dpif_port;
|
2013-06-19 16:58:44 -07:00
|
|
|
|
odp_port_t port_no;
|
2013-07-23 16:56:26 -07:00
|
|
|
|
int error;
|
2012-01-12 15:23:23 -08:00
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovs_mutex_lock(&dp->port_mutex);
|
2013-05-01 11:05:28 -07:00
|
|
|
|
dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
|
2013-06-19 16:58:44 -07:00
|
|
|
|
if (*port_nop != ODPP_NONE) {
|
2013-12-24 16:08:57 -08:00
|
|
|
|
port_no = *port_nop;
|
|
|
|
|
error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0;
|
2012-07-27 23:58:24 -07:00
|
|
|
|
} else {
|
2013-05-01 11:05:28 -07:00
|
|
|
|
port_no = choose_port(dp, dpif_port);
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = port_no == ODPP_NONE ? EFBIG : 0;
|
2012-07-27 23:58:24 -07:00
|
|
|
|
}
|
2013-07-23 16:56:26 -07:00
|
|
|
|
if (!error) {
|
2012-01-12 15:23:23 -08:00
|
|
|
|
*port_nop = port_no;
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
|
2012-01-12 15:23:23 -08:00
|
|
|
|
}
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovs_mutex_unlock(&dp->port_mutex);
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
|
|
|
|
return error;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2013-06-19 16:58:44 -07:00
|
|
|
|
dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
2013-07-23 16:56:26 -07:00
|
|
|
|
int error;
|
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovs_mutex_lock(&dp->port_mutex);
|
2014-05-22 09:36:00 -07:00
|
|
|
|
if (port_no == ODPP_LOCAL) {
|
|
|
|
|
error = EINVAL;
|
|
|
|
|
} else {
|
|
|
|
|
struct dp_netdev_port *port;
|
|
|
|
|
|
|
|
|
|
error = get_port_by_number(dp, port_no, &port);
|
|
|
|
|
if (!error) {
|
|
|
|
|
do_del_port(dp, port);
|
|
|
|
|
}
|
|
|
|
|
}
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovs_mutex_unlock(&dp->port_mutex);
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
|
|
|
|
return error;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
2013-06-19 16:58:44 -07:00
|
|
|
|
is_valid_port_number(odp_port_t port_no)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2013-12-24 16:08:57 -08:00
|
|
|
|
return port_no != ODPP_NONE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct dp_netdev_port *
|
|
|
|
|
dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
|
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_port *port;
|
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
CMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) {
|
2013-12-24 16:08:57 -08:00
|
|
|
|
if (port->port_no == port_no) {
|
|
|
|
|
return port;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
get_port_by_number(struct dp_netdev *dp,
|
2013-06-19 16:58:44 -07:00
|
|
|
|
odp_port_t port_no, struct dp_netdev_port **portp)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
if (!is_valid_port_number(port_no)) {
|
|
|
|
|
*portp = NULL;
|
|
|
|
|
return EINVAL;
|
|
|
|
|
} else {
|
2013-12-24 16:08:57 -08:00
|
|
|
|
*portp = dp_netdev_lookup_port(dp, port_no);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
return *portp ? 0 : ENOENT;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-03-20 10:57:19 -07:00
|
|
|
|
static void
|
|
|
|
|
port_ref(struct dp_netdev_port *port)
|
|
|
|
|
{
|
|
|
|
|
if (port) {
|
|
|
|
|
ovs_refcount_ref(&port->ref_cnt);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-05-20 13:21:09 -07:00
|
|
|
|
port_destroy__(struct dp_netdev_port *port)
|
2014-03-20 10:57:19 -07:00
|
|
|
|
{
|
2014-06-04 15:41:09 -07:00
|
|
|
|
int n_rxq = netdev_n_rxq(port->netdev);
|
2014-05-20 13:21:09 -07:00
|
|
|
|
int i;
|
2014-03-20 20:52:06 -07:00
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
netdev_close(port->netdev);
|
|
|
|
|
netdev_restore_flags(port->sf);
|
2014-03-20 20:52:06 -07:00
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
for (i = 0; i < n_rxq; i++) {
|
|
|
|
|
netdev_rxq_close(port->rxq[i]);
|
|
|
|
|
}
|
|
|
|
|
free(port->rxq);
|
|
|
|
|
free(port->type);
|
|
|
|
|
free(port);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
port_unref(struct dp_netdev_port *port)
|
|
|
|
|
{
|
2014-07-07 13:18:46 -07:00
|
|
|
|
if (port && ovs_refcount_unref_relaxed(&port->ref_cnt) == 1) {
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovsrcu_postpone(port_destroy__, port);
|
2014-03-20 10:57:19 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
static int
|
|
|
|
|
get_port_by_name(struct dp_netdev *dp,
|
|
|
|
|
const char *devname, struct dp_netdev_port **portp)
|
2014-05-20 13:21:09 -07:00
|
|
|
|
OVS_REQUIRES(dp->port_mutex)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_port *port;
|
|
|
|
|
|
2014-06-11 11:07:43 -07:00
|
|
|
|
CMAP_FOR_EACH (port, node, &dp->ports) {
|
2013-06-06 15:27:15 -07:00
|
|
|
|
if (!strcmp(netdev_get_name(port->netdev), devname)) {
|
2009-06-19 14:09:39 -07:00
|
|
|
|
*portp = port;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return ENOENT;
|
|
|
|
|
}
|
|
|
|
|
|
2014-05-22 09:36:00 -07:00
|
|
|
|
static void
|
|
|
|
|
do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
|
2014-05-20 13:21:09 -07:00
|
|
|
|
OVS_REQUIRES(dp->port_mutex)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2014-05-22 09:36:00 -07:00
|
|
|
|
cmap_remove(&dp->ports, &port->node, hash_odp_port(port->port_no));
|
2013-08-07 13:29:54 -07:00
|
|
|
|
seq_change(dp->port_seq);
|
2014-03-20 10:57:41 -07:00
|
|
|
|
if (netdev_is_pmd(port->netdev)) {
|
|
|
|
|
dp_netdev_reload_pmd_threads(dp);
|
|
|
|
|
}
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2014-03-20 10:57:19 -07:00
|
|
|
|
port_unref(port);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2011-01-23 18:48:02 -08:00
|
|
|
|
answer_port_query(const struct dp_netdev_port *port,
|
|
|
|
|
struct dpif_port *dpif_port)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2013-06-06 15:27:15 -07:00
|
|
|
|
dpif_port->name = xstrdup(netdev_get_name(port->netdev));
|
2012-01-19 10:24:46 -08:00
|
|
|
|
dpif_port->type = xstrdup(port->type);
|
2011-01-23 18:48:02 -08:00
|
|
|
|
dpif_port->port_no = port->port_no;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2013-06-19 16:58:44 -07:00
|
|
|
|
dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
|
2011-01-23 18:48:02 -08:00
|
|
|
|
struct dpif_port *dpif_port)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
|
|
|
|
struct dp_netdev_port *port;
|
|
|
|
|
int error;
|
|
|
|
|
|
|
|
|
|
error = get_port_by_number(dp, port_no, &port);
|
2012-10-17 23:11:53 -07:00
|
|
|
|
if (!error && dpif_port) {
|
2011-01-23 18:48:02 -08:00
|
|
|
|
answer_port_query(port, dpif_port);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
|
2011-01-23 18:48:02 -08:00
|
|
|
|
struct dpif_port *dpif_port)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
|
|
|
|
struct dp_netdev_port *port;
|
|
|
|
|
int error;
|
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovs_mutex_lock(&dp->port_mutex);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
error = get_port_by_name(dp, devname, &port);
|
2012-10-17 23:11:53 -07:00
|
|
|
|
if (!error && dpif_port) {
|
2011-01-23 18:48:02 -08:00
|
|
|
|
answer_port_query(port, dpif_port);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovs_mutex_unlock(&dp->port_mutex);
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
2014-03-05 22:41:30 -08:00
|
|
|
|
static void
|
|
|
|
|
dp_netdev_flow_free(struct dp_netdev_flow *flow)
|
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_flow_stats *bucket;
|
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
|
|
OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &flow->stats) {
|
|
|
|
|
ovs_mutex_destroy(&bucket->mutex);
|
|
|
|
|
free_cacheline(bucket);
|
|
|
|
|
}
|
|
|
|
|
ovsthread_stats_destroy(&flow->stats);
|
|
|
|
|
|
|
|
|
|
cls_rule_destroy(CONST_CAST(struct cls_rule *, &flow->cr));
|
|
|
|
|
dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
|
|
|
|
|
free(flow);
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-11 17:25:50 -07:00
|
|
|
|
static void dp_netdev_flow_unref(struct dp_netdev_flow *flow)
|
|
|
|
|
{
|
|
|
|
|
if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) {
|
|
|
|
|
ovsrcu_postpone(dp_netdev_flow_free, flow);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
static void
|
2014-01-08 15:58:11 -08:00
|
|
|
|
dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
|
|
|
|
|
OVS_REQUIRES(dp->flow_mutex)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2014-01-08 15:58:11 -08:00
|
|
|
|
struct cls_rule *cr = CONST_CAST(struct cls_rule *, &flow->cr);
|
2014-07-04 06:38:47 -07:00
|
|
|
|
struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
|
2013-11-04 06:23:54 -08:00
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
classifier_remove(&dp->cls, cr);
|
2014-07-04 06:38:47 -07:00
|
|
|
|
cmap_remove(&dp->flow_table, node, flow_hash(&flow->flow, 0));
|
2014-08-11 17:25:50 -07:00
|
|
|
|
|
|
|
|
|
dp_netdev_flow_unref(flow);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dp_netdev_flow_flush(struct dp_netdev *dp)
|
|
|
|
|
{
|
2014-07-21 21:00:04 -07:00
|
|
|
|
struct dp_netdev_flow *netdev_flow;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
ovs_mutex_lock(&dp->flow_mutex);
|
2014-07-29 09:02:23 -07:00
|
|
|
|
CMAP_FOR_EACH (netdev_flow, node, &dp->flow_table) {
|
2014-01-08 15:58:11 -08:00
|
|
|
|
dp_netdev_remove_flow(dp, netdev_flow);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
2014-01-08 15:58:11 -08:00
|
|
|
|
ovs_mutex_unlock(&dp->flow_mutex);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* dpif 'flow_flush' implementation: deletes all flows.  Always returns 0. */
static int
dpif_netdev_flow_flush(struct dpif *dpif)
{
    dp_netdev_flow_flush(get_dp_netdev(dpif));
    return 0;
}
|
|
|
|
|
|
datapath: Change listing ports to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to add new
features to the kernel vport layer without changing userspace software. In
turn, that means that the odp_port structure must become variable-length.
This does not, however, fit in well with the ODP_PORT_LIST ioctl in its
current form, because that would require userspace to know how much space
to allocate for each port in advance, or to allocate as much space as
could possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_PORT_LIST
by a new ioctl ODP_VPORT_DUMP that retrieves information about a single
vport from the datapath on each call. It is much cleaner to allocate the
maximum amount of space for a single vport than to do so for possibly a
large number of vports.
It would be faster to retrieve a number of vports in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
The Netlink version won't need to take the starting port number from
userspace, since Netlink sockets can keep track of that state as part
of their "dump" feature.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-10 13:12:12 -08:00
|
|
|
|
struct dp_netdev_port_state {
|
2014-05-20 13:21:09 -07:00
|
|
|
|
struct cmap_position position;
|
2011-01-23 18:48:02 -08:00
|
|
|
|
char *name;
|
datapath: Change listing ports to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to add new
features to the kernel vport layer without changing userspace software. In
turn, that means that the odp_port structure must become variable-length.
This does not, however, fit in well with the ODP_PORT_LIST ioctl in its
current form, because that would require userspace to know how much space
to allocate for each port in advance, or to allocate as much space as
could possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_PORT_LIST
by a new ioctl ODP_VPORT_DUMP that retrieves information about a single
vport from the datapath on each call. It is much cleaner to allocate the
maximum amount of space for a single vport than to do so for possibly a
large number of vports.
It would be faster to retrieve a number of vports in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
The Netlink version won't need to take the starting port number from
userspace, since Netlink sockets can keep track of that state as part
of their "dump" feature.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-10 13:12:12 -08:00
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
|
|
|
|
|
{
|
|
|
|
|
*statep = xzalloc(sizeof(struct dp_netdev_port_state));
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
static int
|
datapath: Change listing ports to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to add new
features to the kernel vport layer without changing userspace software. In
turn, that means that the odp_port structure must become variable-length.
This does not, however, fit in well with the ODP_PORT_LIST ioctl in its
current form, because that would require userspace to know how much space
to allocate for each port in advance, or to allocate as much space as
could possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_PORT_LIST
by a new ioctl ODP_VPORT_DUMP that retrieves information about a single
vport from the datapath on each call. It is much cleaner to allocate the
maximum amount of space for a single vport than to do so for possibly a
large number of vports.
It would be faster to retrieve a number of vports in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
The Netlink version won't need to take the starting port number from
userspace, since Netlink sockets can keep track of that state as part
of their "dump" feature.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-10 13:12:12 -08:00
|
|
|
|
dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
|
2011-01-23 18:48:02 -08:00
|
|
|
|
struct dpif_port *dpif_port)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
datapath: Change listing ports to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to add new
features to the kernel vport layer without changing userspace software. In
turn, that means that the odp_port structure must become variable-length.
This does not, however, fit in well with the ODP_PORT_LIST ioctl in its
current form, because that would require userspace to know how much space
to allocate for each port in advance, or to allocate as much space as
could possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_PORT_LIST
by a new ioctl ODP_VPORT_DUMP that retrieves information about a single
vport from the datapath on each call. It is much cleaner to allocate the
maximum amount of space for a single vport than to do so for possibly a
large number of vports.
It would be faster to retrieve a number of vports in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
The Netlink version won't need to take the starting port number from
userspace, since Netlink sockets can keep track of that state as part
of their "dump" feature.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-10 13:12:12 -08:00
|
|
|
|
struct dp_netdev_port_state *state = state_;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
2014-05-20 13:21:09 -07:00
|
|
|
|
struct cmap_node *node;
|
2013-12-24 16:08:57 -08:00
|
|
|
|
int retval;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
node = cmap_next_position(&dp->ports, &state->position);
|
2013-12-24 16:08:57 -08:00
|
|
|
|
if (node) {
|
|
|
|
|
struct dp_netdev_port *port;
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
2013-12-24 16:08:57 -08:00
|
|
|
|
port = CONTAINER_OF(node, struct dp_netdev_port, node);
|
|
|
|
|
|
|
|
|
|
free(state->name);
|
|
|
|
|
state->name = xstrdup(netdev_get_name(port->netdev));
|
|
|
|
|
dpif_port->name = state->name;
|
|
|
|
|
dpif_port->type = port->type;
|
|
|
|
|
dpif_port->port_no = port->port_no;
|
|
|
|
|
|
|
|
|
|
retval = 0;
|
|
|
|
|
} else {
|
|
|
|
|
retval = EOF;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
2013-12-24 16:08:57 -08:00
|
|
|
|
return retval;
|
datapath: Change listing ports to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to add new
features to the kernel vport layer without changing userspace software. In
turn, that means that the odp_port structure must become variable-length.
This does not, however, fit in well with the ODP_PORT_LIST ioctl in its
current form, because that would require userspace to know how much space
to allocate for each port in advance, or to allocate as much space as
could possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_PORT_LIST
by a new ioctl ODP_VPORT_DUMP that retrieves information about a single
vport from the datapath on each call. It is much cleaner to allocate the
maximum amount of space for a single vport than to do so for possibly a
large number of vports.
It would be faster to retrieve a number of vports in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
The Netlink version won't need to take the starting port number from
userspace, since Netlink sockets can keep track of that state as part
of their "dump" feature.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-10 13:12:12 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2011-01-23 18:48:02 -08:00
|
|
|
|
dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
|
datapath: Change listing ports to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to add new
features to the kernel vport layer without changing userspace software. In
turn, that means that the odp_port structure must become variable-length.
This does not, however, fit in well with the ODP_PORT_LIST ioctl in its
current form, because that would require userspace to know how much space
to allocate for each port in advance, or to allocate as much space as
could possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_PORT_LIST
by a new ioctl ODP_VPORT_DUMP that retrieves information about a single
vport from the datapath on each call. It is much cleaner to allocate the
maximum amount of space for a single vport than to do so for possibly a
large number of vports.
It would be faster to retrieve a number of vports in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
The Netlink version won't need to take the starting port number from
userspace, since Netlink sockets can keep track of that state as part
of their "dump" feature.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-10 13:12:12 -08:00
|
|
|
|
{
|
2011-01-23 18:48:02 -08:00
|
|
|
|
struct dp_netdev_port_state *state = state_;
|
|
|
|
|
free(state->name);
|
datapath: Change listing ports to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to add new
features to the kernel vport layer without changing userspace software. In
turn, that means that the odp_port structure must become variable-length.
This does not, however, fit in well with the ODP_PORT_LIST ioctl in its
current form, because that would require userspace to know how much space
to allocate for each port in advance, or to allocate as much space as
could possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_PORT_LIST
by a new ioctl ODP_VPORT_DUMP that retrieves information about a single
vport from the datapath on each call. It is much cleaner to allocate the
maximum amount of space for a single vport than to do so for possibly a
large number of vports.
It would be faster to retrieve a number of vports in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
The Netlink version won't need to take the starting port number from
userspace, since Netlink sockets can keep track of that state as part
of their "dump" feature.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-10 13:12:12 -08:00
|
|
|
|
free(state);
|
|
|
|
|
return 0;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2010-02-11 10:59:47 -08:00
|
|
|
|
dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
|
2013-08-07 13:29:54 -07:00
|
|
|
|
uint64_t new_port_seq;
|
2013-07-23 16:56:26 -07:00
|
|
|
|
int error;
|
|
|
|
|
|
2013-08-07 13:29:54 -07:00
|
|
|
|
new_port_seq = seq_read(dpif->dp->port_seq);
|
|
|
|
|
if (dpif->last_port_seq != new_port_seq) {
|
|
|
|
|
dpif->last_port_seq = new_port_seq;
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = ENOBUFS;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
} else {
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = EAGAIN;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
|
|
|
|
return error;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netdev_port_poll_wait(const struct dpif *dpif_)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
2013-08-07 13:29:54 -07:00
|
|
|
|
seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct dp_netdev_flow *
|
|
|
|
|
dp_netdev_flow_cast(const struct cls_rule *cr)
|
|
|
|
|
{
|
|
|
|
|
return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct dp_netdev_flow *
|
2014-04-18 08:26:57 -07:00
|
|
|
|
dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct miniflow *key)
|
2013-11-04 06:23:54 -08:00
|
|
|
|
{
|
2014-01-08 15:58:11 -08:00
|
|
|
|
struct dp_netdev_flow *netdev_flow;
|
2014-04-18 08:26:57 -07:00
|
|
|
|
struct cls_rule *rule;
|
2013-11-04 06:23:54 -08:00
|
|
|
|
|
2014-06-23 18:40:47 -07:00
|
|
|
|
classifier_lookup_miniflow_batch(&dp->cls, &key, &rule, 1);
|
2014-04-18 08:26:57 -07:00
|
|
|
|
netdev_flow = dp_netdev_flow_cast(rule);
|
2013-11-04 06:23:54 -08:00
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
return netdev_flow;
|
2013-11-04 06:23:54 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct dp_netdev_flow *
|
|
|
|
|
dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2013-10-29 02:34:15 -07:00
|
|
|
|
struct dp_netdev_flow *netdev_flow;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2014-07-04 06:38:47 -07:00
|
|
|
|
CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0),
|
2013-10-29 02:34:15 -07:00
|
|
|
|
&dp->flow_table) {
|
2013-11-04 06:23:54 -08:00
|
|
|
|
if (flow_equal(&netdev_flow->flow, flow)) {
|
2014-03-05 22:41:30 -08:00
|
|
|
|
return netdev_flow;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
2014-01-08 15:58:11 -08:00
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-08-13 09:55:54 +12:00
|
|
|
|
get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow,
|
2013-10-29 02:34:15 -07:00
|
|
|
|
struct dpif_flow_stats *stats)
|
2011-01-26 07:03:39 -08:00
|
|
|
|
{
|
2014-01-22 16:03:10 -08:00
|
|
|
|
struct dp_netdev_flow_stats *bucket;
|
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
|
|
memset(stats, 0, sizeof *stats);
|
|
|
|
|
OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &netdev_flow->stats) {
|
|
|
|
|
ovs_mutex_lock(&bucket->mutex);
|
|
|
|
|
stats->n_packets += bucket->packet_count;
|
|
|
|
|
stats->n_bytes += bucket->byte_count;
|
|
|
|
|
stats->used = MAX(stats->used, bucket->used);
|
|
|
|
|
stats->tcp_flags |= bucket->tcp_flags;
|
|
|
|
|
ovs_mutex_unlock(&bucket->mutex);
|
|
|
|
|
}
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-08-13 09:55:54 +12:00
|
|
|
|
static void
|
|
|
|
|
dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
|
|
|
|
|
struct ofpbuf *buffer, struct dpif_flow *flow)
|
|
|
|
|
{
|
|
|
|
|
struct flow_wildcards wc;
|
|
|
|
|
struct dp_netdev_actions *actions;
|
|
|
|
|
|
|
|
|
|
minimask_expand(&netdev_flow->cr.match.mask, &wc);
|
|
|
|
|
odp_flow_key_from_mask(buffer, &wc.masks, &netdev_flow->flow,
|
|
|
|
|
odp_to_u32(wc.masks.in_port.odp_port),
|
|
|
|
|
SIZE_MAX, true);
|
|
|
|
|
flow->mask = ofpbuf_data(buffer);
|
|
|
|
|
flow->mask_len = ofpbuf_size(buffer);
|
|
|
|
|
|
|
|
|
|
actions = dp_netdev_flow_get_actions(netdev_flow);
|
|
|
|
|
flow->actions = actions->actions;
|
|
|
|
|
flow->actions_len = actions->size;
|
|
|
|
|
|
|
|
|
|
get_dpif_flow_stats(netdev_flow, &flow->stats);
|
|
|
|
|
}
|
|
|
|
|
|
2011-01-23 18:44:44 -08:00
|
|
|
|
static int
|
2013-12-11 11:07:01 -08:00
|
|
|
|
dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
|
|
|
|
|
const struct nlattr *mask_key,
|
|
|
|
|
uint32_t mask_key_len, const struct flow *flow,
|
|
|
|
|
struct flow *mask)
|
|
|
|
|
{
|
|
|
|
|
if (mask_key_len) {
|
2014-02-04 08:07:45 -08:00
|
|
|
|
enum odp_key_fitness fitness;
|
|
|
|
|
|
|
|
|
|
fitness = odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow);
|
|
|
|
|
if (fitness) {
|
2013-12-11 11:07:01 -08:00
|
|
|
|
/* This should not happen: it indicates that
|
|
|
|
|
* odp_flow_key_from_mask() and odp_flow_key_to_mask()
|
|
|
|
|
* disagree on the acceptable form of a mask. Log the problem
|
|
|
|
|
* as an error, with enough details to enable debugging. */
|
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
|
|
|
|
|
|
|
|
|
|
if (!VLOG_DROP_ERR(&rl)) {
|
|
|
|
|
struct ds s;
|
|
|
|
|
|
|
|
|
|
ds_init(&s);
|
|
|
|
|
odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
|
|
|
|
|
true);
|
2014-02-04 08:07:45 -08:00
|
|
|
|
VLOG_ERR("internal error parsing flow mask %s (%s)",
|
|
|
|
|
ds_cstr(&s), odp_key_fitness_to_string(fitness));
|
2013-12-11 11:07:01 -08:00
|
|
|
|
ds_destroy(&s);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return EINVAL;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
enum mf_field_id id;
|
|
|
|
|
/* No mask key, unwildcard everything except fields whose
|
|
|
|
|
* prerequisities are not met. */
|
|
|
|
|
memset(mask, 0x0, sizeof *mask);
|
|
|
|
|
|
|
|
|
|
for (id = 0; id < MFF_N_IDS; ++id) {
|
|
|
|
|
/* Skip registers and metadata. */
|
|
|
|
|
if (!(id >= MFF_REG0 && id < MFF_REG0 + FLOW_N_REGS)
|
|
|
|
|
&& id != MFF_METADATA) {
|
|
|
|
|
const struct mf_field *mf = mf_from_id(id);
|
|
|
|
|
if (mf_are_prereqs_ok(mf, flow)) {
|
|
|
|
|
mf_mask_field(mf, mask);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-05 10:27:05 -07:00
|
|
|
|
/* Force unwildcard the in_port.
|
|
|
|
|
*
|
|
|
|
|
* We need to do this even in the case where we unwildcard "everything"
|
|
|
|
|
* above because "everything" only includes the 16-bit OpenFlow port number
|
|
|
|
|
* mask->in_port.ofp_port, which only covers half of the 32-bit datapath
|
|
|
|
|
* port number mask->in_port.odp_port. */
|
|
|
|
|
mask->in_port.odp_port = u32_to_odp(UINT32_MAX);
|
|
|
|
|
|
2013-12-11 11:07:01 -08:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
|
|
|
|
|
struct flow *flow)
|
2011-01-23 18:44:44 -08:00
|
|
|
|
{
|
2013-07-09 09:23:02 -07:00
|
|
|
|
odp_port_t in_port;
|
|
|
|
|
|
2013-12-11 11:07:01 -08:00
|
|
|
|
if (odp_flow_key_to_flow(key, key_len, flow)) {
|
2011-01-23 18:44:44 -08:00
|
|
|
|
/* This should not happen: it indicates that odp_flow_key_from_flow()
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* and odp_flow_key_to_flow() disagree on the acceptable form of a
|
|
|
|
|
* flow. Log the problem as an error, with enough details to enable
|
|
|
|
|
* debugging. */
|
2011-01-23 18:44:44 -08:00
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
|
|
|
|
|
|
|
|
|
|
if (!VLOG_DROP_ERR(&rl)) {
|
|
|
|
|
struct ds s;
|
|
|
|
|
|
|
|
|
|
ds_init(&s);
|
2013-12-11 11:07:01 -08:00
|
|
|
|
odp_flow_format(key, key_len, NULL, 0, NULL, &s, true);
|
2011-01-23 18:44:44 -08:00
|
|
|
|
VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
|
|
|
|
|
ds_destroy(&s);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
2013-07-09 09:23:02 -07:00
|
|
|
|
in_port = flow->in_port.odp_port;
|
|
|
|
|
if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) {
|
2011-09-08 16:30:20 -07:00
|
|
|
|
return EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
2011-01-23 18:44:44 -08:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
static int
|
2014-08-13 09:55:54 +12:00
|
|
|
|
dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
2013-10-29 02:34:15 -07:00
|
|
|
|
struct dp_netdev_flow *netdev_flow;
|
2011-01-17 14:40:58 -08:00
|
|
|
|
struct flow key;
|
|
|
|
|
int error;
|
2011-01-23 18:44:44 -08:00
|
|
|
|
|
2014-08-13 09:55:54 +12:00
|
|
|
|
error = dpif_netdev_flow_from_nlattrs(get->key, get->key_len, &key);
|
2011-01-17 14:40:58 -08:00
|
|
|
|
if (error) {
|
|
|
|
|
return error;
|
|
|
|
|
}
|
2010-10-11 13:31:35 -07:00
|
|
|
|
|
2013-11-04 06:23:54 -08:00
|
|
|
|
netdev_flow = dp_netdev_find_flow(dp, &key);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
|
2013-10-29 02:34:15 -07:00
|
|
|
|
if (netdev_flow) {
|
2014-08-13 09:55:54 +12:00
|
|
|
|
dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->flow);
|
2014-03-05 22:41:30 -08:00
|
|
|
|
} else {
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = ENOENT;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
2011-01-17 14:40:58 -08:00
|
|
|
|
|
2013-07-23 16:56:26 -07:00
|
|
|
|
return error;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2014-07-27 18:56:45 -07:00
|
|
|
|
dp_netdev_flow_add(struct dp_netdev *dp, struct match *match,
|
|
|
|
|
const struct nlattr *actions, size_t actions_len)
|
2014-01-08 15:58:11 -08:00
|
|
|
|
OVS_REQUIRES(dp->flow_mutex)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2013-10-29 02:34:15 -07:00
|
|
|
|
struct dp_netdev_flow *netdev_flow;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2013-10-29 02:34:15 -07:00
|
|
|
|
netdev_flow = xzalloc(sizeof *netdev_flow);
|
2014-07-27 18:56:45 -07:00
|
|
|
|
*CONST_CAST(struct flow *, &netdev_flow->flow) = match->flow;
|
2014-01-08 15:58:11 -08:00
|
|
|
|
|
2014-08-11 17:25:50 -07:00
|
|
|
|
ovs_refcount_init(&netdev_flow->ref_cnt);
|
|
|
|
|
|
2014-01-22 16:03:10 -08:00
|
|
|
|
ovsthread_stats_init(&netdev_flow->stats);
|
|
|
|
|
|
2014-03-05 22:41:30 -08:00
|
|
|
|
ovsrcu_set(&netdev_flow->actions,
|
|
|
|
|
dp_netdev_actions_create(actions, actions_len));
|
2013-11-04 06:23:54 -08:00
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
cls_rule_init(CONST_CAST(struct cls_rule *, &netdev_flow->cr),
|
2014-07-27 18:56:45 -07:00
|
|
|
|
match, NETDEV_RULE_PRIORITY);
|
2014-07-04 06:38:47 -07:00
|
|
|
|
cmap_insert(&dp->flow_table,
|
|
|
|
|
CONST_CAST(struct cmap_node *, &netdev_flow->node),
|
2014-07-27 18:56:45 -07:00
|
|
|
|
flow_hash(&match->flow, 0));
|
2014-01-08 15:58:11 -08:00
|
|
|
|
classifier_insert(&dp->cls,
|
|
|
|
|
CONST_CAST(struct cls_rule *, &netdev_flow->cr));
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2014-07-26 15:39:58 -07:00
|
|
|
|
if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) {
|
|
|
|
|
struct ds ds = DS_EMPTY_INITIALIZER;
|
|
|
|
|
|
|
|
|
|
ds_put_cstr(&ds, "flow_add: ");
|
|
|
|
|
match_format(match, &ds, OFP_DEFAULT_PRIORITY);
|
|
|
|
|
ds_put_cstr(&ds, ", actions:");
|
|
|
|
|
format_odp_actions(&ds, actions, actions_len);
|
|
|
|
|
|
|
|
|
|
VLOG_DBG_RL(&upcall_rl, "%s", ds_cstr(&ds));
|
|
|
|
|
|
|
|
|
|
ds_destroy(&ds);
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2013-10-29 02:34:15 -07:00
|
|
|
|
clear_stats(struct dp_netdev_flow *netdev_flow)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2014-01-22 16:03:10 -08:00
|
|
|
|
struct dp_netdev_flow_stats *bucket;
|
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
|
|
OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &netdev_flow->stats) {
|
|
|
|
|
ovs_mutex_lock(&bucket->mutex);
|
|
|
|
|
bucket->used = 0;
|
|
|
|
|
bucket->packet_count = 0;
|
|
|
|
|
bucket->byte_count = 0;
|
|
|
|
|
bucket->tcp_flags = 0;
|
|
|
|
|
ovs_mutex_unlock(&bucket->mutex);
|
|
|
|
|
}
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2011-12-26 14:39:03 -08:00
|
|
|
|
dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
2013-10-29 02:34:15 -07:00
|
|
|
|
struct dp_netdev_flow *netdev_flow;
|
2014-04-18 08:26:57 -07:00
|
|
|
|
struct miniflow miniflow;
|
2014-07-27 18:56:45 -07:00
|
|
|
|
struct match match;
|
2011-01-23 18:44:44 -08:00
|
|
|
|
int error;
|
|
|
|
|
|
2014-07-27 18:56:45 -07:00
|
|
|
|
error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow);
|
2013-12-11 11:07:01 -08:00
|
|
|
|
if (error) {
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
|
|
|
|
|
put->mask, put->mask_len,
|
2014-07-27 18:56:45 -07:00
|
|
|
|
&match.flow, &match.wc.masks);
|
2011-01-23 18:44:44 -08:00
|
|
|
|
if (error) {
|
|
|
|
|
return error;
|
|
|
|
|
}
|
2014-07-27 18:56:45 -07:00
|
|
|
|
miniflow_init(&miniflow, &match.flow);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
ovs_mutex_lock(&dp->flow_mutex);
|
2014-04-18 08:26:57 -07:00
|
|
|
|
netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
|
2013-10-29 02:34:15 -07:00
|
|
|
|
if (!netdev_flow) {
|
2011-12-26 14:39:03 -08:00
|
|
|
|
if (put->flags & DPIF_FP_CREATE) {
|
2014-07-04 06:38:47 -07:00
|
|
|
|
if (cmap_count(&dp->flow_table) < MAX_FLOWS) {
|
2011-12-26 14:39:03 -08:00
|
|
|
|
if (put->stats) {
|
|
|
|
|
memset(put->stats, 0, sizeof *put->stats);
|
2011-01-26 07:03:39 -08:00
|
|
|
|
}
|
2014-07-27 18:56:45 -07:00
|
|
|
|
error = dp_netdev_flow_add(dp, &match, put->actions,
|
2013-07-23 16:56:26 -07:00
|
|
|
|
put->actions_len);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
} else {
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = EFBIG;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
} else {
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = ENOENT;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
} else {
|
2013-11-04 06:23:54 -08:00
|
|
|
|
if (put->flags & DPIF_FP_MODIFY
|
2014-07-27 18:56:45 -07:00
|
|
|
|
&& flow_equal(&match.flow, &netdev_flow->flow)) {
|
2014-01-08 15:58:11 -08:00
|
|
|
|
struct dp_netdev_actions *new_actions;
|
|
|
|
|
struct dp_netdev_actions *old_actions;
|
|
|
|
|
|
|
|
|
|
new_actions = dp_netdev_actions_create(put->actions,
|
|
|
|
|
put->actions_len);
|
|
|
|
|
|
2014-03-05 22:41:30 -08:00
|
|
|
|
old_actions = dp_netdev_flow_get_actions(netdev_flow);
|
|
|
|
|
ovsrcu_set(&netdev_flow->actions, new_actions);
|
2014-01-22 16:03:10 -08:00
|
|
|
|
|
2014-01-08 14:37:13 -08:00
|
|
|
|
if (put->stats) {
|
|
|
|
|
get_dpif_flow_stats(netdev_flow, put->stats);
|
|
|
|
|
}
|
|
|
|
|
if (put->flags & DPIF_FP_ZERO_STATS) {
|
|
|
|
|
clear_stats(netdev_flow);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
2014-01-08 15:58:11 -08:00
|
|
|
|
|
2014-03-05 22:41:30 -08:00
|
|
|
|
ovsrcu_postpone(dp_netdev_actions_free, old_actions);
|
2013-11-04 06:23:54 -08:00
|
|
|
|
} else if (put->flags & DPIF_FP_CREATE) {
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = EEXIST;
|
2013-11-04 06:23:54 -08:00
|
|
|
|
} else {
|
|
|
|
|
/* Overlapping flow. */
|
|
|
|
|
error = EINVAL;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
2014-01-08 15:58:11 -08:00
|
|
|
|
ovs_mutex_unlock(&dp->flow_mutex);
|
2014-06-25 13:05:17 -07:00
|
|
|
|
miniflow_destroy(&miniflow);
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
|
|
|
|
return error;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2012-04-17 21:52:10 -07:00
|
|
|
|
dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
2013-10-29 02:34:15 -07:00
|
|
|
|
struct dp_netdev_flow *netdev_flow;
|
2010-10-11 13:31:35 -07:00
|
|
|
|
struct flow key;
|
2011-01-23 18:44:44 -08:00
|
|
|
|
int error;
|
|
|
|
|
|
2012-04-17 21:52:10 -07:00
|
|
|
|
error = dpif_netdev_flow_from_nlattrs(del->key, del->key_len, &key);
|
2011-01-23 18:44:44 -08:00
|
|
|
|
if (error) {
|
|
|
|
|
return error;
|
|
|
|
|
}
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
ovs_mutex_lock(&dp->flow_mutex);
|
2013-11-04 06:23:54 -08:00
|
|
|
|
netdev_flow = dp_netdev_find_flow(dp, &key);
|
2013-10-29 02:34:15 -07:00
|
|
|
|
if (netdev_flow) {
|
2012-04-17 21:52:10 -07:00
|
|
|
|
if (del->stats) {
|
2013-10-29 02:34:15 -07:00
|
|
|
|
get_dpif_flow_stats(netdev_flow, del->stats);
|
2011-01-26 07:03:39 -08:00
|
|
|
|
}
|
2014-01-08 15:58:11 -08:00
|
|
|
|
dp_netdev_remove_flow(dp, netdev_flow);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
} else {
|
2013-07-23 16:56:26 -07:00
|
|
|
|
error = ENOENT;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
2014-01-08 15:58:11 -08:00
|
|
|
|
ovs_mutex_unlock(&dp->flow_mutex);
|
2013-07-23 16:56:26 -07:00
|
|
|
|
|
|
|
|
|
return error;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
/* State shared by all threads participating in a single flow dump. */
struct dpif_netdev_flow_dump {
    struct dpif_flow_dump up;   /* Embedded generic dump state. */
    struct cmap_position pos;   /* Iteration position in dp->flow_table. */
    int status;                 /* 0 while dumping; EOF once exhausted. */
    struct ovs_mutex mutex;     /* Protects 'pos' and 'status'. */
};
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
static struct dpif_netdev_flow_dump *
|
|
|
|
|
dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2014-05-20 11:37:02 -07:00
|
|
|
|
return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up);
|
2014-02-27 14:13:07 -08:00
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
static struct dpif_flow_dump *
|
|
|
|
|
dpif_netdev_flow_dump_create(const struct dpif *dpif_)
|
2014-02-27 14:13:07 -08:00
|
|
|
|
{
|
2014-05-20 11:37:02 -07:00
|
|
|
|
struct dpif_netdev_flow_dump *dump;
|
2014-02-27 14:13:07 -08:00
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
dump = xmalloc(sizeof *dump);
|
|
|
|
|
dpif_flow_dump_init(&dump->up, dpif_);
|
2014-07-04 06:38:47 -07:00
|
|
|
|
memset(&dump->pos, 0, sizeof dump->pos);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
dump->status = 0;
|
|
|
|
|
ovs_mutex_init(&dump->mutex);
|
|
|
|
|
|
|
|
|
|
return &dump->up;
|
2014-02-27 14:13:07 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2014-05-20 11:37:02 -07:00
|
|
|
|
dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_)
|
2014-02-27 14:13:07 -08:00
|
|
|
|
{
|
2014-05-20 11:37:02 -07:00
|
|
|
|
struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
|
2014-02-27 14:13:07 -08:00
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
ovs_mutex_destroy(&dump->mutex);
|
|
|
|
|
free(dump);
|
datapath: Change listing flows to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
In turn, that means that flow keys must become variable-length. This does
not, however, fit in well with the ODP_FLOW_LIST ioctl in its current form,
because that would require userspace to know how much space to allocate
for each flow's key in advance, or to allocate as much space as could
possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_FLOW_LIST
by a new ioctl ODP_FLOW_DUMP that retrieves a single flow from the datapath
on each call. It is much cleaner to allocate the maximum amount of space
for a single flow key than to do so for possibly a very large number of
flow keys.
As a side effect, this patch also fixes a race condition that sometimes
made "ovs-dpctl dump-flows" print an error: previously, flows were listed
and then their actions were retrieved, which left a window in which
ovs-vswitchd could delete the flow. Now dumping a flow and its actions is
a single step, closing that window.
Dumping all of the flows in a datapath is no longer an atomic step, so now
it is possible to miss some flows or see a single flow twice during
iteration, if the flow table is modified by another process. It doesn't
look like this should be a problem for ovs-vswitchd.
It would be faster to retrieve a number of flows in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2010-12-28 10:39:52 -08:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
/* Per-thread state for one participant in a flow dump.  The key/mask
 * buffers back the pointers handed out in each batch of dumped flows. */
struct dpif_netdev_flow_dump_thread {
    struct dpif_flow_dump_thread up;          /* Embedded generic state. */
    struct dpif_netdev_flow_dump *dump;       /* Shared dump state. */
    struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH];   /* Serialized keys. */
    struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH];  /* Serialized masks. */
};
|
|
|
|
|
|
|
|
|
|
static struct dpif_netdev_flow_dump_thread *
|
|
|
|
|
dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
|
|
|
|
|
{
|
|
|
|
|
return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct dpif_flow_dump_thread *
|
|
|
|
|
dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
|
|
|
|
|
struct dpif_netdev_flow_dump_thread *thread;
|
|
|
|
|
|
|
|
|
|
thread = xmalloc(sizeof *thread);
|
|
|
|
|
dpif_flow_dump_thread_init(&thread->up, &dump->up);
|
|
|
|
|
thread->dump = dump;
|
|
|
|
|
return &thread->up;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Frees the per-thread flow-dump state.  The shared dump state is released
 * separately by dpif_netdev_flow_dump_destroy(). */
static void
dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
{
    struct dpif_netdev_flow_dump_thread *thread
        = dpif_netdev_flow_dump_thread_cast(thread_);

    free(thread);
}
|
|
|
|
|
|
datapath: Change listing flows to use an iterator concept.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
In turn, that means that flow keys must become variable-length. This does
not, however, fit in well with the ODP_FLOW_LIST ioctl in its current form,
because that would require userspace to know how much space to allocate
for each flow's key in advance, or to allocate as much space as could
possibly be needed. Neither choice is very attractive.
This commit prepares for a different solution, by replacing ODP_FLOW_LIST
by a new ioctl ODP_FLOW_DUMP that retrieves a single flow from the datapath
on each call. It is much cleaner to allocate the maximum amount of space
for a single flow key than to do so for possibly a very large number of
flow keys.
As a side effect, this patch also fixes a race condition that sometimes
made "ovs-dpctl dump-flows" print an error: previously, flows were listed
and then their actions were retrieved, which left a window in which
ovs-vswitchd could delete the flow. Now dumping a flow and its actions is
a single step, closing that window.
Dumping all of the flows in a datapath is no longer an atomic step, so now
it is possible to miss some flows or see a single flow twice during
iteration, if the flow table is modified by another process. It doesn't
look like this should be a problem for ovs-vswitchd.
It would be faster to retrieve a number of flows in batch instead of just
one at a time, but that will naturally happen later when the kernel
datapath interface is changed to use Netlink, so this patch does not bother
with it.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2010-12-28 10:39:52 -08:00
|
|
|
|
static int
|
2014-05-20 11:37:02 -07:00
|
|
|
|
dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
|
2014-06-23 12:36:11 -07:00
|
|
|
|
struct dpif_flow *flows, int max_flows)
|
2014-05-20 11:37:02 -07:00
|
|
|
|
{
|
|
|
|
|
struct dpif_netdev_flow_dump_thread *thread
|
|
|
|
|
= dpif_netdev_flow_dump_thread_cast(thread_);
|
|
|
|
|
struct dpif_netdev_flow_dump *dump = thread->dump;
|
|
|
|
|
struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
|
2014-06-23 12:36:11 -07:00
|
|
|
|
struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
|
2014-05-20 11:37:02 -07:00
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
|
2014-06-23 12:36:11 -07:00
|
|
|
|
int n_flows = 0;
|
|
|
|
|
int i;
|
2010-10-11 13:31:35 -07:00
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
ovs_mutex_lock(&dump->mutex);
|
2014-06-23 12:36:11 -07:00
|
|
|
|
if (!dump->status) {
|
|
|
|
|
for (n_flows = 0; n_flows < MIN(max_flows, FLOW_DUMP_MAX_BATCH);
|
|
|
|
|
n_flows++) {
|
2014-07-04 06:38:47 -07:00
|
|
|
|
struct cmap_node *node;
|
2014-06-23 12:36:11 -07:00
|
|
|
|
|
2014-07-04 06:38:47 -07:00
|
|
|
|
node = cmap_next_position(&dp->flow_table, &dump->pos);
|
2014-06-23 12:36:11 -07:00
|
|
|
|
if (!node) {
|
|
|
|
|
dump->status = EOF;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
netdev_flows[n_flows] = CONTAINER_OF(node, struct dp_netdev_flow,
|
|
|
|
|
node);
|
2014-02-27 14:13:08 -08:00
|
|
|
|
}
|
2014-01-08 15:58:11 -08:00
|
|
|
|
}
|
2014-05-20 11:37:02 -07:00
|
|
|
|
ovs_mutex_unlock(&dump->mutex);
|
|
|
|
|
|
2014-06-23 12:36:11 -07:00
|
|
|
|
for (i = 0; i < n_flows; i++) {
|
|
|
|
|
struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
|
|
|
|
|
struct odputil_keybuf *keybuf = &thread->keybuf[i];
|
|
|
|
|
struct dp_netdev_flow *netdev_flow = netdev_flows[i];
|
|
|
|
|
struct dpif_flow *f = &flows[i];
|
|
|
|
|
struct dp_netdev_actions *dp_actions;
|
|
|
|
|
struct flow_wildcards wc;
|
|
|
|
|
struct ofpbuf buf;
|
|
|
|
|
|
|
|
|
|
minimask_expand(&netdev_flow->cr.match.mask, &wc);
|
|
|
|
|
|
|
|
|
|
/* Key. */
|
|
|
|
|
ofpbuf_use_stack(&buf, keybuf, sizeof *keybuf);
|
|
|
|
|
odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks,
|
|
|
|
|
netdev_flow->flow.in_port.odp_port, true);
|
|
|
|
|
f->key = ofpbuf_data(&buf);
|
|
|
|
|
f->key_len = ofpbuf_size(&buf);
|
|
|
|
|
|
|
|
|
|
/* Mask. */
|
|
|
|
|
ofpbuf_use_stack(&buf, maskbuf, sizeof *maskbuf);
|
|
|
|
|
odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
|
|
|
|
|
odp_to_u32(wc.masks.in_port.odp_port),
|
|
|
|
|
SIZE_MAX, true);
|
|
|
|
|
f->mask = ofpbuf_data(&buf);
|
|
|
|
|
f->mask_len = ofpbuf_size(&buf);
|
|
|
|
|
|
|
|
|
|
/* Actions. */
|
|
|
|
|
dp_actions = dp_netdev_flow_get_actions(netdev_flow);
|
|
|
|
|
f->actions = dp_actions->actions;
|
|
|
|
|
f->actions_len = dp_actions->size;
|
|
|
|
|
|
|
|
|
|
/* Stats. */
|
|
|
|
|
get_dpif_flow_stats(netdev_flow, &f->stats);
|
|
|
|
|
}
|
2011-01-26 07:03:39 -08:00
|
|
|
|
|
2014-06-23 12:36:11 -07:00
|
|
|
|
return n_flows;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2013-12-30 15:58:58 -08:00
|
|
|
|
dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
2014-06-23 11:43:59 -07:00
|
|
|
|
struct dpif_packet packet, *pp;
|
2013-12-30 15:58:58 -08:00
|
|
|
|
struct pkt_metadata *md = &execute->md;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2014-03-30 01:31:50 -07:00
|
|
|
|
if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN ||
|
|
|
|
|
ofpbuf_size(execute->packet) > UINT16_MAX) {
|
2009-06-19 14:09:39 -07:00
|
|
|
|
return EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
2014-06-23 11:43:57 -07:00
|
|
|
|
packet.ofpbuf = *execute->packet;
|
2014-06-23 11:43:59 -07:00
|
|
|
|
pp = &packet;
|
2014-06-23 11:43:57 -07:00
|
|
|
|
|
2014-06-23 11:43:59 -07:00
|
|
|
|
dp_netdev_execute_actions(dp, &pp, 1, false, md,
|
2014-03-20 10:54:37 -07:00
|
|
|
|
execute->actions, execute->actions_len);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
|
2014-06-23 11:43:57 -07:00
|
|
|
|
/* Even though may_steal is set to false, some actions could modify or
|
|
|
|
|
* reallocate the ofpbuf memory. We need to pass those changes to the
|
|
|
|
|
* caller */
|
|
|
|
|
*execute->packet = packet.ofpbuf;
|
|
|
|
|
|
2013-12-30 15:58:58 -08:00
|
|
|
|
return 0;
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-07-15 16:09:40 -07:00
|
|
|
|
static void
|
|
|
|
|
dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
|
|
|
|
|
{
|
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < n_ops; i++) {
|
|
|
|
|
struct dpif_op *op = ops[i];
|
|
|
|
|
|
|
|
|
|
switch (op->type) {
|
|
|
|
|
case DPIF_OP_FLOW_PUT:
|
|
|
|
|
op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case DPIF_OP_FLOW_DEL:
|
|
|
|
|
op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case DPIF_OP_EXECUTE:
|
|
|
|
|
op->error = dpif_netdev_execute(dpif, &op->u.execute);
|
|
|
|
|
break;
|
2014-08-13 09:55:54 +12:00
|
|
|
|
|
|
|
|
|
case DPIF_OP_FLOW_GET:
|
|
|
|
|
op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get);
|
|
|
|
|
break;
|
2014-07-15 16:09:40 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-11-21 13:36:17 -08:00
|
|
|
|
static int
|
|
|
|
|
dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
|
|
|
|
|
uint32_t queue_id, uint32_t *priority)
|
|
|
|
|
{
|
|
|
|
|
*priority = queue_id;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
|
2014-01-08 14:37:13 -08:00
|
|
|
|
/* Creates and returns a new 'struct dp_netdev_actions', with a reference count
|
|
|
|
|
* of 1, whose actions are a copy of from the 'ofpacts_len' bytes of
|
|
|
|
|
* 'ofpacts'. */
|
|
|
|
|
struct dp_netdev_actions *
|
|
|
|
|
dp_netdev_actions_create(const struct nlattr *actions, size_t size)
|
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_actions *netdev_actions;
|
|
|
|
|
|
|
|
|
|
netdev_actions = xmalloc(sizeof *netdev_actions);
|
|
|
|
|
netdev_actions->actions = xmemdup(actions, size);
|
|
|
|
|
netdev_actions->size = size;
|
|
|
|
|
|
|
|
|
|
return netdev_actions;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Returns the current actions of 'flow'.  This is an RCU-protected read:
 * the caller must not quiesce while it keeps using the returned pointer,
 * or the actions may be freed underneath it. */
struct dp_netdev_actions *
dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow)
{
    return ovsrcu_get(struct dp_netdev_actions *, &flow->actions);
}
|
|
|
|
|
|
2014-03-05 22:41:30 -08:00
|
|
|
|
/* Frees 'actions' together with the attribute buffer it owns.  NOTE(review):
 * given the ovsrcu_get() accessor above, this is presumably only invoked
 * after an RCU grace period — confirm at call sites before reusing. */
static void
dp_netdev_actions_free(struct dp_netdev_actions *actions)
{
    free(actions->actions);
    free(actions);
}
|
|
|
|
|
|
2014-03-20 10:57:41 -07:00
|
|
|
|
|
2014-03-28 12:20:00 -07:00
|
|
|
|
static void
|
2014-03-20 19:38:14 -07:00
|
|
|
|
dp_netdev_process_rxq_port(struct dp_netdev *dp,
|
2014-03-20 10:57:41 -07:00
|
|
|
|
struct dp_netdev_port *port,
|
2014-03-20 19:38:14 -07:00
|
|
|
|
struct netdev_rxq *rxq)
|
2014-03-20 10:57:41 -07:00
|
|
|
|
{
|
2014-06-23 11:43:59 -07:00
|
|
|
|
struct dpif_packet *packets[NETDEV_MAX_RX_BATCH];
|
|
|
|
|
int error, cnt;
|
2014-03-20 10:57:41 -07:00
|
|
|
|
|
2014-06-23 11:43:59 -07:00
|
|
|
|
error = netdev_rxq_recv(rxq, packets, &cnt);
|
2014-03-20 10:57:41 -07:00
|
|
|
|
if (!error) {
|
2014-06-23 11:43:59 -07:00
|
|
|
|
dp_netdev_port_input(dp, packets, cnt, port->port_no);
|
2014-03-20 10:57:41 -07:00
|
|
|
|
} else if (error != EAGAIN && error != EOPNOTSUPP) {
|
|
|
|
|
static struct vlog_rate_limit rl
|
|
|
|
|
= VLOG_RATE_LIMIT_INIT(1, 5);
|
|
|
|
|
|
|
|
|
|
VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
|
|
|
|
|
netdev_get_name(port->netdev),
|
|
|
|
|
ovs_strerror(error));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netdev_run(struct dpif *dpif)
|
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_port *port;
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
|
|
|
|
|
2014-06-11 11:07:43 -07:00
|
|
|
|
CMAP_FOR_EACH (port, node, &dp->ports) {
|
2014-03-20 20:52:06 -07:00
|
|
|
|
if (!netdev_is_pmd(port->netdev)) {
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
|
|
|
|
|
dp_netdev_process_rxq_port(dp, port, port->rxq[i]);
|
|
|
|
|
}
|
2014-03-20 10:57:41 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netdev_wait(struct dpif *dpif)
|
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_port *port;
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovs_mutex_lock(&dp_netdev_mutex);
|
2014-06-11 11:07:43 -07:00
|
|
|
|
CMAP_FOR_EACH (port, node, &dp->ports) {
|
2014-03-20 20:52:06 -07:00
|
|
|
|
if (!netdev_is_pmd(port->netdev)) {
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
|
|
|
|
|
netdev_rxq_wait(port->rxq[i]);
|
|
|
|
|
}
|
2014-03-20 10:57:41 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovs_mutex_unlock(&dp_netdev_mutex);
|
2014-03-20 10:57:41 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-03-20 19:38:14 -07:00
|
|
|
|
/* One (port, rx queue) pair polled by a pmd thread.  While an entry is on a
 * poll list, the list holds a reference on 'port' (taken with port_ref() in
 * pmd_load_queues(), dropped with port_unref()). */
struct rxq_poll {
    struct dp_netdev_port *port;  /* Owning port (referenced). */
    struct netdev_rxq *rx;        /* One of port's rx queues. */
};
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
pmd_load_queues(struct pmd_thread *f,
|
2014-03-20 19:38:14 -07:00
|
|
|
|
struct rxq_poll **ppoll_list, int poll_cnt)
|
2014-03-20 10:57:41 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = f->dp;
|
2014-03-20 19:38:14 -07:00
|
|
|
|
struct rxq_poll *poll_list = *ppoll_list;
|
2014-03-20 10:57:41 -07:00
|
|
|
|
struct dp_netdev_port *port;
|
|
|
|
|
int id = f->id;
|
|
|
|
|
int index;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
/* Simple scheduler for netdev rx polling. */
|
|
|
|
|
for (i = 0; i < poll_cnt; i++) {
|
|
|
|
|
port_unref(poll_list[i].port);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
poll_cnt = 0;
|
|
|
|
|
index = 0;
|
|
|
|
|
|
2014-06-11 11:07:43 -07:00
|
|
|
|
CMAP_FOR_EACH (port, node, &f->dp->ports) {
|
2014-03-20 10:57:41 -07:00
|
|
|
|
if (netdev_is_pmd(port->netdev)) {
|
2014-03-20 20:52:06 -07:00
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
|
|
|
|
|
if ((index % dp->n_pmd_threads) == id) {
|
|
|
|
|
poll_list = xrealloc(poll_list, sizeof *poll_list * (poll_cnt + 1));
|
2014-03-20 10:57:41 -07:00
|
|
|
|
|
2014-03-20 20:52:06 -07:00
|
|
|
|
port_ref(port);
|
|
|
|
|
poll_list[poll_cnt].port = port;
|
|
|
|
|
poll_list[poll_cnt].rx = port->rxq[i];
|
|
|
|
|
poll_cnt++;
|
|
|
|
|
}
|
|
|
|
|
index++;
|
2014-03-20 10:57:41 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
*ppoll_list = poll_list;
|
|
|
|
|
return poll_cnt;
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-27 17:00:30 -08:00
|
|
|
|
static void *
|
2014-03-20 10:57:41 -07:00
|
|
|
|
pmd_thread_main(void *f_)
|
2013-12-27 17:00:30 -08:00
|
|
|
|
{
|
2014-03-20 10:57:41 -07:00
|
|
|
|
struct pmd_thread *f = f_;
|
2013-12-27 17:00:30 -08:00
|
|
|
|
struct dp_netdev *dp = f->dp;
|
2014-03-20 10:57:41 -07:00
|
|
|
|
unsigned int lc = 0;
|
2014-03-20 19:38:14 -07:00
|
|
|
|
struct rxq_poll *poll_list;
|
2014-03-20 10:57:41 -07:00
|
|
|
|
unsigned int port_seq;
|
|
|
|
|
int poll_cnt;
|
|
|
|
|
int i;
|
2013-12-27 17:00:30 -08:00
|
|
|
|
|
2014-03-20 10:57:41 -07:00
|
|
|
|
poll_cnt = 0;
|
|
|
|
|
poll_list = NULL;
|
|
|
|
|
|
2014-03-20 22:07:44 -07:00
|
|
|
|
pmd_thread_setaffinity_cpu(f->id);
|
2014-03-20 10:57:41 -07:00
|
|
|
|
reload:
|
|
|
|
|
poll_cnt = pmd_load_queues(f, &poll_list, poll_cnt);
|
|
|
|
|
atomic_read(&f->change_seq, &port_seq);
|
2013-12-27 17:00:30 -08:00
|
|
|
|
|
2014-03-20 10:57:41 -07:00
|
|
|
|
for (;;) {
|
|
|
|
|
unsigned int c_port_seq;
|
2013-12-27 17:00:30 -08:00
|
|
|
|
int i;
|
|
|
|
|
|
2014-03-20 10:57:41 -07:00
|
|
|
|
for (i = 0; i < poll_cnt; i++) {
|
2014-03-20 20:52:06 -07:00
|
|
|
|
dp_netdev_process_rxq_port(dp, poll_list[i].port, poll_list[i].rx);
|
2014-03-20 10:57:41 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (lc++ > 1024) {
|
|
|
|
|
ovsrcu_quiesce();
|
2013-12-27 17:00:30 -08:00
|
|
|
|
|
2014-07-28 13:37:24 -07:00
|
|
|
|
/* XXX: need completely userspace based signaling method.
|
2014-03-20 10:57:41 -07:00
|
|
|
|
* to keep this thread entirely in userspace.
|
|
|
|
|
* For now using atomic counter. */
|
|
|
|
|
lc = 0;
|
|
|
|
|
atomic_read_explicit(&f->change_seq, &c_port_seq, memory_order_consume);
|
|
|
|
|
if (c_port_seq != port_seq) {
|
2013-12-27 17:00:30 -08:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2014-03-20 10:57:41 -07:00
|
|
|
|
}
|
2013-12-27 17:00:30 -08:00
|
|
|
|
|
2014-03-20 10:57:41 -07:00
|
|
|
|
if (!latch_is_set(&f->dp->exit_latch)){
|
|
|
|
|
goto reload;
|
|
|
|
|
}
|
2013-12-27 17:00:30 -08:00
|
|
|
|
|
2014-03-20 10:57:41 -07:00
|
|
|
|
for (i = 0; i < poll_cnt; i++) {
|
|
|
|
|
port_unref(poll_list[i].port);
|
2013-12-27 17:00:30 -08:00
|
|
|
|
}
|
|
|
|
|
|
2014-03-20 10:57:41 -07:00
|
|
|
|
free(poll_list);
|
2013-12-27 17:00:30 -08:00
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-26 06:51:55 +00:00
|
|
|
|
/* Blocks upcall delivery for 'dp': takes upcall_rwlock for writing so that
 * datapath threads, which take it for reading before calling the upcall
 * callback, cannot deliver upcalls until dp_netdev_enable_upcall() releases
 * it. */
static void
dp_netdev_disable_upcall(struct dp_netdev *dp)
    OVS_ACQUIRES(dp->upcall_rwlock)
{
    fat_rwlock_wrlock(&dp->upcall_rwlock);
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netdev_disable_upcall(struct dpif *dpif)
|
|
|
|
|
OVS_NO_THREAD_SAFETY_ANALYSIS
|
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
|
|
|
|
dp_netdev_disable_upcall(dp);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Re-enables upcall delivery for 'dp' by releasing upcall_rwlock, previously
 * taken for writing by dp_netdev_disable_upcall(). */
static void
dp_netdev_enable_upcall(struct dp_netdev *dp)
    OVS_RELEASES(dp->upcall_rwlock)
{
    fat_rwlock_unlock(&dp->upcall_rwlock);
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dpif_netdev_enable_upcall(struct dpif *dpif)
|
|
|
|
|
OVS_NO_THREAD_SAFETY_ANALYSIS
|
|
|
|
|
{
|
|
|
|
|
struct dp_netdev *dp = get_dp_netdev(dpif);
|
|
|
|
|
dp_netdev_enable_upcall(dp);
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-27 17:00:30 -08:00
|
|
|
|
/* Sets the number of pmd threads of 'dp' to 'n'.  No-op if 'n' already
 * matches.  Otherwise stops every existing pmd thread (exit latch + reload
 * signal, then join), frees the old thread array, and starts 'n' fresh
 * threads.  Each new thread computes its own rx-queue share from its id in
 * pmd_load_queues().
 *
 * NOTE(review): nothing here excludes concurrent callers; presumably callers
 * serialize reconfiguration externally — confirm before adding new call
 * sites. */
static void
dp_netdev_set_pmd_threads(struct dp_netdev *dp, int n)
{
    int i;

    if (n == dp->n_pmd_threads) {
        return;
    }

    /* Stop existing threads. */
    latch_set(&dp->exit_latch);
    dp_netdev_reload_pmd_threads(dp);
    for (i = 0; i < dp->n_pmd_threads; i++) {
        struct pmd_thread *f = &dp->pmd_threads[i];

        xpthread_join(f->thread, NULL);
    }
    latch_poll(&dp->exit_latch);
    free(dp->pmd_threads);

    /* Start new threads. */
    dp->pmd_threads = xmalloc(n * sizeof *dp->pmd_threads);
    dp->n_pmd_threads = n;

    for (i = 0; i < n; i++) {
        struct pmd_thread *f = &dp->pmd_threads[i];

        f->dp = dp;
        f->id = i;
        atomic_store(&f->change_seq, 1);

        /* Each thread will distribute all devices rx-queues among
         * themselves. */
        f->thread = ovs_thread_create("pmd", pmd_thread_main, f);
    }
}
|
2014-03-20 10:57:41 -07:00
|
|
|
|
|
2013-12-27 17:00:30 -08:00
|
|
|
|
|
2014-01-22 16:03:10 -08:00
|
|
|
|
static void *
|
|
|
|
|
dp_netdev_flow_stats_new_cb(void)
|
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_flow_stats *bucket = xzalloc_cacheline(sizeof *bucket);
|
|
|
|
|
ovs_mutex_init(&bucket->mutex);
|
|
|
|
|
return bucket;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
static void
|
2013-10-29 02:34:15 -07:00
|
|
|
|
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow,
|
2014-06-23 11:43:59 -07:00
|
|
|
|
int cnt, int size,
|
|
|
|
|
uint16_t tcp_flags)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2014-01-22 16:03:10 -08:00
|
|
|
|
long long int now = time_msec();
|
|
|
|
|
struct dp_netdev_flow_stats *bucket;
|
|
|
|
|
|
|
|
|
|
bucket = ovsthread_stats_bucket_get(&netdev_flow->stats,
|
|
|
|
|
dp_netdev_flow_stats_new_cb);
|
|
|
|
|
|
|
|
|
|
ovs_mutex_lock(&bucket->mutex);
|
|
|
|
|
bucket->used = MAX(now, bucket->used);
|
2014-06-23 11:43:59 -07:00
|
|
|
|
bucket->packet_count += cnt;
|
|
|
|
|
bucket->byte_count += size;
|
2014-01-22 16:03:10 -08:00
|
|
|
|
bucket->tcp_flags |= tcp_flags;
|
|
|
|
|
ovs_mutex_unlock(&bucket->mutex);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-03-19 07:47:12 -07:00
|
|
|
|
static void *
|
|
|
|
|
dp_netdev_stats_new_cb(void)
|
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_stats *bucket = xzalloc_cacheline(sizeof *bucket);
|
|
|
|
|
ovs_mutex_init(&bucket->mutex);
|
|
|
|
|
return bucket;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-06-23 11:43:59 -07:00
|
|
|
|
dp_netdev_count_packet(struct dp_netdev *dp, enum dp_stat_type type, int cnt)
|
2014-03-19 07:47:12 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_stats *bucket;
|
|
|
|
|
|
|
|
|
|
bucket = ovsthread_stats_bucket_get(&dp->stats, dp_netdev_stats_new_cb);
|
|
|
|
|
ovs_mutex_lock(&bucket->mutex);
|
2014-06-23 11:43:59 -07:00
|
|
|
|
bucket->n[type] += cnt;
|
2014-03-19 07:47:12 -07:00
|
|
|
|
ovs_mutex_unlock(&bucket->mutex);
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-26 15:39:58 -07:00
|
|
|
|
/* Delivers an upcall of 'type' for 'packet_'/'flow' to the registered
 * callback, which may fill 'actions' (to execute now) and 'put_actions' (to
 * install).  Returns the callback's result, or ENODEV if no callback is
 * registered.  The caller is expected to hold dp->upcall_rwlock for reading.
 *
 * NOTE(review): the debug-logging branch dereferences 'wc->masks', but the
 * OVS_ACTION_ATTR_USERSPACE path passes wc == NULL — with debug logging
 * enabled that would crash; confirm and guard if needed. */
static int
dp_netdev_upcall(struct dp_netdev *dp, struct dpif_packet *packet_,
                 struct flow *flow, struct flow_wildcards *wc,
                 enum dpif_upcall_type type, const struct nlattr *userdata,
                 struct ofpbuf *actions, struct ofpbuf *put_actions)
{
    struct ofpbuf *packet = &packet_->ofpbuf;

    /* Count every flow-table miss, even ones we cannot deliver. */
    if (type == DPIF_UC_MISS) {
        dp_netdev_count_packet(dp, DP_STAT_MISS, 1);
    }

    if (OVS_UNLIKELY(!dp->upcall_cb)) {
        return ENODEV;
    }

    /* Rate-limited debug dump of the upcall's flow key and packet. */
    if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        struct ofpbuf key;
        char *packet_str;

        ofpbuf_init(&key, 0);
        odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port,
                               true);

        packet_str = ofp_packet_to_string(ofpbuf_data(packet),
                                          ofpbuf_size(packet));

        odp_flow_key_format(ofpbuf_data(&key), ofpbuf_size(&key), &ds);

        VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name,
                 dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str);

        ofpbuf_uninit(&key);
        free(packet_str);
        ds_destroy(&ds);
    }

    return dp->upcall_cb(packet, flow, type, userdata, actions, wc,
                         put_actions, dp->upcall_aux);
}
|
|
|
|
|
|
2014-06-23 18:22:03 -07:00
|
|
|
|
/* A group of packets from one rx batch that all matched the same flow, so
 * that the flow's actions can be executed once for the whole group. */
struct packet_batch {
    unsigned int packet_count;    /* Number of valid entries in 'packets'. */
    unsigned int byte_count;      /* Sum of the packets' sizes in bytes. */
    uint16_t tcp_flags;           /* OR of the packets' TCP flags. */

    struct dp_netdev_flow *flow;  /* The flow every packet here matched. */

    struct dpif_packet *packets[NETDEV_MAX_RX_BATCH];
    struct pkt_metadata md;       /* Metadata shared by the whole batch. */
};
|
|
|
|
|
|
|
|
|
|
static inline void
|
2014-06-23 18:22:03 -07:00
|
|
|
|
packet_batch_update(struct packet_batch *batch,
|
2014-06-23 11:43:59 -07:00
|
|
|
|
struct dpif_packet *packet, const struct miniflow *mf)
|
|
|
|
|
{
|
|
|
|
|
batch->tcp_flags |= miniflow_get_tcp_flags(mf);
|
|
|
|
|
batch->packets[batch->packet_count++] = packet;
|
|
|
|
|
batch->byte_count += ofpbuf_size(&packet->ofpbuf);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void
|
2014-06-23 18:22:03 -07:00
|
|
|
|
packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow,
|
2014-06-23 18:28:43 -07:00
|
|
|
|
struct pkt_metadata *md)
|
2014-06-23 11:43:59 -07:00
|
|
|
|
{
|
|
|
|
|
batch->flow = flow;
|
|
|
|
|
batch->md = *md;
|
|
|
|
|
|
|
|
|
|
batch->packet_count = 0;
|
|
|
|
|
batch->byte_count = 0;
|
|
|
|
|
batch->tcp_flags = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void
|
2014-06-23 18:22:03 -07:00
|
|
|
|
packet_batch_execute(struct packet_batch *batch, struct dp_netdev *dp)
|
2014-06-23 11:43:59 -07:00
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_actions *actions;
|
|
|
|
|
struct dp_netdev_flow *flow = batch->flow;
|
|
|
|
|
|
|
|
|
|
dp_netdev_flow_used(batch->flow, batch->packet_count, batch->byte_count,
|
|
|
|
|
batch->tcp_flags);
|
|
|
|
|
|
|
|
|
|
actions = dp_netdev_flow_get_actions(flow);
|
|
|
|
|
|
|
|
|
|
dp_netdev_execute_actions(dp, batch->packets,
|
|
|
|
|
batch->packet_count, true, &batch->md,
|
|
|
|
|
actions->actions, actions->size);
|
|
|
|
|
|
|
|
|
|
dp_netdev_count_packet(dp, DP_STAT_HIT, batch->packet_count);
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 14:09:39 -07:00
|
|
|
|
/* Core of the userspace datapath's fast path.  For a batch of 'cnt'
 * packets sharing metadata 'md':
 *
 *   1. Extracts a miniflow per packet (runts shorter than an Ethernet
 *      header are dropped; their mfs[] slot is left NULL).
 *   2. Looks all packets up in the classifier in one batch.
 *   3. For misses, if the upcall read lock can be taken, performs a miss
 *      upcall per missed packet, executes its actions immediately, and
 *      installs the resulting flow under flow_mutex.
 *   4. Groups the remaining matched packets into per-flow batches and
 *      executes each batch's actions once.
 *
 * NOTE(review): if fat_rwlock_tryrdlock() fails (upcalls disabled), missed
 * packets are skipped by the batching loop without being freed — looks like
 * a packet leak on that path; confirm ownership expectations. */
static void
dp_netdev_input(struct dp_netdev *dp, struct dpif_packet **packets, int cnt,
                struct pkt_metadata *md)
{
    struct packet_batch batches[NETDEV_MAX_RX_BATCH];
    struct netdev_flow_key keys[NETDEV_MAX_RX_BATCH];
    const struct miniflow *mfs[NETDEV_MAX_RX_BATCH]; /* NULL at bad packets. */
    struct cls_rule *rules[NETDEV_MAX_RX_BATCH];
    size_t n_batches, i;
    bool any_miss;

    for (i = 0; i < cnt; i++) {
        if (OVS_UNLIKELY(ofpbuf_size(&packets[i]->ofpbuf) < ETH_HEADER_LEN)) {
            dpif_packet_delete(packets[i]);
            mfs[i] = NULL;
            continue;
        }

        miniflow_initialize(&keys[i].flow, keys[i].buf);
        miniflow_extract(&packets[i]->ofpbuf, md, &keys[i].flow);
        mfs[i] = &keys[i].flow;
    }

    any_miss = !classifier_lookup_miniflow_batch(&dp->cls, mfs, rules, cnt);
    if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
        uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
        struct ofpbuf actions, put_actions;
        struct match match;

        ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
        ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);

        for (i = 0; i < cnt; i++) {
            const struct dp_netdev_flow *netdev_flow;
            struct ofpbuf *add_actions;
            int error;

            /* Only packets that were valid yet unmatched need an upcall. */
            if (OVS_LIKELY(rules[i] || !mfs[i])) {
                continue;
            }

            /* It's possible that an earlier slow path execution installed
             * the rule this flow needs.  In this case, it's a lot cheaper
             * to catch it here than execute a miss. */
            netdev_flow = dp_netdev_lookup_flow(dp, mfs[i]);
            if (netdev_flow) {
                rules[i] = CONST_CAST(struct cls_rule *, &netdev_flow->cr);
                continue;
            }

            miniflow_expand(mfs[i], &match.flow);

            ofpbuf_clear(&actions);
            ofpbuf_clear(&put_actions);

            error = dp_netdev_upcall(dp, packets[i], &match.flow, &match.wc,
                                     DPIF_UC_MISS, NULL, &actions,
                                     &put_actions);
            if (OVS_UNLIKELY(error && error != ENOSPC)) {
                continue;
            }

            /* We can't allow the packet batching in the next loop to execute
             * the actions.  Otherwise, if there are any slow path actions,
             * we'll send the packet up twice. */
            dp_netdev_execute_actions(dp, &packets[i], 1, false, md,
                                      ofpbuf_data(&actions),
                                      ofpbuf_size(&actions));

            add_actions = ofpbuf_size(&put_actions)
                ? &put_actions
                : &actions;

            ovs_mutex_lock(&dp->flow_mutex);
            /* XXX: There's a brief race where this flow could have already
             * been installed since we last did the flow lookup.  This could be
             * solved by moving the mutex lock outside the loop, but that's an
             * awful long time to be locking everyone out of making flow
             * installs.  If we move to a per-core classifier, it would be
             * reasonable. */
            if (OVS_LIKELY(error != ENOSPC)
                && !dp_netdev_lookup_flow(dp, mfs[i])) {
                dp_netdev_flow_add(dp, &match, ofpbuf_data(add_actions),
                                   ofpbuf_size(add_actions));
            }
            ovs_mutex_unlock(&dp->flow_mutex);
        }

        ofpbuf_uninit(&actions);
        ofpbuf_uninit(&put_actions);
        fat_rwlock_unlock(&dp->upcall_rwlock);
    }

    n_batches = 0;
    for (i = 0; i < cnt; i++) {
        struct dp_netdev_flow *flow;
        struct packet_batch *batch;
        size_t j;

        if (OVS_UNLIKELY(!rules[i] || !mfs[i])) {
            continue;
        }

        /* XXX: This O(n^2) algortihm makes sense if we're operating under the
         * assumption that the number of distinct flows (and therefore the
         * number of distinct batches) is quite small.  If this turns out not
         * to be the case, it may make sense to pre sort based on the
         * netdev_flow pointer.  That done we can get the appropriate batching
         * in O(n * log(n)) instead. */
        batch = NULL;
        flow = dp_netdev_flow_cast(rules[i]);
        for (j = 0; j < n_batches; j++) {
            if (batches[j].flow == flow) {
                batch = &batches[j];
                break;
            }
        }

        if (!batch) {
            batch = &batches[n_batches++];
            packet_batch_init(batch, flow, md);
        }
        packet_batch_update(batch, packets[i], mfs[i]);
    }

    for (i = 0; i < n_batches; i++) {
        packet_batch_execute(&batches[i], dp);
    }
}
|
|
|
|
|
|
2014-03-05 15:27:31 -08:00
|
|
|
|
static void
|
2014-06-23 11:43:59 -07:00
|
|
|
|
dp_netdev_port_input(struct dp_netdev *dp, struct dpif_packet **packets,
|
|
|
|
|
int cnt, odp_port_t port_no)
|
2014-03-05 15:27:31 -08:00
|
|
|
|
{
|
|
|
|
|
uint32_t *recirc_depth = recirc_depth_get();
|
2014-06-23 11:43:59 -07:00
|
|
|
|
struct pkt_metadata md = PKT_METADATA_INITIALIZER(port_no);
|
2014-03-05 15:27:31 -08:00
|
|
|
|
|
|
|
|
|
*recirc_depth = 0;
|
2014-06-23 11:43:59 -07:00
|
|
|
|
dp_netdev_input(dp, packets, cnt, &md);
|
2014-03-05 15:27:31 -08:00
|
|
|
|
}
|
|
|
|
|
|
2013-11-14 14:35:58 -08:00
|
|
|
|
/* Auxiliary data threaded through odp_execute_actions() into
 * dp_execute_cb(). */
struct dp_netdev_execute_aux {
    struct dp_netdev *dp;  /* Datapath the actions execute against. */
};
|
|
|
|
|
|
2014-07-26 06:51:55 +00:00
|
|
|
|
/* dpif callback: registers 'cb' (with opaque data 'aux') as the function
 * dp_netdev_upcall() invokes to deliver upcalls.
 *
 * NOTE(review): 'upcall_aux' is stored before 'upcall_cb'; since
 * dp_netdev_upcall() tests 'upcall_cb' and then reads 'upcall_aux', this
 * order looks intentional for readers — do not swap without confirming the
 * synchronization story. */
static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
                               void *aux)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    dp->upcall_aux = aux;
    dp->upcall_cb = cb;
}
|
|
|
|
|
|
2013-11-14 14:35:58 -08:00
|
|
|
|
/* odp_execute_actions() callback: applies one datapath action 'a' to the
 * 'cnt' packets in 'packets'.  If 'may_steal' is true, this function owns
 * the packets for stealing actions (output consumes them via netdev_send();
 * other paths free or recirculate them); otherwise packets must survive.
 *
 * Handled actions: OUTPUT, USERSPACE (action upcall per packet), HASH,
 * RECIRC.  Field-modifying actions (SET, PUSH/POP_*, SAMPLE) are handled
 * inside odp_execute_actions() itself and must never reach this callback
 * (OVS_NOT_REACHED).
 *
 * NOTE(review): in the USERSPACE case, if fat_rwlock_tryrdlock() fails the
 * packets are neither processed nor freed even when may_steal is true —
 * looks like a leak on that path; confirm. */
static void
dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
              struct pkt_metadata *md,
              const struct nlattr *a, bool may_steal)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct dp_netdev_execute_aux *aux = aux_;
    uint32_t *depth = recirc_depth_get();
    struct dp_netdev *dp = aux->dp;
    int type = nl_attr_type(a);
    struct dp_netdev_port *p;
    int i;

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_OUTPUT:
        p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a)));
        if (OVS_LIKELY(p)) {
            netdev_send(p->netdev, packets, cnt, may_steal);
        } else if (may_steal) {
            /* Unknown output port: drop (and free) the stolen packets. */
            for (i = 0; i < cnt; i++) {
                dpif_packet_delete(packets[i]);
            }
        }
        break;

    case OVS_ACTION_ATTR_USERSPACE:
        if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
            const struct nlattr *userdata;
            struct ofpbuf actions;
            struct flow flow;

            userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
            ofpbuf_init(&actions, 0);

            for (i = 0; i < cnt; i++) {
                int error;

                ofpbuf_clear(&actions);

                flow_extract(&packets[i]->ofpbuf, md, &flow);
                error = dp_netdev_upcall(dp, packets[i], &flow, NULL,
                                         DPIF_UC_ACTION, userdata, &actions,
                                         NULL);
                if (!error || error == ENOSPC) {
                    dp_netdev_execute_actions(dp, &packets[i], 1, false, md,
                                              ofpbuf_data(&actions),
                                              ofpbuf_size(&actions));
                }

                if (may_steal) {
                    dpif_packet_delete(packets[i]);
                }
            }
            ofpbuf_uninit(&actions);
            fat_rwlock_unlock(&dp->upcall_rwlock);
        }

        break;

    case OVS_ACTION_ATTR_HASH: {
        const struct ovs_action_hash *hash_act;
        struct netdev_flow_key key;
        uint32_t hash;

        hash_act = nl_attr_get(a);

        miniflow_initialize(&key.flow, key.buf);

        for (i = 0; i < cnt; i++) {

            /* XXX: this is slow. Use RSS hash in the future */
            miniflow_extract(&packets[i]->ofpbuf, md, &key.flow);

            if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
                /* Hash need not be symmetric, nor does it need to include
                 * L2 fields. */
                hash = miniflow_hash_5tuple(&key.flow, hash_act->hash_basis);
            } else {
                VLOG_WARN("Unknown hash algorithm specified "
                          "for the hash action.");
                hash = 2;
            }

            if (!hash) {
                hash = 1; /* 0 is not valid */
            }

            /* The batch's shared metadata carries the first packet's hash. */
            if (i == 0) {
                md->dp_hash = hash;
            }
            packets[i]->dp_hash = hash;
        }
        break;
    }

    case OVS_ACTION_ATTR_RECIRC:
        if (*depth < MAX_RECIRC_DEPTH) {

            (*depth)++;
            for (i = 0; i < cnt; i++) {
                struct dpif_packet *recirc_pkt;
                struct pkt_metadata recirc_md = *md;

                /* Steal the packet if allowed, otherwise recirculate a
                 * private copy so the caller keeps the original. */
                recirc_pkt = (may_steal) ? packets[i]
                    : dpif_packet_clone(packets[i]);

                recirc_md.recirc_id = nl_attr_get_u32(a);

                /* Hash is private to each packet */
                recirc_md.dp_hash = packets[i]->dp_hash;

                dp_netdev_input(dp, &recirc_pkt, 1, &recirc_md);
            }
            (*depth)--;

            break;
        } else {
            VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
            if (may_steal) {
                for (i = 0; i < cnt; i++) {
                    dpif_packet_delete(packets[i]);
                }
            }
        }
        break;

    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
|
static void
|
2014-06-23 11:43:59 -07:00
|
|
|
|
dp_netdev_execute_actions(struct dp_netdev *dp,
|
|
|
|
|
struct dpif_packet **packets, int cnt,
|
|
|
|
|
bool may_steal, struct pkt_metadata *md,
|
2013-11-14 14:35:58 -08:00
|
|
|
|
const struct nlattr *actions, size_t actions_len)
|
2009-06-19 14:09:39 -07:00
|
|
|
|
{
|
2014-06-23 11:43:59 -07:00
|
|
|
|
struct dp_netdev_execute_aux aux = {dp};
|
2013-11-14 14:35:58 -08:00
|
|
|
|
|
2014-06-23 11:43:59 -07:00
|
|
|
|
odp_execute_actions(&aux, packets, cnt, may_steal, md, actions,
|
|
|
|
|
actions_len, dp_execute_cb);
|
2009-06-19 14:09:39 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* The "netdev" datapath provider: a datapath implemented entirely in
 * userspace on top of ordinary network devices.
 *
 * Entries are positional, matching the order of function pointers in
 * struct dpif_class.  NULL slots are operations this implementation does
 * not provide; in particular the recv()-style upcall interface is unused
 * because upcalls are delivered through the callback registered with
 * dpif_netdev_register_upcall_cb(). */
const struct dpif_class dpif_netdev_class = {
    "netdev",
    dpif_netdev_enumerate,
    dpif_netdev_port_open_type,
    dpif_netdev_open,
    dpif_netdev_close,
    dpif_netdev_destroy,
    dpif_netdev_run,
    dpif_netdev_wait,
    dpif_netdev_get_stats,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    NULL,                       /* port_get_pid */
    dpif_netdev_port_dump_start,
    dpif_netdev_port_dump_next,
    dpif_netdev_port_dump_done,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_dump_create,
    dpif_netdev_flow_dump_destroy,
    dpif_netdev_flow_dump_thread_create,
    dpif_netdev_flow_dump_thread_destroy,
    dpif_netdev_flow_dump_next,
    dpif_netdev_operate,
    NULL,                       /* recv_set */
    NULL,                       /* handlers_set */
    dpif_netdev_queue_to_priority,
    NULL,                       /* recv */
    NULL,                       /* recv_wait */
    NULL,                       /* recv_purge */
    dpif_netdev_register_upcall_cb,
    dpif_netdev_enable_upcall,
    dpif_netdev_disable_upcall,
};
|
2010-11-29 12:21:08 -08:00
|
|
|
|
|
2013-07-29 15:11:49 -07:00
|
|
|
|
static void
|
|
|
|
|
dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
|
|
|
|
|
const char *argv[], void *aux OVS_UNUSED)
|
|
|
|
|
{
|
2014-05-20 13:21:09 -07:00
|
|
|
|
struct dp_netdev_port *old_port;
|
|
|
|
|
struct dp_netdev_port *new_port;
|
2013-07-29 15:11:49 -07:00
|
|
|
|
struct dp_netdev *dp;
|
2013-12-24 16:08:57 -08:00
|
|
|
|
odp_port_t port_no;
|
2013-07-29 15:11:49 -07:00
|
|
|
|
|
2014-01-08 15:58:11 -08:00
|
|
|
|
ovs_mutex_lock(&dp_netdev_mutex);
|
2013-07-29 15:11:49 -07:00
|
|
|
|
dp = shash_find_data(&dp_netdevs, argv[1]);
|
|
|
|
|
if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
|
2014-01-08 15:58:11 -08:00
|
|
|
|
ovs_mutex_unlock(&dp_netdev_mutex);
|
2013-07-29 15:11:49 -07:00
|
|
|
|
unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
|
|
|
|
|
return;
|
|
|
|
|
}
|
2014-01-08 15:58:11 -08:00
|
|
|
|
ovs_refcount_ref(&dp->ref_cnt);
|
|
|
|
|
ovs_mutex_unlock(&dp_netdev_mutex);
|
2013-07-29 15:11:49 -07:00
|
|
|
|
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovs_mutex_lock(&dp->port_mutex);
|
|
|
|
|
if (get_port_by_name(dp, argv[2], &old_port)) {
|
2013-07-29 15:11:49 -07:00
|
|
|
|
unixctl_command_reply_error(conn, "unknown port");
|
2014-01-08 15:58:11 -08:00
|
|
|
|
goto exit;
|
2013-07-29 15:11:49 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-12-24 16:08:57 -08:00
|
|
|
|
port_no = u32_to_odp(atoi(argv[3]));
|
|
|
|
|
if (!port_no || port_no == ODPP_NONE) {
|
2013-07-29 15:11:49 -07:00
|
|
|
|
unixctl_command_reply_error(conn, "bad port number");
|
2014-01-08 15:58:11 -08:00
|
|
|
|
goto exit;
|
2013-07-29 15:11:49 -07:00
|
|
|
|
}
|
2013-12-24 16:08:57 -08:00
|
|
|
|
if (dp_netdev_lookup_port(dp, port_no)) {
|
2013-07-29 15:11:49 -07:00
|
|
|
|
unixctl_command_reply_error(conn, "port number already in use");
|
2014-01-08 15:58:11 -08:00
|
|
|
|
goto exit;
|
2013-07-29 15:11:49 -07:00
|
|
|
|
}
|
2014-05-20 13:21:09 -07:00
|
|
|
|
|
|
|
|
|
/* Remove old port. */
|
|
|
|
|
cmap_remove(&dp->ports, &old_port->node, hash_port_no(old_port->port_no));
|
|
|
|
|
ovsrcu_postpone(free, old_port);
|
|
|
|
|
|
|
|
|
|
/* Insert new port (cmap semantics mean we cannot re-insert 'old_port'). */
|
|
|
|
|
new_port = xmemdup(old_port, sizeof *old_port);
|
|
|
|
|
new_port->port_no = port_no;
|
|
|
|
|
cmap_insert(&dp->ports, &new_port->node, hash_port_no(port_no));
|
|
|
|
|
|
2013-08-07 13:29:54 -07:00
|
|
|
|
seq_change(dp->port_seq);
|
2013-07-29 15:11:49 -07:00
|
|
|
|
unixctl_command_reply(conn, NULL);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
|
|
|
|
|
exit:
|
2014-05-20 13:21:09 -07:00
|
|
|
|
ovs_mutex_unlock(&dp->port_mutex);
|
2014-01-08 15:58:11 -08:00
|
|
|
|
dp_netdev_unref(dp);
|
2013-07-29 15:11:49 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-05-22 09:36:00 -07:00
|
|
|
|
static void
|
|
|
|
|
dpif_dummy_delete_port(struct unixctl_conn *conn, int argc OVS_UNUSED,
|
|
|
|
|
const char *argv[], void *aux OVS_UNUSED)
|
|
|
|
|
{
|
|
|
|
|
struct dp_netdev_port *port;
|
|
|
|
|
struct dp_netdev *dp;
|
|
|
|
|
|
|
|
|
|
ovs_mutex_lock(&dp_netdev_mutex);
|
|
|
|
|
dp = shash_find_data(&dp_netdevs, argv[1]);
|
|
|
|
|
if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
|
|
|
|
|
ovs_mutex_unlock(&dp_netdev_mutex);
|
|
|
|
|
unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
ovs_refcount_ref(&dp->ref_cnt);
|
|
|
|
|
ovs_mutex_unlock(&dp_netdev_mutex);
|
|
|
|
|
|
|
|
|
|
ovs_mutex_lock(&dp->port_mutex);
|
|
|
|
|
if (get_port_by_name(dp, argv[2], &port)) {
|
|
|
|
|
unixctl_command_reply_error(conn, "unknown port");
|
|
|
|
|
} else if (port->port_no == ODPP_LOCAL) {
|
|
|
|
|
unixctl_command_reply_error(conn, "can't delete local port");
|
|
|
|
|
} else {
|
|
|
|
|
do_del_port(dp, port);
|
|
|
|
|
unixctl_command_reply(conn, NULL);
|
|
|
|
|
}
|
|
|
|
|
ovs_mutex_unlock(&dp->port_mutex);
|
|
|
|
|
|
|
|
|
|
dp_netdev_unref(dp);
|
|
|
|
|
}
|
|
|
|
|
|
2012-01-19 10:24:46 -08:00
|
|
|
|
static void
|
|
|
|
|
dpif_dummy_register__(const char *type)
|
|
|
|
|
{
|
|
|
|
|
struct dpif_class *class;
|
|
|
|
|
|
|
|
|
|
class = xmalloc(sizeof *class);
|
|
|
|
|
*class = dpif_netdev_class;
|
|
|
|
|
class->type = xstrdup(type);
|
|
|
|
|
dp_register_provider(class);
|
|
|
|
|
}
|
|
|
|
|
|
2010-11-29 12:21:08 -08:00
|
|
|
|
void
|
2012-01-19 10:24:46 -08:00
|
|
|
|
dpif_dummy_register(bool override)
|
2010-11-29 12:21:08 -08:00
|
|
|
|
{
|
2012-01-19 10:24:46 -08:00
|
|
|
|
if (override) {
|
|
|
|
|
struct sset types;
|
|
|
|
|
const char *type;
|
|
|
|
|
|
|
|
|
|
sset_init(&types);
|
|
|
|
|
dp_enumerate_types(&types);
|
|
|
|
|
SSET_FOR_EACH (type, &types) {
|
|
|
|
|
if (!dp_unregister_provider(type)) {
|
|
|
|
|
dpif_dummy_register__(type);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
sset_destroy(&types);
|
2010-11-29 12:21:08 -08:00
|
|
|
|
}
|
2012-01-19 10:24:46 -08:00
|
|
|
|
|
|
|
|
|
dpif_dummy_register__("dummy");
|
2013-07-29 15:11:49 -07:00
|
|
|
|
|
|
|
|
|
unixctl_command_register("dpif-dummy/change-port-number",
|
|
|
|
|
"DP PORT NEW-NUMBER",
|
|
|
|
|
3, 3, dpif_dummy_change_port_number, NULL);
|
2014-05-22 09:36:00 -07:00
|
|
|
|
unixctl_command_register("dpif-dummy/delete-port", "DP PORT",
|
|
|
|
|
2, 2, dpif_dummy_delete_port, NULL);
|
2010-11-29 12:21:08 -08:00
|
|
|
|
}
|