2014-01-13 15:33:27 -08:00
|
|
|
|
/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
|
2013-06-25 14:45:43 -07:00
|
|
|
|
*
|
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
|
* You may obtain a copy of the License at:
|
|
|
|
|
*
|
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
*
|
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
|
* limitations under the License. */
|
|
|
|
|
|
|
|
|
|
#include <config.h>
|
|
|
|
|
#include "ofproto-dpif-upcall.h"
|
|
|
|
|
|
|
|
|
|
#include <errno.h>
|
|
|
|
|
#include <stdbool.h>
|
|
|
|
|
#include <inttypes.h>
|
|
|
|
|
|
2013-10-22 16:16:31 -07:00
|
|
|
|
#include "connmgr.h"
|
2013-06-25 14:45:43 -07:00
|
|
|
|
#include "coverage.h"
|
|
|
|
|
#include "dpif.h"
|
2013-11-20 18:06:12 -08:00
|
|
|
|
#include "dynamic-string.h"
|
2013-06-25 14:45:43 -07:00
|
|
|
|
#include "fail-open.h"
|
2013-09-12 17:42:23 -07:00
|
|
|
|
#include "guarded-list.h"
|
2013-06-25 14:45:43 -07:00
|
|
|
|
#include "latch.h"
|
|
|
|
|
#include "list.h"
|
|
|
|
|
#include "netlink.h"
|
|
|
|
|
#include "ofpbuf.h"
|
2013-09-24 15:04:04 -07:00
|
|
|
|
#include "ofproto-dpif-ipfix.h"
|
|
|
|
|
#include "ofproto-dpif-sflow.h"
|
2013-09-24 13:39:56 -07:00
|
|
|
|
#include "ofproto-dpif-xlate.h"
|
2014-03-18 16:34:28 -07:00
|
|
|
|
#include "ovs-rcu.h"
|
2013-06-25 14:45:43 -07:00
|
|
|
|
#include "packets.h"
|
|
|
|
|
#include "poll-loop.h"
|
2013-11-20 18:06:12 -08:00
|
|
|
|
#include "seq.h"
|
|
|
|
|
#include "unixctl.h"
|
2013-06-25 14:45:43 -07:00
|
|
|
|
#include "vlog.h"
|
|
|
|
|
|
|
|
|
|
#define MAX_QUEUE_LENGTH 512
|
2014-07-26 06:51:55 +00:00
|
|
|
|
#define UPCALL_MAX_BATCH 64
|
2013-09-24 13:39:56 -07:00
|
|
|
|
#define REVALIDATE_MAX_BATCH 50
|
2013-06-25 14:45:43 -07:00
|
|
|
|
|
|
|
|
|
VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
|
|
|
|
|
|
revalidator: Prevent handling the same flow twice.
When the datapath flow table is modified while a flow dump operation is
in progress, it is possible for the same flow to be dumped twice. In
such cases, revalidators may perform redundant work, or attempt to
delete the same flow twice.
This was causing intermittent testsuite failures for test #670 -
"ofproto-dpif, active-backup bonding" where a flow (that had not
previously been dumped) was dumped, revalidated and deleted twice.
The logs show errors such as:
"failed to flow_get (No such file or directory) skb_priority(0),..."
"failed to flow_del (No such file or directory) skb_priority(0),..."
This patch adds a 'flow_exists' field to 'struct udpif_key' to track
whether the flow is (in progress) to be deleted. After doing a ukey
lookup, we check whether ukey->mark or ukey->flow indicates that the
flow has already been handled. If it has already been handled, we skip
handling the flow again.
We also defer ukey cleanup for flows that fail revalidation, so that the
ukey will still exist if the same flow is dumped twice. This allows the
above logic to work in this case.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Alex Wang <alexw@nicira.com>
2014-04-23 15:31:17 +12:00
|
|
|
|
COVERAGE_DEFINE(upcall_duplicate_flow);
|
2014-07-08 07:04:05 +00:00
|
|
|
|
COVERAGE_DEFINE(revalidate_missed_dp_flow);
|
revalidator: Prevent handling the same flow twice.
When the datapath flow table is modified while a flow dump operation is
in progress, it is possible for the same flow to be dumped twice. In
such cases, revalidators may perform redundant work, or attempt to
delete the same flow twice.
This was causing intermittent testsuite failures for test #670 -
"ofproto-dpif, active-backup bonding" where a flow (that had not
previously been dumped) was dumped, revalidated and deleted twice.
The logs show errors such as:
"failed to flow_get (No such file or directory) skb_priority(0),..."
"failed to flow_del (No such file or directory) skb_priority(0),..."
This patch adds a 'flow_exists' field to 'struct udpif_key' to track
whether the flow is (in progress) to be deleted. After doing a ukey
lookup, we check whether ukey->mark or ukey->flow indicates that the
flow has already been handled. If it has already been handled, we skip
handling the flow again.
We also defer ukey cleanup for flows that fail revalidation, so that the
ukey will still exist if the same flow is dumped twice. This allows the
above logic to work in this case.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Alex Wang <alexw@nicira.com>
2014-04-23 15:31:17 +12:00
|
|
|
|
|
2014-02-26 23:03:24 -08:00
|
|
|
|
/* A thread that reads upcalls from dpif, forwards each upcall's packet,
 * and possibly sets up a kernel flow as a cache. */
struct handler {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    uint32_t handler_id;               /* Handler id. */
};
|
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
/* A thread that processes datapath flows, updates OpenFlow statistics, and
 * updates or removes them if necessary. */
struct revalidator {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    unsigned int id;                   /* ovsthread_id_self(). */
    struct hmap *ukeys;                /* Points into udpif->ukeys for this
                                          revalidator.  Used for GC phase. */
};
|
|
|
|
|
|
2013-06-25 14:45:43 -07:00
|
|
|
|
/* An upcall handler for ofproto_dpif.
 *
 * udpif keeps records of two kinds of logically separate units:
 *
 * upcall handling
 * ---------------
 *
 *    - An array of 'struct handler's for upcall handling and flow
 *      installation.
 *
 * flow revalidation
 * -----------------
 *
 *    - Revalidation threads which read the datapath flow table and maintains
 *      them.
 */
struct udpif {
    struct list list_node;             /* In all_udpifs list. */

    struct dpif *dpif;                 /* Datapath handle. */
    struct dpif_backer *backer;        /* Opaque dpif_backer pointer. */

    uint32_t secret;                   /* Random seed for upcall hash. */

    struct handler *handlers;          /* Upcall handlers. */
    size_t n_handlers;

    struct revalidator *revalidators;  /* Flow revalidators. */
    size_t n_revalidators;

    struct latch exit_latch;           /* Tells child threads to exit. */

    /* Revalidation. */
    struct seq *reval_seq;             /* Incremented to force revalidation. */
    bool need_revalidate;              /* As indicated by 'reval_seq'. */
    bool reval_exit;                   /* Set by leader on 'exit_latch'. */
    struct ovs_barrier reval_barrier;  /* Barrier used by revalidators. */
    struct dpif_flow_dump *dump;       /* DPIF flow dump state. */
    long long int dump_duration;       /* Duration of the last flow dump. */
    struct seq *dump_seq;              /* Increments each dump iteration. */

    /* There are 'n_revalidators' ukey hmaps.  Each revalidator retains a
     * reference to one of these for garbage collection.
     *
     * During the flow dump phase, revalidators insert into these with a random
     * distribution.  During the garbage collection phase, each revalidator
     * takes care of garbage collecting one of these hmaps. */
    struct {
        struct ovs_mutex mutex;        /* Guards the following. */
        struct hmap hmap OVS_GUARDED;  /* Datapath flow keys. */
    } *ukeys;

    /* Datapath flow statistics. */
    unsigned int max_n_flows;
    unsigned int avg_n_flows;

    /* Following fields are accessed and modified by different threads. */
    atomic_uint flow_limit;            /* Datapath flow hard limit. */

    /* n_flows_mutex prevents multiple threads updating these concurrently. */
    atomic_ulong n_flows;              /* Number of flows in the datapath. */
    atomic_llong n_flows_timestamp;    /* Last time n_flows was updated. */
    struct ovs_mutex n_flows_mutex;

    /* Following fields are accessed and modified only from the main thread. */
    struct unixctl_conn **conns;       /* Connections waiting on dump_seq. */
    uint64_t conn_seq;                 /* Corresponds to 'dump_seq' when
                                          conns[n_conns-1] was stored. */
    size_t n_conns;                    /* Number of connections waiting. */
};
|
|
|
|
|
|
2013-09-24 15:04:04 -07:00
|
|
|
|
/* Classification of an upcall, derived from its datapath upcall type and
 * any userdata cookie (see classify_upcall()). */
enum upcall_type {
    BAD_UPCALL,                 /* Some kind of bug somewhere. */
    MISS_UPCALL,                /* A flow miss.  */
    SFLOW_UPCALL,               /* sFlow sample. */
    FLOW_SAMPLE_UPCALL,         /* Per-flow sampling. */
    IPFIX_UPCALL                /* Per-bridge sampling. */
};
|
|
|
|
|
|
|
|
|
|
/* An upcall received from the datapath, together with the translation state
 * accumulated while processing it. */
struct upcall {
    struct ofproto_dpif *ofproto;  /* Parent ofproto. */

    /* The flow and packet are only required to be constant when using
     * dpif-netdev.  If a modification is absolutely necessary, a const cast
     * may be used with other datapaths. */
    const struct flow *flow;       /* Parsed representation of the packet. */
    const struct ofpbuf *packet;   /* Packet associated with this upcall. */
    ofp_port_t in_port;            /* OpenFlow in port, or OFPP_NONE. */

    enum dpif_upcall_type type;    /* Datapath type of the upcall. */
    const struct nlattr *userdata; /* Userdata for DPIF_UC_ACTION Upcalls. */

    bool xout_initialized;         /* True if 'xout' must be uninitialized. */
    struct xlate_out xout;         /* Result of xlate_actions(). */
    struct ofpbuf put_actions;     /* Actions 'put' in the fastpath. */

    struct dpif_ipfix *ipfix;      /* IPFIX reference or NULL. */
    struct dpif_sflow *sflow;      /* SFlow reference or NULL. */
    struct netflow *netflow;       /* NetFlow reference or NULL. */

    bool vsp_adjusted;             /* 'packet' and 'flow' were adjusted for
                                      VLAN splinters if true. */

    /* Not used by the upcall callback interface. */
    const struct nlattr *key;      /* Datapath flow key. */
    size_t key_len;                /* Datapath flow key length. */
};
|
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
/* 'udpif_key's are responsible for tracking the little bit of state udpif
 * needs to do flow expiration which can't be pulled directly from the
 * datapath.  They may be created or maintained by any revalidator during
 * the dump phase, but are owned by a single revalidator, and are destroyed
 * by that revalidator during the garbage-collection phase.
 *
 * While some elements of a udpif_key are protected by a mutex, the ukey itself
 * is not.  Therefore it is not safe to destroy a udpif_key except when all
 * revalidators are in garbage collection phase, or they aren't running. */
struct udpif_key {
    struct hmap_node hmap_node;     /* In parent revalidator 'ukeys' map. */

    /* These elements are read only once created, and therefore aren't
     * protected by a mutex. */
    const struct nlattr *key;       /* Datapath flow key. */
    size_t key_len;                 /* Length of 'key'. */

    struct ovs_mutex mutex;                   /* Guards the following. */
    struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats.*/
    long long int created OVS_GUARDED;        /* Estimate of creation time. */
    uint64_t dump_seq OVS_GUARDED;            /* Tracks udpif->dump_seq. */
    bool flow_exists OVS_GUARDED;             /* Ensures flows are only deleted
                                                 once. */

    struct xlate_cache *xcache OVS_GUARDED;   /* Cache for xlate entries that
                                               * are affected by this ukey.
                                               * Used for stats and learning.*/
    union {
        struct odputil_keybuf key_buf;        /* Memory for 'key'. */
        struct nlattr key_buf_nla;
    };
};
|
|
|
|
|
|
2013-06-25 14:45:43 -07:00
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
|
2013-11-20 18:06:12 -08:00
|
|
|
|
static struct list all_udpifs = LIST_INITIALIZER(&all_udpifs);
|
2013-06-25 14:45:43 -07:00
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
static size_t recv_upcalls(struct handler *);
|
|
|
|
|
static int process_upcall(struct udpif *, struct upcall *,
|
|
|
|
|
struct ofpbuf *odp_actions);
|
2014-07-26 06:51:55 +00:00
|
|
|
|
static void handle_upcalls(struct udpif *, struct upcall *, size_t n_upcalls);
|
2014-04-21 17:31:11 -07:00
|
|
|
|
static void udpif_stop_threads(struct udpif *);
|
|
|
|
|
static void udpif_start_threads(struct udpif *, size_t n_handlers,
|
|
|
|
|
size_t n_revalidators);
|
2013-09-24 15:04:04 -07:00
|
|
|
|
static void *udpif_upcall_handler(void *);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
static void *udpif_revalidator(void *);
|
2014-05-14 16:19:34 +09:00
|
|
|
|
static unsigned long udpif_get_n_flows(struct udpif *);
|
2014-04-10 07:14:08 +00:00
|
|
|
|
static void revalidate(struct revalidator *);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
static void revalidator_sweep(struct revalidator *);
|
2014-02-11 13:55:36 -08:00
|
|
|
|
static void revalidator_purge(struct revalidator *);
|
2013-11-20 18:06:12 -08:00
|
|
|
|
static void upcall_unixctl_show(struct unixctl_conn *conn, int argc,
|
|
|
|
|
const char *argv[], void *aux);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
static void upcall_unixctl_disable_megaflows(struct unixctl_conn *, int argc,
|
|
|
|
|
const char *argv[], void *aux);
|
|
|
|
|
static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc,
|
|
|
|
|
const char *argv[], void *aux);
|
2014-02-06 09:49:19 -08:00
|
|
|
|
static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc,
|
|
|
|
|
const char *argv[], void *aux);
|
2014-06-25 14:02:45 +00:00
|
|
|
|
static void upcall_unixctl_dump_wait(struct unixctl_conn *conn, int argc,
|
|
|
|
|
const char *argv[], void *aux);
|
2014-04-10 07:14:08 +00:00
|
|
|
|
|
|
|
|
|
static struct udpif_key *ukey_create(const struct nlattr *key, size_t key_len,
|
|
|
|
|
long long int used);
|
2014-06-04 09:59:23 +00:00
|
|
|
|
static struct udpif_key *ukey_lookup(struct udpif *udpif,
|
|
|
|
|
const struct nlattr *key, size_t key_len,
|
|
|
|
|
uint32_t hash);
|
|
|
|
|
static bool ukey_acquire(struct udpif *udpif, const struct nlattr *key,
|
|
|
|
|
size_t key_len, long long int used,
|
|
|
|
|
struct udpif_key **result);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
static void ukey_delete(struct revalidator *, struct udpif_key *);
|
2014-08-06 18:49:44 -07:00
|
|
|
|
static enum upcall_type classify_upcall(enum dpif_upcall_type type,
|
|
|
|
|
const struct nlattr *userdata);
|
|
|
|
|
|
|
|
|
|
static int upcall_receive(struct upcall *, const struct dpif_backer *,
|
|
|
|
|
const struct ofpbuf *packet, enum dpif_upcall_type,
|
|
|
|
|
const struct nlattr *userdata, const struct flow *);
|
|
|
|
|
static void upcall_uninit(struct upcall *);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-07-26 15:39:58 -07:00
|
|
|
|
static upcall_callback upcall_cb;
|
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true);
|
2013-06-25 14:45:43 -07:00
|
|
|
|
|
|
|
|
|
/* Creates and returns a new udpif for 'dpif' and 'backer'.  On the first
 * call, also registers the upcall-related unixctl commands (process-wide).
 * Registers upcall_cb as the datapath's upcall callback and links the new
 * udpif into 'all_udpifs'.  The caller owns the result and must eventually
 * release it with udpif_destroy(). */
struct udpif *
udpif_create(struct dpif_backer *backer, struct dpif *dpif)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    struct udpif *udpif = xzalloc(sizeof *udpif);

    /* unixctl commands are global, so register them exactly once even if
     * multiple udpifs are created. */
    if (ovsthread_once_start(&once)) {
        unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
                                 NULL);
        unixctl_command_register("upcall/disable-megaflows", "", 0, 0,
                                 upcall_unixctl_disable_megaflows, NULL);
        unixctl_command_register("upcall/enable-megaflows", "", 0, 0,
                                 upcall_unixctl_enable_megaflows, NULL);
        unixctl_command_register("upcall/set-flow-limit", "", 1, 1,
                                 upcall_unixctl_set_flow_limit, NULL);
        unixctl_command_register("revalidator/wait", "", 0, 0,
                                 upcall_unixctl_dump_wait, NULL);
        ovsthread_once_done(&once);
    }

    udpif->dpif = dpif;
    udpif->backer = backer;
    atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000));
    udpif->secret = random_uint32();
    udpif->reval_seq = seq_create();
    udpif->dump_seq = seq_create();
    latch_init(&udpif->exit_latch);
    list_push_back(&all_udpifs, &udpif->list_node);
    atomic_init(&udpif->n_flows, 0);
    atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
    ovs_mutex_init(&udpif->n_flows_mutex);

    dpif_register_upcall_cb(dpif, upcall_cb, udpif);

    return udpif;
}
|
|
|
|
|
|
2014-06-25 14:02:45 +00:00
|
|
|
|
void
|
|
|
|
|
udpif_run(struct udpif *udpif)
|
|
|
|
|
{
|
|
|
|
|
if (udpif->conns && udpif->conn_seq != seq_read(udpif->dump_seq)) {
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < udpif->n_conns; i++) {
|
|
|
|
|
unixctl_command_reply(udpif->conns[i], NULL);
|
|
|
|
|
}
|
|
|
|
|
free(udpif->conns);
|
|
|
|
|
udpif->conns = NULL;
|
|
|
|
|
udpif->n_conns = 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-25 14:45:43 -07:00
|
|
|
|
/* Stops 'udpif''s threads, tears down its state, and frees it.  Must not be
 * called while any other thread could still reference 'udpif'. */
void
udpif_destroy(struct udpif *udpif)
{
    udpif_stop_threads(udpif);

    list_remove(&udpif->list_node);
    latch_destroy(&udpif->exit_latch);
    seq_destroy(udpif->reval_seq);
    seq_destroy(udpif->dump_seq);
    ovs_mutex_destroy(&udpif->n_flows_mutex);
    free(udpif);
}
|
|
|
|
|
|
2014-04-21 17:31:11 -07:00
|
|
|
|
/* Stops the handler and revalidator threads, must be enclosed in
 * ovsrcu quiescent state unless when destroying udpif. */
static void
udpif_stop_threads(struct udpif *udpif)
{
    if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) {
        size_t i;

        /* Signal all child threads to exit, then wait for each of them. */
        latch_set(&udpif->exit_latch);

        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            xpthread_join(handler->thread, NULL);
        }

        for (i = 0; i < udpif->n_revalidators; i++) {
            xpthread_join(udpif->revalidators[i].thread, NULL);
        }

        dpif_disable_upcall(udpif->dpif);

        for (i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            /* Delete ukeys, and delete all flows from the datapath to prevent
             * double-counting stats. */
            revalidator_purge(revalidator);

            hmap_destroy(&udpif->ukeys[i].hmap);
            ovs_mutex_destroy(&udpif->ukeys[i].mutex);
        }

        /* Clear the latch so a later udpif_start_threads() starts cleanly. */
        latch_poll(&udpif->exit_latch);

        ovs_barrier_destroy(&udpif->reval_barrier);

        free(udpif->revalidators);
        udpif->revalidators = NULL;
        udpif->n_revalidators = 0;

        free(udpif->handlers);
        udpif->handlers = NULL;
        udpif->n_handlers = 0;

        free(udpif->ukeys);
        udpif->ukeys = NULL;
    }
}
|
2013-06-25 14:45:43 -07:00
|
|
|
|
|
2014-04-21 17:31:11 -07:00
|
|
|
|
/* Starts the handler and revalidator threads, must be enclosed in
 * ovsrcu quiescent state. */
static void
udpif_start_threads(struct udpif *udpif, size_t n_handlers,
                    size_t n_revalidators)
{
    if (udpif && n_handlers && n_revalidators) {
        size_t i;

        udpif->n_handlers = n_handlers;
        udpif->n_revalidators = n_revalidators;

        udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            handler->udpif = udpif;
            handler->handler_id = i;
            handler->thread = ovs_thread_create(
                "handler", udpif_upcall_handler, handler);
        }

        dpif_enable_upcall(udpif->dpif);

        /* Revalidators coordinate dump/sweep phases through this barrier;
         * it must exist before the first revalidator thread runs. */
        ovs_barrier_init(&udpif->reval_barrier, udpif->n_revalidators);
        udpif->reval_exit = false;
        udpif->revalidators = xzalloc(udpif->n_revalidators
                                      * sizeof *udpif->revalidators);
        udpif->ukeys = xmalloc(sizeof *udpif->ukeys * n_revalidators);
        for (i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            revalidator->udpif = udpif;
            hmap_init(&udpif->ukeys[i].hmap);
            ovs_mutex_init(&udpif->ukeys[i].mutex);
            revalidator->ukeys = &udpif->ukeys[i].hmap;
            revalidator->thread = ovs_thread_create(
                "revalidator", udpif_revalidator, revalidator);
        }
    }
}
|
2014-03-18 16:34:28 -07:00
|
|
|
|
|
2014-04-21 17:31:11 -07:00
|
|
|
|
/* Tells 'udpif' how many threads it should use to handle upcalls.
|
|
|
|
|
* 'n_handlers' and 'n_revalidators' can never be zero. 'udpif''s
|
|
|
|
|
* datapath handle must have packet reception enabled before starting
|
|
|
|
|
* threads. */
|
|
|
|
|
void
|
|
|
|
|
udpif_set_threads(struct udpif *udpif, size_t n_handlers,
|
|
|
|
|
size_t n_revalidators)
|
|
|
|
|
{
|
2014-04-21 20:05:08 -07:00
|
|
|
|
ovs_assert(udpif);
|
2014-04-21 17:31:11 -07:00
|
|
|
|
ovs_assert(n_handlers && n_revalidators);
|
|
|
|
|
|
|
|
|
|
ovsrcu_quiesce_start();
|
2014-04-21 20:05:08 -07:00
|
|
|
|
if (udpif->n_handlers != n_handlers
|
|
|
|
|
|| udpif->n_revalidators != n_revalidators) {
|
|
|
|
|
udpif_stop_threads(udpif);
|
|
|
|
|
}
|
2014-04-21 17:31:11 -07:00
|
|
|
|
|
2014-04-21 20:05:08 -07:00
|
|
|
|
if (!udpif->handlers && !udpif->revalidators) {
|
2014-05-09 14:42:30 -07:00
|
|
|
|
int error;
|
|
|
|
|
|
|
|
|
|
error = dpif_handlers_set(udpif->dpif, n_handlers);
|
|
|
|
|
if (error) {
|
|
|
|
|
VLOG_ERR("failed to configure handlers in dpif %s: %s",
|
|
|
|
|
dpif_name(udpif->dpif), ovs_strerror(error));
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-21 20:05:08 -07:00
|
|
|
|
udpif_start_threads(udpif, n_handlers, n_revalidators);
|
|
|
|
|
}
|
2014-03-18 16:34:28 -07:00
|
|
|
|
ovsrcu_quiesce_end();
|
2013-06-25 14:45:43 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-02-25 08:01:01 -08:00
|
|
|
|
/* Waits for all ongoing upcall translations to complete.  This ensures that
 * there are no transient references to any removed ofprotos (or other
 * objects).  In particular, this should be called after an ofproto is removed
 * (e.g. via xlate_remove_ofproto()) but before it is destroyed. */
void
udpif_synchronize(struct udpif *udpif)
{
    /* This is stronger than necessary.  It would be sufficient to ensure
     * (somehow) that each handler and revalidator thread had passed through
     * its main loop once. */
    size_t n_handlers = udpif->n_handlers;
    size_t n_revalidators = udpif->n_revalidators;

    ovsrcu_quiesce_start();
    udpif_stop_threads(udpif);
    udpif_start_threads(udpif, n_handlers, n_revalidators);
    ovsrcu_quiesce_end();
}
|
|
|
|
|
|
2013-06-25 14:45:43 -07:00
|
|
|
|
/* Notifies 'udpif' that something changed which may render previous
 * xlate_actions() results invalid. */
void
udpif_revalidate(struct udpif *udpif)
{
    seq_change(udpif->reval_seq);
}
|
2013-09-12 17:42:23 -07:00
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
/* Returns a seq which increments every time 'udpif' pulls stats from the
 * datapath.  Callers can use this to get a sense of when might be a good time
 * to do periodic work which relies on relatively up to date statistics. */
struct seq *
udpif_dump_seq(struct udpif *udpif)
{
    return udpif->dump_seq;
}
|
|
|
|
|
|
2013-11-20 17:41:02 -08:00
|
|
|
|
void
|
|
|
|
|
udpif_get_memory_usage(struct udpif *udpif, struct simap *usage)
|
|
|
|
|
{
|
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
|
|
simap_increase(usage, "handlers", udpif->n_handlers);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
|
|
|
|
simap_increase(usage, "revalidators", udpif->n_revalidators);
|
|
|
|
|
for (i = 0; i < udpif->n_revalidators; i++) {
|
2014-04-10 07:14:08 +00:00
|
|
|
|
ovs_mutex_lock(&udpif->ukeys[i].mutex);
|
|
|
|
|
simap_increase(usage, "udpif keys", hmap_count(&udpif->ukeys[i].hmap));
|
|
|
|
|
ovs_mutex_unlock(&udpif->ukeys[i].mutex);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
2013-11-20 17:41:02 -08:00
|
|
|
|
}
|
|
|
|
|
|
udpif: Bug fix udpif_flush
Before this commit, all datapath flows were cleared with dpif_flush(),
but the revalidator thread still held ukeys, which are caches of the
datapath flows in the revalidator. Flushing ukeys caused flow_del
messages to be sent to the datapath again for flows that had already
been deleted by dpif_flush().
Double deletion is not a problem per se, though it may be an efficiency
issue. However, for every flow_del message sent to the datapath, a log
message, at the warning level, is generated if the datapath fails
to execute the command. In addition to causing spurious log
messages, double deletion causes unit tests to report erroneous
failures, since all warning messages are considered test failures.
The fix is to simply shut down the revalidator threads to flush all
ukeys, then flush the datapath before restarting the revalidator threads.
dpif_flush() was implemented to flush flows of all datapaths, while
most of its invocations should only flush the local datapath.
Only megaflow on/off commands should flush all datapaths. This bug is
also fixed.
Found during development.
Signed-off-by: Andy Zhou <azhou@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
2014-03-13 21:48:55 -07:00
|
|
|
|
/* Remove flows from a single datapath. */
|
2013-09-24 13:39:56 -07:00
|
|
|
|
void
|
udpif: Bug fix updif_flush
Before this commit, all datapath flows are cleared with dpif_flush(),
but the revalidator thread still holds ukeys, which are caches of the
datapath flows in the revalidaor. Flushing ukeys causes flow_del
messages to be sent to the datapath again on flows that have been
deleted by the dpif_flush() already.
Double deletion by itself is not problem, per se, may an efficiency
issue. However, for ever flow_del message sent to the datapath, a log
message, at the warning level, will be generated in case datapath
failed to execute the command. In addition to cause spurious log
messages, Double deletion causes unit tests to report erroneous
failures as all warning messages are considered test failures.
The fix is to simply shut down the revalidator threads to flush all
ukeys, then flush the datapth before restarting the revalidator threads.
dpif_flush() was implemented as flush flows of all datapaths while
most of its invocation should only flush its local datapath.
Only megaflow on/off commands should flush all dapapaths. This bug is
also fixed.
Found during development.
Signed-off-by: Andy Zhou <azhou@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
2014-03-13 21:48:55 -07:00
|
|
|
|
udpif_flush(struct udpif *udpif)
|
|
|
|
|
{
|
|
|
|
|
size_t n_handlers, n_revalidators;
|
|
|
|
|
|
|
|
|
|
n_handlers = udpif->n_handlers;
|
|
|
|
|
n_revalidators = udpif->n_revalidators;
|
|
|
|
|
|
2014-04-21 17:31:11 -07:00
|
|
|
|
ovsrcu_quiesce_start();
|
|
|
|
|
|
|
|
|
|
udpif_stop_threads(udpif);
|
udpif: Bug fix updif_flush
Before this commit, all datapath flows are cleared with dpif_flush(),
but the revalidator thread still holds ukeys, which are caches of the
datapath flows in the revalidaor. Flushing ukeys causes flow_del
messages to be sent to the datapath again on flows that have been
deleted by the dpif_flush() already.
Double deletion by itself is not problem, per se, may an efficiency
issue. However, for ever flow_del message sent to the datapath, a log
message, at the warning level, will be generated in case datapath
failed to execute the command. In addition to cause spurious log
messages, Double deletion causes unit tests to report erroneous
failures as all warning messages are considered test failures.
The fix is to simply shut down the revalidator threads to flush all
ukeys, then flush the datapth before restarting the revalidator threads.
dpif_flush() was implemented as flush flows of all datapaths while
most of its invocation should only flush its local datapath.
Only megaflow on/off commands should flush all dapapaths. This bug is
also fixed.
Found during development.
Signed-off-by: Andy Zhou <azhou@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
2014-03-13 21:48:55 -07:00
|
|
|
|
dpif_flow_flush(udpif->dpif);
|
2014-04-21 17:31:11 -07:00
|
|
|
|
udpif_start_threads(udpif, n_handlers, n_revalidators);
|
|
|
|
|
|
|
|
|
|
ovsrcu_quiesce_end();
|
udpif: Bug fix updif_flush
Before this commit, all datapath flows are cleared with dpif_flush(),
but the revalidator thread still holds ukeys, which are caches of the
datapath flows in the revalidaor. Flushing ukeys causes flow_del
messages to be sent to the datapath again on flows that have been
deleted by the dpif_flush() already.
Double deletion by itself is not problem, per se, may an efficiency
issue. However, for ever flow_del message sent to the datapath, a log
message, at the warning level, will be generated in case datapath
failed to execute the command. In addition to cause spurious log
messages, Double deletion causes unit tests to report erroneous
failures as all warning messages are considered test failures.
The fix is to simply shut down the revalidator threads to flush all
ukeys, then flush the datapth before restarting the revalidator threads.
dpif_flush() was implemented as flush flows of all datapaths while
most of its invocation should only flush its local datapath.
Only megaflow on/off commands should flush all dapapaths. This bug is
also fixed.
Found during development.
Signed-off-by: Andy Zhou <azhou@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
2014-03-13 21:48:55 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Removes all flows from all datapaths. */
|
|
|
|
|
static void
|
|
|
|
|
udpif_flush_all_datapaths(void)
|
2013-09-24 13:39:56 -07:00
|
|
|
|
{
|
|
|
|
|
struct udpif *udpif;
|
|
|
|
|
|
|
|
|
|
LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
|
udpif: Bug fix updif_flush
Before this commit, all datapath flows are cleared with dpif_flush(),
but the revalidator thread still holds ukeys, which are caches of the
datapath flows in the revalidaor. Flushing ukeys causes flow_del
messages to be sent to the datapath again on flows that have been
deleted by the dpif_flush() already.
Double deletion by itself is not problem, per se, may an efficiency
issue. However, for ever flow_del message sent to the datapath, a log
message, at the warning level, will be generated in case datapath
failed to execute the command. In addition to cause spurious log
messages, Double deletion causes unit tests to report erroneous
failures as all warning messages are considered test failures.
The fix is to simply shut down the revalidator threads to flush all
ukeys, then flush the datapth before restarting the revalidator threads.
dpif_flush() was implemented as flush flows of all datapaths while
most of its invocation should only flush its local datapath.
Only megaflow on/off commands should flush all dapapaths. This bug is
also fixed.
Found during development.
Signed-off-by: Andy Zhou <azhou@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
2014-03-13 21:48:55 -07:00
|
|
|
|
udpif_flush(udpif);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
udpif: Bug fix udpif_flush
Before this commit, all datapath flows were cleared with dpif_flush(),
but the revalidator thread still held ukeys, which are caches of the
datapath flows in the revalidator. Flushing ukeys caused flow_del
messages to be sent to the datapath again for flows that had already
been deleted by dpif_flush().
Double deletion is not a problem per se, though it may be an efficiency
issue. However, for every flow_del message sent to the datapath, a log
message, at the warning level, is generated if the datapath fails
to execute the command. In addition to causing spurious log
messages, double deletion causes unit tests to report erroneous
failures, since all warning messages are considered test failures.
The fix is to simply shut down the revalidator threads to flush all
ukeys, then flush the datapath before restarting the revalidator threads.
dpif_flush() was implemented to flush flows of all datapaths, while
most of its invocations should only flush the local datapath.
Only megaflow on/off commands should flush all datapaths. This bug is
also fixed.
Found during development.
Signed-off-by: Andy Zhou <azhou@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
2014-03-13 21:48:55 -07:00
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-05-14 16:19:34 +09:00
|
|
|
|
static unsigned long
|
2014-01-22 06:50:49 +00:00
|
|
|
|
udpif_get_n_flows(struct udpif *udpif)
|
2013-06-25 14:45:43 -07:00
|
|
|
|
{
|
2014-01-22 06:50:49 +00:00
|
|
|
|
long long int time, now;
|
2014-05-14 16:19:34 +09:00
|
|
|
|
unsigned long flow_count;
|
2014-01-22 06:50:49 +00:00
|
|
|
|
|
|
|
|
|
now = time_msec();
|
|
|
|
|
atomic_read(&udpif->n_flows_timestamp, &time);
|
|
|
|
|
if (time < now - 100 && !ovs_mutex_trylock(&udpif->n_flows_mutex)) {
|
|
|
|
|
struct dpif_dp_stats stats;
|
|
|
|
|
|
|
|
|
|
atomic_store(&udpif->n_flows_timestamp, now);
|
|
|
|
|
dpif_get_dp_stats(udpif->dpif, &stats);
|
|
|
|
|
flow_count = stats.n_flows;
|
|
|
|
|
atomic_store(&udpif->n_flows, flow_count);
|
|
|
|
|
ovs_mutex_unlock(&udpif->n_flows_mutex);
|
|
|
|
|
} else {
|
|
|
|
|
atomic_read(&udpif->n_flows, &flow_count);
|
|
|
|
|
}
|
|
|
|
|
return flow_count;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
2013-06-25 14:45:43 -07:00
|
|
|
|
|
2014-05-20 21:50:19 -07:00
|
|
|
|
/* The upcall handler thread tries to read a batch of UPCALL_MAX_BATCH
|
2014-02-26 23:03:24 -08:00
|
|
|
|
* upcalls from dpif, processes the batch and installs corresponding flows
|
|
|
|
|
* in dpif. */
|
2013-06-25 14:45:43 -07:00
|
|
|
|
static void *
|
2013-09-24 15:04:04 -07:00
|
|
|
|
udpif_upcall_handler(void *arg)
|
2013-06-25 14:45:43 -07:00
|
|
|
|
{
|
|
|
|
|
struct handler *handler = arg;
|
2014-02-26 23:03:24 -08:00
|
|
|
|
struct udpif *udpif = handler->udpif;
|
2013-06-25 14:45:43 -07:00
|
|
|
|
|
2013-12-27 16:29:24 -08:00
|
|
|
|
while (!latch_is_set(&handler->udpif->exit_latch)) {
|
2014-08-06 18:49:44 -07:00
|
|
|
|
if (!recv_upcalls(handler)) {
|
2014-02-26 23:03:24 -08:00
|
|
|
|
dpif_recv_wait(udpif->dpif, handler->handler_id);
|
|
|
|
|
latch_wait(&udpif->exit_latch);
|
|
|
|
|
poll_block();
|
2013-06-25 14:45:43 -07:00
|
|
|
|
}
|
2013-09-23 10:24:05 -07:00
|
|
|
|
coverage_clear();
|
2013-06-25 14:45:43 -07:00
|
|
|
|
}
|
2013-12-27 16:29:24 -08:00
|
|
|
|
|
|
|
|
|
return NULL;
|
2013-06-25 14:45:43 -07:00
|
|
|
|
}
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
static size_t
|
|
|
|
|
recv_upcalls(struct handler *handler)
|
|
|
|
|
{
|
|
|
|
|
struct udpif *udpif = handler->udpif;
|
|
|
|
|
uint64_t recv_stubs[UPCALL_MAX_BATCH][512 / 8];
|
|
|
|
|
struct ofpbuf recv_bufs[UPCALL_MAX_BATCH];
|
|
|
|
|
struct upcall upcalls[UPCALL_MAX_BATCH];
|
|
|
|
|
size_t n_upcalls, i;
|
|
|
|
|
|
|
|
|
|
n_upcalls = 0;
|
|
|
|
|
while (n_upcalls < UPCALL_MAX_BATCH) {
|
|
|
|
|
struct ofpbuf *recv_buf = &recv_bufs[n_upcalls];
|
|
|
|
|
struct upcall *upcall = &upcalls[n_upcalls];
|
|
|
|
|
struct dpif_upcall dupcall;
|
|
|
|
|
struct pkt_metadata md;
|
|
|
|
|
struct flow flow;
|
|
|
|
|
int error;
|
|
|
|
|
|
2014-08-14 15:48:00 -07:00
|
|
|
|
ofpbuf_use_stub(recv_buf, recv_stubs[n_upcalls],
|
2014-08-06 18:49:44 -07:00
|
|
|
|
sizeof recv_stubs[n_upcalls]);
|
|
|
|
|
if (dpif_recv(udpif->dpif, handler->handler_id, &dupcall, recv_buf)) {
|
|
|
|
|
ofpbuf_uninit(recv_buf);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (odp_flow_key_to_flow(dupcall.key, dupcall.key_len, &flow)
|
|
|
|
|
== ODP_FIT_ERROR) {
|
|
|
|
|
goto free_dupcall;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
error = upcall_receive(upcall, udpif->backer, &dupcall.packet,
|
|
|
|
|
dupcall.type, dupcall.userdata, &flow);
|
|
|
|
|
if (error) {
|
|
|
|
|
if (error == ENODEV) {
|
|
|
|
|
/* Received packet on datapath port for which we couldn't
|
|
|
|
|
* associate an ofproto. This can happen if a port is removed
|
|
|
|
|
* while traffic is being received. Print a rate-limited
|
|
|
|
|
* message in case it happens frequently. */
|
|
|
|
|
dpif_flow_put(udpif->dpif, DPIF_FP_CREATE, dupcall.key,
|
|
|
|
|
dupcall.key_len, NULL, 0, NULL, 0, NULL);
|
|
|
|
|
VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
|
|
|
|
|
"port %"PRIu32, flow.in_port.odp_port);
|
|
|
|
|
}
|
|
|
|
|
goto free_dupcall;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
upcall->key = dupcall.key;
|
|
|
|
|
upcall->key_len = dupcall.key_len;
|
|
|
|
|
|
|
|
|
|
if (vsp_adjust_flow(upcall->ofproto, &flow, &dupcall.packet)) {
|
|
|
|
|
upcall->vsp_adjusted = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
md = pkt_metadata_from_flow(&flow);
|
|
|
|
|
flow_extract(&dupcall.packet, &md, &flow);
|
|
|
|
|
|
|
|
|
|
error = process_upcall(udpif, upcall, NULL);
|
|
|
|
|
if (error) {
|
|
|
|
|
goto cleanup;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
n_upcalls++;
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
|
upcall_uninit(upcall);
|
|
|
|
|
free_dupcall:
|
|
|
|
|
ofpbuf_uninit(&dupcall.packet);
|
|
|
|
|
ofpbuf_uninit(recv_buf);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (n_upcalls) {
|
|
|
|
|
handle_upcalls(handler->udpif, upcalls, n_upcalls);
|
|
|
|
|
for (i = 0; i < n_upcalls; i++) {
|
|
|
|
|
ofpbuf_uninit(CONST_CAST(struct ofpbuf *, upcalls[i].packet));
|
|
|
|
|
ofpbuf_uninit(&recv_bufs[i]);
|
|
|
|
|
upcall_uninit(&upcalls[i]);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return n_upcalls;
|
|
|
|
|
}
|
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
static void *
|
|
|
|
|
udpif_revalidator(void *arg)
|
2013-06-25 14:45:43 -07:00
|
|
|
|
{
|
2014-04-10 07:14:08 +00:00
|
|
|
|
/* Used by all revalidators. */
|
2013-09-24 13:39:56 -07:00
|
|
|
|
struct revalidator *revalidator = arg;
|
2014-04-10 07:14:08 +00:00
|
|
|
|
struct udpif *udpif = revalidator->udpif;
|
|
|
|
|
bool leader = revalidator == &udpif->revalidators[0];
|
|
|
|
|
|
|
|
|
|
/* Used only by the leader. */
|
|
|
|
|
long long int start_time = 0;
|
|
|
|
|
uint64_t last_reval_seq = 0;
|
|
|
|
|
unsigned int flow_limit = 0;
|
|
|
|
|
size_t n_flows = 0;
|
2013-06-25 14:45:43 -07:00
|
|
|
|
|
ovs-thread: Make caller provide thread name when creating a thread.
Thread names are occasionally very useful for debugging, but from time to
time we've forgotten to set one. This commit adds the new thread's name
as a parameter to the function to start a thread, to make that mistake
impossible. This also simplifies code, since two function calls become
only one.
This makes a few other changes to the thread creation function:
* Since it is no longer a direct wrapper around a pthread function,
rename it to avoid giving that impression.
* Remove 'pthread_attr_t *' param that every caller supplied as NULL.
* Change 'pthread *' parameter into a return value, for convenience.
The system-stats code hadn't set a thread name, so this fixes that issue.
This patch is a prerequisite for making RCU report the name of a thread
that is blocking RCU synchronization, because the easiest way to do that is
for ovsrcu_quiesce_end() to record the current thread's name.
ovsrcu_quiesce_end() is called before the thread function is called, so it
won't get a name set within the thread function itself. Setting the thread
name earlier, as in this patch, avoids the problem.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Alex Wang <alexw@nicira.com>
2014-04-25 17:46:21 -07:00
|
|
|
|
revalidator->id = ovsthread_id_self();
|
2013-09-24 13:39:56 -07:00
|
|
|
|
for (;;) {
|
2014-04-10 07:14:08 +00:00
|
|
|
|
if (leader) {
|
|
|
|
|
uint64_t reval_seq;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
reval_seq = seq_read(udpif->reval_seq);
|
|
|
|
|
udpif->need_revalidate = last_reval_seq != reval_seq;
|
|
|
|
|
last_reval_seq = reval_seq;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
n_flows = udpif_get_n_flows(udpif);
|
|
|
|
|
udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows);
|
|
|
|
|
udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;
|
|
|
|
|
|
|
|
|
|
/* Only the leader checks the exit latch to prevent a race where
|
|
|
|
|
* some threads think it's true and exit and others think it's
|
|
|
|
|
* false and block indefinitely on the reval_barrier */
|
|
|
|
|
udpif->reval_exit = latch_is_set(&udpif->exit_latch);
|
|
|
|
|
|
|
|
|
|
start_time = time_msec();
|
|
|
|
|
if (!udpif->reval_exit) {
|
2014-05-20 11:37:02 -07:00
|
|
|
|
udpif->dump = dpif_flow_dump_create(udpif->dpif);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
/* Wait for the leader to start the flow dump. */
|
2014-05-29 15:37:37 -07:00
|
|
|
|
ovs_barrier_block(&udpif->reval_barrier);
|
2014-04-10 07:14:08 +00:00
|
|
|
|
if (udpif->reval_exit) {
|
|
|
|
|
break;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
2014-04-10 07:14:08 +00:00
|
|
|
|
revalidate(revalidator);
|
|
|
|
|
|
|
|
|
|
/* Wait for all flows to have been dumped before we garbage collect. */
|
2014-05-29 15:37:37 -07:00
|
|
|
|
ovs_barrier_block(&udpif->reval_barrier);
|
2014-04-10 07:14:08 +00:00
|
|
|
|
revalidator_sweep(revalidator);
|
|
|
|
|
|
|
|
|
|
/* Wait for all revalidators to finish garbage collection. */
|
2014-05-29 15:37:37 -07:00
|
|
|
|
ovs_barrier_block(&udpif->reval_barrier);
|
2014-04-10 07:14:08 +00:00
|
|
|
|
|
|
|
|
|
if (leader) {
|
|
|
|
|
long long int duration;
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
dpif_flow_dump_destroy(udpif->dump);
|
2014-04-10 07:14:08 +00:00
|
|
|
|
seq_change(udpif->dump_seq);
|
|
|
|
|
|
|
|
|
|
duration = MAX(time_msec() - start_time, 1);
|
|
|
|
|
atomic_read(&udpif->flow_limit, &flow_limit);
|
|
|
|
|
udpif->dump_duration = duration;
|
|
|
|
|
if (duration > 2000) {
|
|
|
|
|
flow_limit /= duration / 1000;
|
|
|
|
|
} else if (duration > 1300) {
|
|
|
|
|
flow_limit = flow_limit * 3 / 4;
|
|
|
|
|
} else if (duration < 1000 && n_flows > 2000
|
|
|
|
|
&& flow_limit < n_flows * 1000 / duration) {
|
|
|
|
|
flow_limit += 1000;
|
|
|
|
|
}
|
|
|
|
|
flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
|
|
|
|
|
atomic_store(&udpif->flow_limit, flow_limit);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
if (duration > 2000) {
|
|
|
|
|
VLOG_INFO("Spent an unreasonably long %lldms dumping flows",
|
|
|
|
|
duration);
|
|
|
|
|
}
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500));
|
|
|
|
|
seq_wait(udpif->reval_seq, last_reval_seq);
|
|
|
|
|
latch_wait(&udpif->exit_latch);
|
|
|
|
|
poll_block();
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-25 14:45:43 -07:00
|
|
|
|
static enum upcall_type
|
2014-08-06 18:49:44 -07:00
|
|
|
|
classify_upcall(enum dpif_upcall_type type, const struct nlattr *userdata)
|
2013-06-25 14:45:43 -07:00
|
|
|
|
{
|
|
|
|
|
union user_action_cookie cookie;
|
|
|
|
|
size_t userdata_len;
|
|
|
|
|
|
|
|
|
|
/* First look at the upcall type. */
|
2014-08-06 18:49:44 -07:00
|
|
|
|
switch (type) {
|
2013-06-25 14:45:43 -07:00
|
|
|
|
case DPIF_UC_ACTION:
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case DPIF_UC_MISS:
|
|
|
|
|
return MISS_UPCALL;
|
|
|
|
|
|
|
|
|
|
case DPIF_N_UC_TYPES:
|
|
|
|
|
default:
|
2014-08-06 18:49:44 -07:00
|
|
|
|
VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, type);
|
2013-06-25 14:45:43 -07:00
|
|
|
|
return BAD_UPCALL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* "action" upcalls need a closer look. */
|
2014-08-06 18:49:44 -07:00
|
|
|
|
if (!userdata) {
|
2013-06-25 14:45:43 -07:00
|
|
|
|
VLOG_WARN_RL(&rl, "action upcall missing cookie");
|
|
|
|
|
return BAD_UPCALL;
|
|
|
|
|
}
|
2014-08-06 18:49:44 -07:00
|
|
|
|
userdata_len = nl_attr_get_size(userdata);
|
2013-06-25 14:45:43 -07:00
|
|
|
|
if (userdata_len < sizeof cookie.type
|
|
|
|
|
|| userdata_len > sizeof cookie) {
|
2013-11-25 23:38:48 -08:00
|
|
|
|
VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE,
|
2013-06-25 14:45:43 -07:00
|
|
|
|
userdata_len);
|
|
|
|
|
return BAD_UPCALL;
|
|
|
|
|
}
|
|
|
|
|
memset(&cookie, 0, sizeof cookie);
|
2014-08-06 18:49:44 -07:00
|
|
|
|
memcpy(&cookie, nl_attr_get(userdata), userdata_len);
|
2014-02-11 15:21:08 -08:00
|
|
|
|
if (userdata_len == MAX(8, sizeof cookie.sflow)
|
2013-06-25 14:45:43 -07:00
|
|
|
|
&& cookie.type == USER_ACTION_COOKIE_SFLOW) {
|
|
|
|
|
return SFLOW_UPCALL;
|
2014-02-11 15:21:08 -08:00
|
|
|
|
} else if (userdata_len == MAX(8, sizeof cookie.slow_path)
|
2013-06-25 14:45:43 -07:00
|
|
|
|
&& cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
|
|
|
|
|
return MISS_UPCALL;
|
2014-02-11 15:21:08 -08:00
|
|
|
|
} else if (userdata_len == MAX(8, sizeof cookie.flow_sample)
|
2013-06-25 14:45:43 -07:00
|
|
|
|
&& cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
|
|
|
|
|
return FLOW_SAMPLE_UPCALL;
|
2014-02-11 15:21:08 -08:00
|
|
|
|
} else if (userdata_len == MAX(8, sizeof cookie.ipfix)
|
2013-06-25 14:45:43 -07:00
|
|
|
|
&& cookie.type == USER_ACTION_COOKIE_IPFIX) {
|
|
|
|
|
return IPFIX_UPCALL;
|
|
|
|
|
} else {
|
|
|
|
|
VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
|
2013-11-25 23:38:48 -08:00
|
|
|
|
" and size %"PRIuSIZE, cookie.type, userdata_len);
|
2013-06-25 14:45:43 -07:00
|
|
|
|
return BAD_UPCALL;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
/* Calculates slow path actions for 'xout'. 'buf' must statically be
|
|
|
|
|
* initialized with at least 128 bytes of space. */
|
|
|
|
|
static void
|
|
|
|
|
compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
|
2014-08-06 18:49:44 -07:00
|
|
|
|
const struct flow *flow, odp_port_t odp_in_port,
|
2014-02-26 23:03:24 -08:00
|
|
|
|
struct ofpbuf *buf)
|
2013-09-24 13:39:56 -07:00
|
|
|
|
{
|
|
|
|
|
union user_action_cookie cookie;
|
|
|
|
|
odp_port_t port;
|
|
|
|
|
uint32_t pid;
|
|
|
|
|
|
|
|
|
|
cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
|
|
|
|
|
cookie.slow_path.unused = 0;
|
|
|
|
|
cookie.slow_path.reason = xout->slow;
|
|
|
|
|
|
|
|
|
|
port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)
|
|
|
|
|
? ODPP_NONE
|
|
|
|
|
: odp_in_port;
|
2014-02-26 23:03:24 -08:00
|
|
|
|
pid = dpif_port_get_pid(udpif->dpif, port, flow_hash_5tuple(flow, 0));
|
2013-09-24 13:39:56 -07:00
|
|
|
|
odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, buf);
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
static int
|
|
|
|
|
upcall_receive(struct upcall *upcall, const struct dpif_backer *backer,
|
|
|
|
|
const struct ofpbuf *packet, enum dpif_upcall_type type,
|
|
|
|
|
const struct nlattr *userdata, const struct flow *flow)
|
|
|
|
|
{
|
|
|
|
|
int error;
|
|
|
|
|
|
|
|
|
|
error = xlate_receive(backer, flow, &upcall->ofproto, &upcall->ipfix,
|
|
|
|
|
&upcall->sflow, &upcall->netflow,
|
|
|
|
|
&upcall->in_port);
|
|
|
|
|
if (error) {
|
|
|
|
|
return error;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
upcall->flow = flow;
|
|
|
|
|
upcall->packet = packet;
|
|
|
|
|
upcall->type = type;
|
|
|
|
|
upcall->userdata = userdata;
|
|
|
|
|
ofpbuf_init(&upcall->put_actions, 0);
|
|
|
|
|
|
|
|
|
|
upcall->xout_initialized = false;
|
|
|
|
|
upcall->vsp_adjusted = false;
|
|
|
|
|
|
|
|
|
|
upcall->key = NULL;
|
|
|
|
|
upcall->key_len = 0;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 21:50:19 -07:00
|
|
|
|
static void
|
2014-08-06 18:49:44 -07:00
|
|
|
|
upcall_xlate(struct udpif *udpif, struct upcall *upcall,
|
|
|
|
|
struct ofpbuf *odp_actions)
|
2013-06-25 14:45:43 -07:00
|
|
|
|
{
|
2014-08-06 18:49:44 -07:00
|
|
|
|
struct dpif_flow_stats stats;
|
2014-05-22 10:53:27 -07:00
|
|
|
|
struct xlate_in xin;
|
2014-05-20 21:50:19 -07:00
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
stats.n_packets = 1;
|
|
|
|
|
stats.n_bytes = ofpbuf_size(upcall->packet);
|
|
|
|
|
stats.used = time_msec();
|
|
|
|
|
stats.tcp_flags = ntohs(upcall->flow->tcp_flags);
|
2014-05-20 21:50:19 -07:00
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
xlate_in_init(&xin, upcall->ofproto, upcall->flow, upcall->in_port, NULL,
|
|
|
|
|
stats.tcp_flags, upcall->packet);
|
|
|
|
|
xin.odp_actions = odp_actions;
|
2014-05-20 21:50:19 -07:00
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
if (upcall->type == DPIF_UC_MISS) {
|
|
|
|
|
xin.resubmit_stats = &stats;
|
2014-05-20 21:50:19 -07:00
|
|
|
|
} else {
|
|
|
|
|
/* For non-miss upcalls, there's a flow in the datapath which this
|
|
|
|
|
* packet was accounted to. Presumably the revalidators will deal
|
|
|
|
|
* with pushing its stats eventually. */
|
2013-06-25 14:45:43 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 21:50:19 -07:00
|
|
|
|
xlate_actions(&xin, &upcall->xout);
|
2014-08-06 18:49:44 -07:00
|
|
|
|
upcall->xout_initialized = true;
|
|
|
|
|
|
|
|
|
|
/* Special case for fail-open mode.
|
|
|
|
|
*
|
|
|
|
|
* If we are in fail-open mode, but we are connected to a controller too,
|
|
|
|
|
* then we should send the packet up to the controller in the hope that it
|
|
|
|
|
* will try to set up a flow and thereby allow us to exit fail-open.
|
|
|
|
|
*
|
|
|
|
|
* See the top-level comment in fail-open.c for more information.
|
|
|
|
|
*
|
|
|
|
|
* Copy packets before they are modified by execution. */
|
|
|
|
|
if (upcall->xout.fail_open) {
|
|
|
|
|
const struct ofpbuf *packet = upcall->packet;
|
|
|
|
|
struct ofproto_packet_in *pin;
|
|
|
|
|
|
|
|
|
|
pin = xmalloc(sizeof *pin);
|
|
|
|
|
pin->up.packet = xmemdup(ofpbuf_data(packet), ofpbuf_size(packet));
|
|
|
|
|
pin->up.packet_len = ofpbuf_size(packet);
|
|
|
|
|
pin->up.reason = OFPR_NO_MATCH;
|
|
|
|
|
pin->up.table_id = 0;
|
|
|
|
|
pin->up.cookie = OVS_BE64_MAX;
|
|
|
|
|
flow_get_metadata(upcall->flow, &pin->up.fmd);
|
|
|
|
|
pin->send_len = 0; /* Not used for flow table misses. */
|
|
|
|
|
pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
|
|
|
|
|
ofproto_dpif_send_packet_in(upcall->ofproto, pin);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!upcall->xout.slow) {
|
|
|
|
|
ofpbuf_use_const(&upcall->put_actions,
|
|
|
|
|
ofpbuf_data(upcall->xout.odp_actions),
|
|
|
|
|
ofpbuf_size(upcall->xout.odp_actions));
|
|
|
|
|
} else {
|
|
|
|
|
ofpbuf_init(&upcall->put_actions, 0);
|
|
|
|
|
compose_slow_path(udpif, &upcall->xout, upcall->flow,
|
|
|
|
|
upcall->flow->in_port.odp_port,
|
|
|
|
|
&upcall->put_actions);
|
|
|
|
|
}
|
2013-06-25 14:45:43 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-07-26 12:19:03 -07:00
|
|
|
|
static void
|
2014-08-06 18:49:44 -07:00
|
|
|
|
upcall_uninit(struct upcall *upcall)
|
2014-07-26 06:51:55 +00:00
|
|
|
|
{
|
2014-08-06 18:49:44 -07:00
|
|
|
|
if (upcall) {
|
|
|
|
|
if (upcall->xout_initialized) {
|
|
|
|
|
xlate_out_uninit(&upcall->xout);
|
|
|
|
|
}
|
|
|
|
|
ofpbuf_uninit(&upcall->put_actions);
|
|
|
|
|
dpif_ipfix_unref(upcall->ipfix);
|
|
|
|
|
dpif_sflow_unref(upcall->sflow);
|
|
|
|
|
netflow_unref(upcall->netflow);
|
|
|
|
|
}
|
2014-07-26 06:51:55 +00:00
|
|
|
|
}
|
|
|
|
|
|
2014-07-26 15:39:58 -07:00
|
|
|
|
static int
|
|
|
|
|
upcall_cb(const struct ofpbuf *packet, const struct flow *flow,
|
|
|
|
|
enum dpif_upcall_type type, const struct nlattr *userdata,
|
|
|
|
|
struct ofpbuf *actions, struct flow_wildcards *wc,
|
|
|
|
|
struct ofpbuf *put_actions, void *aux)
|
2014-07-26 06:51:55 +00:00
|
|
|
|
{
|
2014-07-26 15:39:58 -07:00
|
|
|
|
struct udpif *udpif = aux;
|
|
|
|
|
unsigned int flow_limit;
|
|
|
|
|
struct upcall upcall;
|
|
|
|
|
bool megaflow;
|
|
|
|
|
int error;
|
2014-07-26 06:51:55 +00:00
|
|
|
|
|
2014-07-26 15:39:58 -07:00
|
|
|
|
error = upcall_receive(&upcall, udpif->backer, packet, type, userdata,
|
|
|
|
|
flow);
|
|
|
|
|
if (error) {
|
|
|
|
|
goto out;
|
2014-07-26 06:51:55 +00:00
|
|
|
|
}
|
|
|
|
|
|
2014-07-26 15:39:58 -07:00
|
|
|
|
error = process_upcall(udpif, &upcall, actions);
|
|
|
|
|
if (error) {
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
2014-08-06 18:49:44 -07:00
|
|
|
|
|
2014-07-26 15:39:58 -07:00
|
|
|
|
if (upcall.xout.slow && put_actions) {
|
|
|
|
|
ofpbuf_put(put_actions, ofpbuf_data(&upcall.put_actions),
|
|
|
|
|
ofpbuf_size(&upcall.put_actions));
|
|
|
|
|
}
|
2014-08-06 18:49:44 -07:00
|
|
|
|
|
2014-07-26 15:39:58 -07:00
|
|
|
|
if (wc) {
|
|
|
|
|
atomic_read(&enable_megaflows, &megaflow);
|
|
|
|
|
if (megaflow) {
|
|
|
|
|
/* XXX: This could be avoided with sufficient API changes. */
|
|
|
|
|
*wc = upcall.xout.wc;
|
|
|
|
|
} else {
|
|
|
|
|
memset(wc, 0xff, sizeof *wc);
|
|
|
|
|
flow_wildcards_clear_non_packet_fields(wc);
|
2014-02-26 23:03:24 -08:00
|
|
|
|
}
|
2014-07-26 15:39:58 -07:00
|
|
|
|
}
|
2014-02-26 23:03:24 -08:00
|
|
|
|
|
2014-07-26 15:39:58 -07:00
|
|
|
|
atomic_read(&udpif->flow_limit, &flow_limit);
|
|
|
|
|
if (udpif_get_n_flows(udpif) >= flow_limit) {
|
|
|
|
|
error = ENOSPC;
|
2014-07-26 06:51:55 +00:00
|
|
|
|
}
|
2014-07-26 15:39:58 -07:00
|
|
|
|
|
|
|
|
|
out:
|
|
|
|
|
upcall_uninit(&upcall);
|
|
|
|
|
return error;
|
2014-07-26 06:51:55 +00:00
|
|
|
|
}
|
2013-09-24 15:04:04 -07:00
|
|
|
|
|
2014-07-26 12:19:03 -07:00
|
|
|
|
static int
|
2014-08-06 18:49:44 -07:00
|
|
|
|
process_upcall(struct udpif *udpif, struct upcall *upcall,
|
|
|
|
|
struct ofpbuf *odp_actions)
|
2014-07-26 06:51:55 +00:00
|
|
|
|
{
|
2014-08-06 18:49:44 -07:00
|
|
|
|
const struct nlattr *userdata = upcall->userdata;
|
|
|
|
|
const struct ofpbuf *packet = upcall->packet;
|
|
|
|
|
const struct flow *flow = upcall->flow;
|
2013-09-19 11:03:47 -07:00
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
switch (classify_upcall(upcall->type, userdata)) {
|
|
|
|
|
case MISS_UPCALL:
|
|
|
|
|
upcall_xlate(udpif, upcall, odp_actions);
|
|
|
|
|
return 0;
|
2013-09-24 15:04:04 -07:00
|
|
|
|
|
2014-07-26 06:51:55 +00:00
|
|
|
|
case SFLOW_UPCALL:
|
2014-08-06 18:49:44 -07:00
|
|
|
|
if (upcall->sflow) {
|
2014-07-26 06:51:55 +00:00
|
|
|
|
union user_action_cookie cookie;
|
|
|
|
|
|
|
|
|
|
memset(&cookie, 0, sizeof cookie);
|
2014-08-06 18:49:44 -07:00
|
|
|
|
memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.sflow);
|
|
|
|
|
dpif_sflow_received(upcall->sflow, packet, flow,
|
|
|
|
|
flow->in_port.odp_port, &cookie);
|
2014-07-26 06:51:55 +00:00
|
|
|
|
}
|
|
|
|
|
break;
|
2014-08-06 18:49:44 -07:00
|
|
|
|
|
2014-07-26 06:51:55 +00:00
|
|
|
|
case IPFIX_UPCALL:
|
2014-08-06 18:49:44 -07:00
|
|
|
|
if (upcall->ipfix) {
|
|
|
|
|
dpif_ipfix_bridge_sample(upcall->ipfix, packet, flow);
|
2014-07-26 06:51:55 +00:00
|
|
|
|
}
|
|
|
|
|
break;
|
2014-08-06 18:49:44 -07:00
|
|
|
|
|
2014-07-26 06:51:55 +00:00
|
|
|
|
case FLOW_SAMPLE_UPCALL:
|
2014-08-06 18:49:44 -07:00
|
|
|
|
if (upcall->ipfix) {
|
2014-07-26 06:51:55 +00:00
|
|
|
|
union user_action_cookie cookie;
|
|
|
|
|
|
|
|
|
|
memset(&cookie, 0, sizeof cookie);
|
2014-08-06 18:49:44 -07:00
|
|
|
|
memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.flow_sample);
|
2014-07-26 06:51:55 +00:00
|
|
|
|
|
|
|
|
|
/* The flow reflects exactly the contents of the packet.
|
|
|
|
|
* Sample the packet using it. */
|
2014-08-06 18:49:44 -07:00
|
|
|
|
dpif_ipfix_flow_sample(upcall->ipfix, packet, flow,
|
2014-07-26 06:51:55 +00:00
|
|
|
|
cookie.flow_sample.collector_set_id,
|
|
|
|
|
cookie.flow_sample.probability,
|
|
|
|
|
cookie.flow_sample.obs_domain_id,
|
|
|
|
|
cookie.flow_sample.obs_point_id);
|
2013-06-25 14:45:43 -07:00
|
|
|
|
}
|
2014-07-26 06:51:55 +00:00
|
|
|
|
break;
|
2014-08-06 18:49:44 -07:00
|
|
|
|
|
2014-07-26 06:51:55 +00:00
|
|
|
|
case BAD_UPCALL:
|
|
|
|
|
break;
|
|
|
|
|
}
|
2013-09-24 15:04:04 -07:00
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
return EAGAIN;
|
2014-02-26 23:03:24 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Pushes the datapath operations implied by the 'n_upcalls' translated
 * upcalls in 'upcalls' down to 'udpif''s datapath as one batch.
 *
 * Each upcall may contribute up to two operations ('ops' is sized
 * UPCALL_MAX_BATCH * 2 for this reason): a DPIF_OP_FLOW_PUT that installs
 * the flow, and a DPIF_OP_EXECUTE that forwards the triggering packet. */
static void
handle_upcalls(struct udpif *udpif, struct upcall *upcalls,
               size_t n_upcalls)
{
    struct odputil_keybuf mask_bufs[UPCALL_MAX_BATCH];
    struct dpif_op *opsp[UPCALL_MAX_BATCH * 2];
    struct dpif_op ops[UPCALL_MAX_BATCH * 2];
    unsigned int flow_limit;
    size_t n_ops, i;
    bool may_put;

    /* Stop installing new datapath flows once the datapath already holds
     * 'flow_limit' of them; executions below are unaffected by this. */
    atomic_read(&udpif->flow_limit, &flow_limit);
    may_put = udpif_get_n_flows(udpif) < flow_limit;

    /* Handle the packets individually in order of arrival.
     *
     *   - For SLOW_CFM, SLOW_LACP, SLOW_STP, and SLOW_BFD, translation is what
     *     processes received packets for these protocols.
     *
     *   - For SLOW_CONTROLLER, translation sends the packet to the OpenFlow
     *     controller.
     *
     * The loop fills 'ops' with an array of operations to execute in the
     * datapath. */
    n_ops = 0;
    for (i = 0; i < n_upcalls; i++) {
        struct upcall *upcall = &upcalls[i];
        const struct ofpbuf *packet = upcall->packet;
        struct dpif_op *op;

        if (upcall->vsp_adjusted) {
            /* This packet was received on a VLAN splinter port.  We added a
             * VLAN to the packet to make the packet resemble the flow, but the
             * actions were composed assuming that the packet contained no
             * VLAN.  So, we must remove the VLAN header from the packet before
             * trying to execute the actions. */
            if (ofpbuf_size(upcall->xout.odp_actions)) {
                eth_pop_vlan(CONST_CAST(struct ofpbuf *, upcall->packet));
            }

            /* Remove the flow vlan tags inserted by vlan splinter logic
             * to ensure megaflow masks generated match the data path flow. */
            CONST_CAST(struct flow *, upcall->flow)->vlan_tci = 0;
        }

        /* Do not install a flow into the datapath if:
         *
         *    - The datapath already has too many flows.
         *
         *    - We received this packet via some flow installed in the kernel
         *      already. */
        if (may_put && upcall->type == DPIF_UC_MISS) {
            struct ofpbuf mask;
            bool megaflow;

            atomic_read(&enable_megaflows, &megaflow);
            /* 'mask' borrows stack storage from mask_bufs[i]; when megaflows
             * are disabled it stays empty, yielding an exact-match flow. */
            ofpbuf_use_stack(&mask, &mask_bufs[i], sizeof mask_bufs[i]);
            if (megaflow) {
                size_t max_mpls;
                bool recirc;

                recirc = ofproto_dpif_get_enable_recirc(upcall->ofproto);
                max_mpls = ofproto_dpif_get_max_mpls_depth(upcall->ofproto);
                odp_flow_key_from_mask(&mask, &upcall->xout.wc.masks,
                                       upcall->flow, UINT32_MAX, max_mpls,
                                       recirc);
            }

            op = &ops[n_ops++];
            op->type = DPIF_OP_FLOW_PUT;
            /* DPIF_FP_MODIFY is deliberately not set: if two handler threads
             * race to install the same megaflow, the loser's put fails with
             * EEXIST, which is only logged at debug level, rather than
             * quietly modifying an existing flow. */
            op->u.flow_put.flags = DPIF_FP_CREATE;
            op->u.flow_put.key = upcall->key;
            op->u.flow_put.key_len = upcall->key_len;
            op->u.flow_put.mask = ofpbuf_data(&mask);
            op->u.flow_put.mask_len = ofpbuf_size(&mask);
            op->u.flow_put.stats = NULL;
            op->u.flow_put.actions = ofpbuf_data(&upcall->put_actions);
            op->u.flow_put.actions_len = ofpbuf_size(&upcall->put_actions);
        }

        if (ofpbuf_size(upcall->xout.odp_actions)) {
            op = &ops[n_ops++];
            op->type = DPIF_OP_EXECUTE;
            op->u.execute.packet = CONST_CAST(struct ofpbuf *, packet);
            odp_key_to_pkt_metadata(upcall->key, upcall->key_len,
                                    &op->u.execute.md);
            op->u.execute.actions = ofpbuf_data(upcall->xout.odp_actions);
            op->u.execute.actions_len = ofpbuf_size(upcall->xout.odp_actions);
            /* Actions flagged SLOW_ACTION cannot be executed directly by the
             * datapath and need userspace assistance. */
            op->u.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
        }
    }

    /* Execute batch. */
    for (i = 0; i < n_ops; i++) {
        opsp[i] = &ops[i];
    }
    dpif_operate(udpif->dpif, opsp, n_ops);
}
|
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
/* Must be called with udpif->ukeys[hash % udpif->n_revalidators].mutex. */
|
2013-09-24 13:39:56 -07:00
|
|
|
|
static struct udpif_key *
|
2014-06-04 09:59:23 +00:00
|
|
|
|
ukey_lookup(struct udpif *udpif, const struct nlattr *key, size_t key_len,
|
|
|
|
|
uint32_t hash)
|
|
|
|
|
OVS_REQUIRES(udpif->ukeys->mutex)
|
2013-09-24 13:39:56 -07:00
|
|
|
|
{
|
|
|
|
|
struct udpif_key *ukey;
|
2014-04-10 07:14:08 +00:00
|
|
|
|
struct hmap *hmap = &udpif->ukeys[hash % udpif->n_revalidators].hmap;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
HMAP_FOR_EACH_WITH_HASH (ukey, hmap_node, hash, hmap) {
|
|
|
|
|
if (ukey->key_len == key_len && !memcmp(ukey->key, key, key_len)) {
|
2013-09-24 13:39:56 -07:00
|
|
|
|
return ukey;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2014-06-04 09:59:23 +00:00
|
|
|
|
/* Creates a ukey for 'key' and 'key_len', returning it with ukey->mutex in
|
|
|
|
|
* a locked state. */
|
2014-02-11 13:55:34 -08:00
|
|
|
|
static struct udpif_key *
|
|
|
|
|
ukey_create(const struct nlattr *key, size_t key_len, long long int used)
|
2014-06-04 09:59:23 +00:00
|
|
|
|
OVS_NO_THREAD_SAFETY_ANALYSIS
|
2014-02-11 13:55:34 -08:00
|
|
|
|
{
|
|
|
|
|
struct udpif_key *ukey = xmalloc(sizeof *ukey);
|
|
|
|
|
|
2014-06-04 09:59:23 +00:00
|
|
|
|
ovs_mutex_init(&ukey->mutex);
|
2014-08-05 13:51:19 -07:00
|
|
|
|
ukey->key = &ukey->key_buf_nla;
|
2014-02-11 13:55:34 -08:00
|
|
|
|
memcpy(&ukey->key_buf, key, key_len);
|
|
|
|
|
ukey->key_len = key_len;
|
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
ovs_mutex_lock(&ukey->mutex);
|
2014-05-14 16:17:25 +12:00
|
|
|
|
ukey->dump_seq = 0;
|
revalidator: Prevent handling the same flow twice.
When the datapath flow table is modified while a flow dump operation is
in progress, it is possible for the same flow to be dumped twice. In
such cases, revalidators may perform redundant work, or attempt to
delete the same flow twice.
This was causing intermittent testsuite failures for test #670 -
"ofproto-dpif, active-backup bonding" where a flow (that had not
previously been dumped) was dumped, revalidated and deleted twice.
The logs show errors such as:
"failed to flow_get (No such file or directory) skb_priority(0),..."
"failed to flow_del (No such file or directory) skb_priority(0),..."
This patch adds a 'flow_exists' field to 'struct udpif_key' to track
whether the flow is (in progress) to be deleted. After doing a ukey
lookup, we check whether ukey->mark or ukey->flow indicates that the
flow has already been handled. If it has already been handled, we skip
handling the flow again.
We also defer ukey cleanup for flows that fail revalidation, so that the
ukey will still exist if the same flow is dumped twice. This allows the
above logic to work in this case.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Alex Wang <alexw@nicira.com>
2014-04-23 15:31:17 +12:00
|
|
|
|
ukey->flow_exists = true;
|
2014-02-11 13:55:34 -08:00
|
|
|
|
ukey->created = used ? used : time_msec();
|
|
|
|
|
memset(&ukey->stats, 0, sizeof ukey->stats);
|
2014-04-10 16:00:28 +12:00
|
|
|
|
ukey->xcache = NULL;
|
2014-02-11 13:55:34 -08:00
|
|
|
|
|
|
|
|
|
return ukey;
|
|
|
|
|
}
|
|
|
|
|
|
2014-06-04 09:59:23 +00:00
|
|
|
|
/* Searches for a ukey in 'udpif->ukeys' that matches 'key' and 'key_len' and
 * attempts to lock the ukey. If the ukey does not exist, create it.
 *
 * Returns true on success, setting *result to the matching ukey and returning
 * it in a locked state. Otherwise, returns false and clears *result. */
static bool
ukey_acquire(struct udpif *udpif, const struct nlattr *key, size_t key_len,
             long long int used, struct udpif_key **result)
    OVS_TRY_LOCK(true, (*result)->mutex)
{
    struct udpif_key *ukey;
    uint32_t hash, idx;
    bool locked = false;

    /* 'udpif->ukeys' is partitioned into one bucket per revalidator; 'hash'
     * selects both the bucket ('idx') and the slot within its hmap. */
    hash = hash_bytes(key, key_len, udpif->secret);
    idx = hash % udpif->n_revalidators;

    /* The bucket mutex serializes lookup-and-insert so the same key cannot
     * be created twice. */
    ovs_mutex_lock(&udpif->ukeys[idx].mutex);
    ukey = ukey_lookup(udpif, key, key_len, hash);
    if (!ukey) {
        /* ukey_create() returns the new ukey with its mutex already held,
         * so no trylock is needed on this path. */
        ukey = ukey_create(key, key_len, used);
        hmap_insert(&udpif->ukeys[idx].hmap, &ukey->hmap_node, hash);
        locked = true;
    } else if (!ovs_mutex_trylock(&ukey->mutex)) {
        /* trylock follows the pthread convention of returning 0 on success,
         * so '!' means we now hold the lock.  Failure means another thread
         * is already handling this flow, and we back off. */
        locked = true;
    }
    ovs_mutex_unlock(&udpif->ukeys[idx].mutex);

    if (locked) {
        *result = ukey;
    } else {
        *result = NULL;
    }
    return locked;
}
|
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
/* Destroys 'ukey', releasing its cached translation state, its mutex, and
 * its memory.
 *
 * If 'revalidator' is nonnull, 'ukey' is first unlinked from that
 * revalidator's ukey map; passing NULL means the caller has already detached
 * the ukey (or never inserted it).
 *
 * NOTE(review): assumes xlate_cache_delete() tolerates a NULL
 * 'ukey->xcache' -- it starts out NULL in ukey_create() -- confirm against
 * ofproto-dpif-xlate. */
static void
ukey_delete(struct revalidator *revalidator, struct udpif_key *ukey)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    if (revalidator) {
        hmap_remove(revalidator->ukeys, &ukey->hmap_node);
    }
    xlate_cache_delete(ukey->xcache);
    ovs_mutex_destroy(&ukey->mutex);
    free(ukey);
}
|
|
|
|
|
|
revalidator: Only revalidate high-throughput flows.
Previously we would revalidate all flows if the "need_revalidate" flag
was raised. This patch modifies the logic to delete low throughput flows
rather than revalidate them. High-throughput flows are unaffected by
this change. This patch identifies the flows based on the mean time
between packets since the last dump.
This change is primarily targeted at situations where:
* Flow dump duration is high (~1 second)
* Revalidation is triggered. (eg, by bridge reconfiguration or learning)
After the need_revalidate flag is set, next time a new flow dump session
starts, revalidators will begin revalidating the flows. This full
revalidation is more expensive, which significantly increases the flow
dump duration. At the end of this dump session, the datapath flow
management algorithms kick in for the next dump:
* If flow dump duration becomes too long, the flow limit is decreased.
* The number of flows in the datapath then exceeds the flow_limit.
* As the flow_limit is exceeded, max_idle is temporarily set to 100ms.
* Revalidators delete all flows that haven't seen traffic recently.
The effect of this is that many low-throughput flows are deleted after
revalidation, even if they are valid. The revalidation is unnecessary
for flows that would be deleted anyway, so this patch skips the
revalidation step for those flows.
Note that this patch will only perform this optimization if the flow has
already been dumped at least once, and only if the time since the last
dump is sufficiently long. This gives the flow a chance to become
high-throughput.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
---
v2: Acked.
v1: Determine "high-throughput" by packets rather than bytes.
Calculate the mean time between packets for comparison, rather than
comparing the number of packets since the last dump.
RFC: First post.
2014-03-04 09:36:37 -08:00
|
|
|
|
static bool
|
2014-07-02 07:41:33 +00:00
|
|
|
|
should_revalidate(const struct udpif *udpif, uint64_t packets,
|
|
|
|
|
long long int used)
|
revalidator: Only revalidate high-throughput flows.
Previously we would revalidate all flows if the "need_revalidate" flag
was raised. This patch modifies the logic to delete low throughput flows
rather than revalidate them. High-throughput flows are unaffected by
this change. This patch identifies the flows based on the mean time
between packets since the last dump.
This change is primarily targeted at situations where:
* Flow dump duration is high (~1 second)
* Revalidation is triggered. (eg, by bridge reconfiguration or learning)
After the need_revalidate flag is set, next time a new flow dump session
starts, revalidators will begin revalidating the flows. This full
revalidation is more expensive, which significantly increases the flow
dump duration. At the end of this dump session, the datapath flow
management algorithms kick in for the next dump:
* If flow dump duration becomes too long, the flow limit is decreased.
* The number of flows in the datapath then exceeds the flow_limit.
* As the flow_limit is exceeded, max_idle is temporarily set to 100ms.
* Revalidators delete all flows that haven't seen traffic recently.
The effect of this is that many low-throughput flows are deleted after
revalidation, even if they are valid. The revalidation is unnecessary
for flows that would be deleted anyway, so this patch skips the
revalidation step for those flows.
Note that this patch will only perform this optimization if the flow has
already been dumped at least once, and only if the time since the last
dump is sufficiently long. This gives the flow a chance to become
high-throughput.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
---
v2: Acked.
v1: Determine "high-throughput" by packets rather than bytes.
Calculate the mean time between packets for comparison, rather than
comparing the number of packets since the last dump.
RFC: First post.
2014-03-04 09:36:37 -08:00
|
|
|
|
{
|
|
|
|
|
long long int metric, now, duration;
|
|
|
|
|
|
2014-07-02 07:41:33 +00:00
|
|
|
|
if (udpif->dump_duration < 200) {
|
|
|
|
|
/* We are likely to handle full revalidation for the flows. */
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
revalidator: Only revalidate high-throughput flows.
Previously we would revalidate all flows if the "need_revalidate" flag
was raised. This patch modifies the logic to delete low throughput flows
rather than revalidate them. High-throughput flows are unaffected by
this change. This patch identifies the flows based on the mean time
between packets since the last dump.
This change is primarily targeted at situations where:
* Flow dump duration is high (~1 second)
* Revalidation is triggered. (eg, by bridge reconfiguration or learning)
After the need_revalidate flag is set, next time a new flow dump session
starts, revalidators will begin revalidating the flows. This full
revalidation is more expensive, which significantly increases the flow
dump duration. At the end of this dump session, the datapath flow
management algorithms kick in for the next dump:
* If flow dump duration becomes too long, the flow limit is decreased.
* The number of flows in the datapath then exceeds the flow_limit.
* As the flow_limit is exceeded, max_idle is temporarily set to 100ms.
* Revalidators delete all flows that haven't seen traffic recently.
The effect of this is that many low-throughput flows are deleted after
revalidation, even if they are valid. The revalidation is unnecessary
for flows that would be deleted anyway, so this patch skips the
revalidation step for those flows.
Note that this patch will only perform this optimization if the flow has
already been dumped at least once, and only if the time since the last
dump is sufficiently long. This gives the flow a chance to become
high-throughput.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
---
v2: Acked.
v1: Determine "high-throughput" by packets rather than bytes.
Calculate the mean time between packets for comparison, rather than
comparing the number of packets since the last dump.
RFC: First post.
2014-03-04 09:36:37 -08:00
|
|
|
|
/* Calculate the mean time between seeing these packets. If this
|
|
|
|
|
* exceeds the threshold, then delete the flow rather than performing
|
|
|
|
|
* costly revalidation for flows that aren't being hit frequently.
|
|
|
|
|
*
|
|
|
|
|
* This is targeted at situations where the dump_duration is high (~1s),
|
|
|
|
|
* and revalidation is triggered by a call to udpif_revalidate(). In
|
|
|
|
|
* these situations, revalidation of all flows causes fluctuations in the
|
|
|
|
|
* flow_limit due to the interaction with the dump_duration and max_idle.
|
|
|
|
|
* This tends to result in deletion of low-throughput flows anyway, so
|
|
|
|
|
* skip the revalidation and just delete those flows. */
|
|
|
|
|
packets = MAX(packets, 1);
|
|
|
|
|
now = MAX(used, time_msec());
|
|
|
|
|
duration = now - used;
|
|
|
|
|
metric = duration / packets;
|
|
|
|
|
|
2014-07-02 07:41:33 +00:00
|
|
|
|
if (metric < 200) {
|
|
|
|
|
/* The flow is receiving more than ~5pps, so keep it. */
|
|
|
|
|
return true;
|
revalidator: Only revalidate high-throughput flows.
Previously we would revalidate all flows if the "need_revalidate" flag
was raised. This patch modifies the logic to delete low throughput flows
rather than revalidate them. High-throughput flows are unaffected by
this change. This patch identifies the flows based on the mean time
between packets since the last dump.
This change is primarily targeted at situations where:
* Flow dump duration is high (~1 second)
* Revalidation is triggered. (eg, by bridge reconfiguration or learning)
After the need_revalidate flag is set, next time a new flow dump session
starts, revalidators will begin revalidating the flows. This full
revalidation is more expensive, which significantly increases the flow
dump duration. At the end of this dump session, the datapath flow
management algorithms kick in for the next dump:
* If flow dump duration becomes too long, the flow limit is decreased.
* The number of flows in the datapath then exceeds the flow_limit.
* As the flow_limit is exceeded, max_idle is temporarily set to 100ms.
* Revalidators delete all flows that haven't seen traffic recently.
The effect of this is that many low-throughput flows are deleted after
revalidation, even if they are valid. The revalidation is unnecessary
for flows that would be deleted anyway, so this patch skips the
revalidation step for those flows.
Note that this patch will only perform this optimization if the flow has
already been dumped at least once, and only if the time since the last
dump is sufficiently long. This gives the flow a chance to become
high-throughput.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
---
v2: Acked.
v1: Determine "high-throughput" by packets rather than bytes.
Calculate the mean time between packets for comparison, rather than
comparing the number of packets since the last dump.
RFC: First post.
2014-03-04 09:36:37 -08:00
|
|
|
|
}
|
2014-07-02 07:41:33 +00:00
|
|
|
|
return false;
|
revalidator: Only revalidate high-throughput flows.
Previously we would revalidate all flows if the "need_revalidate" flag
was raised. This patch modifies the logic to delete low throughput flows
rather than revalidate them. High-throughput flows are unaffected by
this change. This patch identifies the flows based on the mean time
between packets since the last dump.
This change is primarily targeted at situations where:
* Flow dump duration is high (~1 second)
* Revalidation is triggered. (eg, by bridge reconfiguration or learning)
After the need_revalidate flag is set, next time a new flow dump session
starts, revalidators will begin revalidating the flows. This full
revalidation is more expensive, which significantly increases the flow
dump duration. At the end of this dump session, the datapath flow
management algorithms kick in for the next dump:
* If flow dump duration becomes too long, the flow limit is decreased.
* The number of flows in the datapath then exceeds the flow_limit.
* As the flow_limit is exceeded, max_idle is temporarily set to 100ms.
* Revalidators delete all flows that haven't seen traffic recently.
The effect of this is that many low-throughput flows are deleted after
revalidation, even if they are valid. The revalidation is unnecessary
for flows that would be deleted anyway, so this patch skips the
revalidation step for those flows.
Note that this patch will only perform this optimization if the flow has
already been dumped at least once, and only if the time since the last
dump is sufficiently long. This gives the flow a chance to become
high-throughput.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
---
v2: Acked.
v1: Determine "high-throughput" by packets rather than bytes.
Calculate the mean time between packets for comparison, rather than
comparing the number of packets since the last dump.
RFC: First post.
2014-03-04 09:36:37 -08:00
|
|
|
|
}
|
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
static bool
|
2014-04-10 07:14:08 +00:00
|
|
|
|
revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
|
2014-05-20 11:37:02 -07:00
|
|
|
|
const struct dpif_flow *f)
|
revalidator: Eliminate duplicate flow handling.
A series of bugs have been identified recently that are caused by a
combination of the awkward flow dump API, possibility of duplicate flows
in a flow dump, and premature optimisation of the revalidator logic.
This patch attempts to simplify the revalidator logic by combining
multiple critical sections into one, which should make the state more
consistent.
The new flow of logic is:
+ Lookup the ukey.
+ If the ukey doesn't exist, create it.
+ Insert the ukey into the udpif. If we can't insert it, skip this flow.
+ Lock the ukey. If we can't lock it, skip it.
+ Determine if the ukey was already handled. If it has, skip it.
+ Revalidate.
+ Update ukey's fields (mark, flow_exists).
+ Unlock the ukey.
Previously, we would attempt process a flow without creating a ukey if
it hadn't been dumped before and it was due to be deleted. This patch
changes this to always create a ukey, allowing the ukey's
mutex to be used as the basis for preventing a flow from being handled
twice. This improves code correctness and readability.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
2014-05-28 15:23:42 +12:00
|
|
|
|
OVS_REQUIRES(ukey->mutex)
|
2013-09-24 13:39:56 -07:00
|
|
|
|
{
|
|
|
|
|
uint64_t slow_path_buf[128 / 8];
|
|
|
|
|
struct xlate_out xout, *xoutp;
|
2014-04-01 21:21:45 +09:00
|
|
|
|
struct netflow *netflow;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
struct ofproto_dpif *ofproto;
|
|
|
|
|
struct dpif_flow_stats push;
|
2014-04-10 07:14:08 +00:00
|
|
|
|
struct ofpbuf xout_actions;
|
|
|
|
|
struct flow flow, dp_mask;
|
|
|
|
|
uint32_t *dp32, *xout32;
|
2014-08-06 18:49:44 -07:00
|
|
|
|
ofp_port_t ofp_in_port;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
struct xlate_in xin;
|
revalidator: Only revalidate high-throughput flows.
Previously we would revalidate all flows if the "need_revalidate" flag
was raised. This patch modifies the logic to delete low throughput flows
rather than revalidate them. High-throughput flows are unaffected by
this change. This patch identifies the flows based on the mean time
between packets since the last dump.
This change is primarily targeted at situations where:
* Flow dump duration is high (~1 second)
* Revalidation is triggered. (eg, by bridge reconfiguration or learning)
After the need_revalidate flag is set, next time a new flow dump session
starts, revalidators will begin revalidating the flows. This full
revalidation is more expensive, which significantly increases the flow
dump duration. At the end of this dump session, the datapath flow
management algorithms kick in for the next dump:
* If flow dump duration becomes too long, the flow limit is decreased.
* The number of flows in the datapath then exceeds the flow_limit.
* As the flow_limit is exceeded, max_idle is temporarily set to 100ms.
* Revalidators delete all flows that haven't seen traffic recently.
The effect of this is that many low-throughput flows are deleted after
revalidation, even if they are valid. The revalidation is unnecessary
for flows that would be deleted anyway, so this patch skips the
revalidation step for those flows.
Note that this patch will only perform this optimization if the flow has
already been dumped at least once, and only if the time since the last
dump is sufficiently long. This gives the flow a chance to become
high-throughput.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
---
v2: Acked.
v1: Determine "high-throughput" by packets rather than bytes.
Calculate the mean time between packets for comparison, rather than
comparing the number of packets since the last dump.
RFC: First post.
2014-03-04 09:36:37 -08:00
|
|
|
|
long long int last_used;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
int error;
|
|
|
|
|
size_t i;
|
2014-04-10 16:00:28 +12:00
|
|
|
|
bool may_learn, ok;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
|
|
|
|
ok = false;
|
|
|
|
|
xoutp = NULL;
|
2014-04-01 21:21:45 +09:00
|
|
|
|
netflow = NULL;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
revalidator: Only revalidate high-throughput flows.
Previously we would revalidate all flows if the "need_revalidate" flag
was raised. This patch modifies the logic to delete low throughput flows
rather than revalidate them. High-throughput flows are unaffected by
this change. This patch identifies the flows based on the mean time
between packets since the last dump.
This change is primarily targeted at situations where:
* Flow dump duration is high (~1 second)
* Revalidation is triggered. (eg, by bridge reconfiguration or learning)
After the need_revalidate flag is set, next time a new flow dump session
starts, revalidators will begin revalidating the flows. This full
revalidation is more expensive, which significantly increases the flow
dump duration. At the end of this dump session, the datapath flow
management algorithms kick in for the next dump:
* If flow dump duration becomes too long, the flow limit is decreased.
* The number of flows in the datapath then exceeds the flow_limit.
* As the flow_limit is exceeded, max_idle is temporarily set to 100ms.
* Revalidators delete all flows that haven't seen traffic recently.
The effect of this is that many low-throughput flows are deleted after
revalidation, even if they are valid. The revalidation is unnecessary
for flows that would be deleted anyway, so this patch skips the
revalidation step for those flows.
Note that this patch will only perform this optimization if the flow has
already been dumped at least once, and only if the time since the last
dump is sufficiently long. This gives the flow a chance to become
high-throughput.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
---
v2: Acked.
v1: Determine "high-throughput" by packets rather than bytes.
Calculate the mean time between packets for comparison, rather than
comparing the number of packets since the last dump.
RFC: First post.
2014-03-04 09:36:37 -08:00
|
|
|
|
last_used = ukey->stats.used;
|
2014-05-20 11:37:02 -07:00
|
|
|
|
push.used = f->stats.used;
|
|
|
|
|
push.tcp_flags = f->stats.tcp_flags;
|
|
|
|
|
push.n_packets = (f->stats.n_packets > ukey->stats.n_packets
|
|
|
|
|
? f->stats.n_packets - ukey->stats.n_packets
|
|
|
|
|
: 0);
|
|
|
|
|
push.n_bytes = (f->stats.n_bytes > ukey->stats.n_bytes
|
|
|
|
|
? f->stats.n_bytes - ukey->stats.n_bytes
|
|
|
|
|
: 0);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
if (udpif->need_revalidate && last_used
|
2014-07-02 07:41:33 +00:00
|
|
|
|
&& !should_revalidate(udpif, push.n_packets, last_used)) {
|
revalidator: Only revalidate high-throughput flows.
Previously we would revalidate all flows if the "need_revalidate" flag
was raised. This patch modifies the logic to delete low throughput flows
rather than revalidate them. High-throughput flows are unaffected by
this change. This patch identifies the flows based on the mean time
between packets since the last dump.
This change is primarily targeted at situations where:
* Flow dump duration is high (~1 second)
* Revalidation is triggered. (eg, by bridge reconfiguration or learning)
After the need_revalidate flag is set, next time a new flow dump session
starts, revalidators will begin revalidating the flows. This full
revalidation is more expensive, which significantly increases the flow
dump duration. At the end of this dump session, the datapath flow
management algorithms kick in for the next dump:
* If flow dump duration becomes too long, the flow limit is decreased.
* The number of flows in the datapath then exceeds the flow_limit.
* As the flow_limit is exceeded, max_idle is temporarily set to 100ms.
* Revalidators delete all flows that haven't seen traffic recently.
The effect of this is that many low-throughput flows are deleted after
revalidation, even if they are valid. The revalidation is unnecessary
for flows that would be deleted anyway, so this patch skips the
revalidation step for those flows.
Note that this patch will only perform this optimization if the flow has
already been dumped at least once, and only if the time since the last
dump is sufficiently long. This gives the flow a chance to become
high-throughput.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
---
v2: Acked.
v1: Determine "high-throughput" by packets rather than bytes.
Calculate the mean time between packets for comparison, rather than
comparing the number of packets since the last dump.
RFC: First post.
2014-03-04 09:36:37 -08:00
|
|
|
|
ok = false;
|
|
|
|
|
goto exit;
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-25 15:23:43 -07:00
|
|
|
|
/* We will push the stats, so update the ukey stats cache. */
|
2014-05-20 11:37:02 -07:00
|
|
|
|
ukey->stats = f->stats;
|
2014-04-10 07:14:08 +00:00
|
|
|
|
if (!push.n_packets && !udpif->need_revalidate) {
|
2013-09-24 13:39:56 -07:00
|
|
|
|
ok = true;
|
|
|
|
|
goto exit;
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-18 11:13:01 +09:00
|
|
|
|
may_learn = push.n_packets > 0;
|
2014-06-23 15:52:03 +00:00
|
|
|
|
if (ukey->xcache && !udpif->need_revalidate) {
|
2014-04-10 16:00:28 +12:00
|
|
|
|
xlate_push_stats(ukey->xcache, may_learn, &push);
|
2014-06-23 15:52:03 +00:00
|
|
|
|
ok = true;
|
|
|
|
|
goto exit;
|
2014-04-10 16:00:28 +12:00
|
|
|
|
}
|
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
if (odp_flow_key_to_flow(ukey->key, ukey->key_len, &flow)
|
|
|
|
|
== ODP_FIT_ERROR) {
|
|
|
|
|
goto exit;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
error = xlate_receive(udpif->backer, &flow, &ofproto, NULL, NULL, &netflow,
|
|
|
|
|
&ofp_in_port);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
if (error) {
|
|
|
|
|
goto exit;
|
|
|
|
|
}
|
|
|
|
|
|
2014-06-23 15:52:03 +00:00
|
|
|
|
if (udpif->need_revalidate) {
|
|
|
|
|
xlate_cache_clear(ukey->xcache);
|
|
|
|
|
}
|
2014-04-10 16:00:28 +12:00
|
|
|
|
if (!ukey->xcache) {
|
|
|
|
|
ukey->xcache = xlate_cache_new();
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, push.tcp_flags,
|
|
|
|
|
NULL);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
xin.resubmit_stats = push.n_packets ? &push : NULL;
|
2014-04-10 16:00:28 +12:00
|
|
|
|
xin.xcache = ukey->xcache;
|
|
|
|
|
xin.may_learn = may_learn;
|
2014-04-10 07:14:08 +00:00
|
|
|
|
xin.skip_wildcards = !udpif->need_revalidate;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
xlate_actions(&xin, &xout);
|
|
|
|
|
xoutp = &xout;
|
2013-09-23 10:57:22 -07:00
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
if (!udpif->need_revalidate) {
|
2013-09-24 13:39:56 -07:00
|
|
|
|
ok = true;
|
|
|
|
|
goto exit;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!xout.slow) {
|
2014-08-06 18:49:44 -07:00
|
|
|
|
ofpbuf_use_const(&xout_actions, ofpbuf_data(xout.odp_actions),
|
|
|
|
|
ofpbuf_size(xout.odp_actions));
|
2013-09-12 17:42:23 -07:00
|
|
|
|
} else {
|
2013-09-24 13:39:56 -07:00
|
|
|
|
ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
|
2014-08-06 18:49:44 -07:00
|
|
|
|
compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port,
|
|
|
|
|
&xout_actions);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
if (f->actions_len != ofpbuf_size(&xout_actions)
|
|
|
|
|
|| memcmp(ofpbuf_data(&xout_actions), f->actions, f->actions_len)) {
|
2013-09-24 13:39:56 -07:00
|
|
|
|
goto exit;
|
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
if (odp_flow_key_to_mask(f->mask, f->mask_len, &dp_mask, &flow)
|
2013-09-24 13:39:56 -07:00
|
|
|
|
== ODP_FIT_ERROR) {
|
|
|
|
|
goto exit;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Since the kernel is free to ignore wildcarded bits in the mask, we can't
|
|
|
|
|
* directly check that the masks are the same. Instead we check that the
|
|
|
|
|
* mask in the kernel is more specific i.e. less wildcarded, than what
|
|
|
|
|
* we've calculated here. This guarantees we don't catch any packets we
|
|
|
|
|
* shouldn't with the megaflow. */
|
2014-04-10 07:14:08 +00:00
|
|
|
|
dp32 = (uint32_t *) &dp_mask;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
xout32 = (uint32_t *) &xout.wc.masks;
|
|
|
|
|
for (i = 0; i < FLOW_U32S; i++) {
|
2014-04-10 07:14:08 +00:00
|
|
|
|
if ((dp32[i] | xout32[i]) != dp32[i]) {
|
2013-09-24 13:39:56 -07:00
|
|
|
|
goto exit;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
ok = true;
|
|
|
|
|
|
|
|
|
|
exit:
|
2014-04-01 21:21:45 +09:00
|
|
|
|
if (netflow) {
|
|
|
|
|
if (!ok) {
|
|
|
|
|
netflow_flow_clear(netflow, &flow);
|
|
|
|
|
}
|
|
|
|
|
netflow_unref(netflow);
|
|
|
|
|
}
|
2013-09-24 13:39:56 -07:00
|
|
|
|
xlate_out_uninit(xoutp);
|
|
|
|
|
return ok;
|
|
|
|
|
}
|
|
|
|
|
|
2014-02-11 13:55:34 -08:00
|
|
|
|
struct dump_op {
|
|
|
|
|
struct udpif_key *ukey;
|
|
|
|
|
struct dpif_flow_stats stats; /* Stats for 'op'. */
|
|
|
|
|
struct dpif_op op; /* Flow del operation. */
|
|
|
|
|
};
|
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
static void
|
2014-02-11 13:55:34 -08:00
|
|
|
|
dump_op_init(struct dump_op *op, const struct nlattr *key, size_t key_len,
|
2014-04-10 07:14:08 +00:00
|
|
|
|
struct udpif_key *ukey)
|
2014-02-11 13:55:34 -08:00
|
|
|
|
{
|
|
|
|
|
op->ukey = ukey;
|
|
|
|
|
op->op.type = DPIF_OP_FLOW_DEL;
|
|
|
|
|
op->op.u.flow_del.key = key;
|
|
|
|
|
op->op.u.flow_del.key_len = key_len;
|
|
|
|
|
op->op.u.flow_del.stats = &op->stats;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-04-10 07:14:08 +00:00
|
|
|
|
push_dump_ops__(struct udpif *udpif, struct dump_op *ops, size_t n_ops)
|
2013-09-24 13:39:56 -07:00
|
|
|
|
{
|
2014-02-11 13:55:34 -08:00
|
|
|
|
struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
|
|
|
|
|
size_t i;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-02-11 13:55:34 -08:00
|
|
|
|
ovs_assert(n_ops <= REVALIDATE_MAX_BATCH);
|
|
|
|
|
for (i = 0; i < n_ops; i++) {
|
|
|
|
|
opsp[i] = &ops[i].op;
|
|
|
|
|
}
|
|
|
|
|
dpif_operate(udpif->dpif, opsp, n_ops);
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < n_ops; i++) {
|
|
|
|
|
struct dump_op *op = &ops[i];
|
|
|
|
|
struct dpif_flow_stats *push, *stats, push_buf;
|
|
|
|
|
|
|
|
|
|
stats = op->op.u.flow_del.stats;
|
2014-07-01 09:54:18 +00:00
|
|
|
|
push = &push_buf;
|
|
|
|
|
|
|
|
|
|
ovs_mutex_lock(&op->ukey->mutex);
|
|
|
|
|
push->used = MAX(stats->used, op->ukey->stats.used);
|
|
|
|
|
push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
|
|
|
|
|
push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
|
|
|
|
|
push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
|
|
|
|
|
ovs_mutex_unlock(&op->ukey->mutex);
|
2014-02-11 13:55:34 -08:00
|
|
|
|
|
|
|
|
|
if (push->n_packets || netflow_exists()) {
|
|
|
|
|
struct ofproto_dpif *ofproto;
|
|
|
|
|
struct netflow *netflow;
|
2014-08-06 18:49:44 -07:00
|
|
|
|
ofp_port_t ofp_in_port;
|
2014-02-11 13:55:34 -08:00
|
|
|
|
struct flow flow;
|
2014-04-10 16:00:28 +12:00
|
|
|
|
bool may_learn;
|
2014-07-01 09:54:18 +00:00
|
|
|
|
int error;
|
2014-04-10 16:00:28 +12:00
|
|
|
|
|
|
|
|
|
may_learn = push->n_packets > 0;
|
2014-07-01 09:54:18 +00:00
|
|
|
|
ovs_mutex_lock(&op->ukey->mutex);
|
|
|
|
|
if (op->ukey->xcache) {
|
|
|
|
|
xlate_push_stats(op->ukey->xcache, may_learn, push);
|
2014-04-10 07:14:08 +00:00
|
|
|
|
ovs_mutex_unlock(&op->ukey->mutex);
|
2014-07-01 09:54:18 +00:00
|
|
|
|
continue;
|
2014-04-10 16:00:28 +12:00
|
|
|
|
}
|
2014-07-01 09:54:18 +00:00
|
|
|
|
ovs_mutex_unlock(&op->ukey->mutex);
|
2014-02-11 13:55:34 -08:00
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
if (odp_flow_key_to_flow(op->op.u.flow_del.key,
|
|
|
|
|
op->op.u.flow_del.key_len, &flow)
|
|
|
|
|
== ODP_FIT_ERROR) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
error = xlate_receive(udpif->backer, &flow, &ofproto,
|
|
|
|
|
NULL, NULL, &netflow, &ofp_in_port);
|
2014-07-01 09:54:18 +00:00
|
|
|
|
if (!error) {
|
2014-02-11 13:55:34 -08:00
|
|
|
|
struct xlate_in xin;
|
|
|
|
|
|
2014-08-06 18:49:44 -07:00
|
|
|
|
xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL,
|
|
|
|
|
push->tcp_flags, NULL);
|
2014-02-11 13:55:34 -08:00
|
|
|
|
xin.resubmit_stats = push->n_packets ? push : NULL;
|
2014-04-10 16:00:28 +12:00
|
|
|
|
xin.may_learn = may_learn;
|
2014-02-11 13:55:34 -08:00
|
|
|
|
xin.skip_wildcards = true;
|
|
|
|
|
xlate_actions_for_side_effects(&xin);
|
|
|
|
|
|
|
|
|
|
if (netflow) {
|
|
|
|
|
netflow_flow_clear(netflow, &flow);
|
|
|
|
|
netflow_unref(netflow);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2014-04-10 07:14:08 +00:00
|
|
|
|
}
|
2014-02-11 13:55:34 -08:00
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
static void
|
|
|
|
|
push_dump_ops(struct revalidator *revalidator,
|
|
|
|
|
struct dump_op *ops, size_t n_ops)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
2014-02-11 13:55:34 -08:00
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
push_dump_ops__(revalidator->udpif, ops, n_ops);
|
|
|
|
|
for (i = 0; i < n_ops; i++) {
|
|
|
|
|
ukey_delete(revalidator, ops[i].ukey);
|
2014-02-11 13:55:34 -08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2014-04-10 07:14:08 +00:00
|
|
|
|
revalidate(struct revalidator *revalidator)
|
2014-02-11 13:55:34 -08:00
|
|
|
|
{
|
|
|
|
|
struct udpif *udpif = revalidator->udpif;
|
2014-05-20 11:37:02 -07:00
|
|
|
|
struct dpif_flow_dump_thread *dump_thread;
|
2014-05-14 16:17:25 +12:00
|
|
|
|
uint64_t dump_seq;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
unsigned int flow_limit;
|
|
|
|
|
|
2014-05-14 16:17:25 +12:00
|
|
|
|
dump_seq = seq_read(udpif->dump_seq);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
atomic_read(&udpif->flow_limit, &flow_limit);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
dump_thread = dpif_flow_dump_thread_create(udpif->dump);
|
|
|
|
|
for (;;) {
|
|
|
|
|
struct dump_op ops[REVALIDATE_MAX_BATCH];
|
|
|
|
|
int n_ops = 0;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
struct dpif_flow flows[REVALIDATE_MAX_BATCH];
|
|
|
|
|
const struct dpif_flow *f;
|
|
|
|
|
int n_dumped;
|
2014-04-10 07:14:08 +00:00
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
long long int max_idle;
|
|
|
|
|
long long int now;
|
|
|
|
|
size_t n_dp_flows;
|
|
|
|
|
bool kill_them_all;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
n_dumped = dpif_flow_dump_next(dump_thread, flows, ARRAY_SIZE(flows));
|
|
|
|
|
if (!n_dumped) {
|
|
|
|
|
break;
|
revalidator: Prevent handling the same flow twice.
When the datapath flow table is modified while a flow dump operation is
in progress, it is possible for the same flow to be dumped twice. In
such cases, revalidators may perform redundant work, or attempt to
delete the same flow twice.
This was causing intermittent testsuite failures for test #670 -
"ofproto-dpif, active-backup bonding" where a flow (that had not
previously been dumped) was dumped, revalidated and deleted twice.
The logs show errors such as:
"failed to flow_get (No such file or directory) skb_priority(0),..."
"failed to flow_del (No such file or directory) skb_priority(0),..."
This patch adds a 'flow_exists' field to 'struct udpif_key' to track
whether the flow is (in progress) to be deleted. After doing a ukey
lookup, we check whether ukey->mark or ukey->flow indicates that the
flow has already been handled. If it has already been handled, we skip
handling the flow again.
We also defer ukey cleanup for flows that fail revalidation, so that the
ukey will still exist if the same flow is dumped twice. This allows the
above logic to work in this case.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Alex Wang <alexw@nicira.com>
2014-04-23 15:31:17 +12:00
|
|
|
|
}
|
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
now = time_msec();
|
|
|
|
|
|
|
|
|
|
/* In normal operation we want to keep flows around until they have
|
|
|
|
|
* been idle for 'ofproto_max_idle' milliseconds. However:
|
|
|
|
|
*
|
|
|
|
|
* - If the number of datapath flows climbs above 'flow_limit',
|
|
|
|
|
* drop that down to 100 ms to try to bring the flows down to
|
|
|
|
|
* the limit.
|
|
|
|
|
*
|
|
|
|
|
* - If the number of datapath flows climbs above twice
|
|
|
|
|
* 'flow_limit', delete all the datapath flows as an emergency
|
|
|
|
|
* measure. (We reassess this condition for the next batch of
|
|
|
|
|
* datapath flows, so we will recover before all the flows are
|
|
|
|
|
* gone.) */
|
|
|
|
|
n_dp_flows = udpif_get_n_flows(udpif);
|
|
|
|
|
kill_them_all = n_dp_flows > flow_limit * 2;
|
|
|
|
|
max_idle = n_dp_flows > flow_limit ? 100 : ofproto_max_idle;
|
|
|
|
|
|
|
|
|
|
for (f = flows; f < &flows[n_dumped]; f++) {
|
|
|
|
|
long long int used = f->stats.used;
|
2014-06-04 09:59:23 +00:00
|
|
|
|
struct udpif_key *ukey;
|
2014-05-14 16:17:25 +12:00
|
|
|
|
bool already_dumped, keep;
|
revalidator: Eliminate duplicate flow handling.
A series of bugs have been identified recently that are caused by a
combination of the awkward flow dump API, possibility of duplicate flows
in a flow dump, and premature optimisation of the revalidator logic.
This patch attempts to simplify the revalidator logic by combining
multiple critical sections into one, which should make the state more
consistent.
The new flow of logic is:
+ Lookup the ukey.
+ If the ukey doesn't exist, create it.
+ Insert the ukey into the udpif. If we can't insert it, skip this flow.
+ Lock the ukey. If we can't lock it, skip it.
+ Determine if the ukey was already handled. If it has, skip it.
+ Revalidate.
+ Update ukey's fields (mark, flow_exists).
+ Unlock the ukey.
Previously, we would attempt process a flow without creating a ukey if
it hadn't been dumped before and it was due to be deleted. This patch
changes this to always create a ukey, allowing the ukey's
mutex to be used as the basis for preventing a flow from being handled
twice. This improves code correctness and readability.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
2014-05-28 15:23:42 +12:00
|
|
|
|
|
2014-06-04 09:59:23 +00:00
|
|
|
|
if (!ukey_acquire(udpif, f->key, f->key_len, used, &ukey)) {
|
|
|
|
|
/* We couldn't acquire the ukey. This means that
|
|
|
|
|
* another revalidator is processing this flow
|
|
|
|
|
* concurrently, so don't bother processing it. */
|
revalidator: Eliminate duplicate flow handling.
A series of bugs have been identified recently that are caused by a
combination of the awkward flow dump API, possibility of duplicate flows
in a flow dump, and premature optimisation of the revalidator logic.
This patch attempts to simplify the revalidator logic by combining
multiple critical sections into one, which should make the state more
consistent.
The new flow of logic is:
+ Lookup the ukey.
+ If the ukey doesn't exist, create it.
+ Insert the ukey into the udpif. If we can't insert it, skip this flow.
+ Lock the ukey. If we can't lock it, skip it.
+ Determine if the ukey was already handled. If it has, skip it.
+ Revalidate.
+ Update ukey's fields (mark, flow_exists).
+ Unlock the ukey.
Previously, we would attempt process a flow without creating a ukey if
it hadn't been dumped before and it was due to be deleted. This patch
changes this to always create a ukey, allowing the ukey's
mutex to be used as the basis for preventing a flow from being handled
twice. This improves code correctness and readability.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
2014-05-28 15:23:42 +12:00
|
|
|
|
COVERAGE_INC(upcall_duplicate_flow);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2014-05-14 16:17:25 +12:00
|
|
|
|
already_dumped = ukey->dump_seq == dump_seq;
|
revalidator: Eliminate duplicate flow handling.
A series of bugs have been identified recently that are caused by a
combination of the awkward flow dump API, possibility of duplicate flows
in a flow dump, and premature optimisation of the revalidator logic.
This patch attempts to simplify the revalidator logic by combining
multiple critical sections into one, which should make the state more
consistent.
The new flow of logic is:
+ Lookup the ukey.
+ If the ukey doesn't exist, create it.
+ Insert the ukey into the udpif. If we can't insert it, skip this flow.
+ Lock the ukey. If we can't lock it, skip it.
+ Determine if the ukey was already handled. If it has, skip it.
+ Revalidate.
+ Update ukey's fields (mark, flow_exists).
+ Unlock the ukey.
Previously, we would attempt process a flow without creating a ukey if
it hadn't been dumped before and it was due to be deleted. This patch
changes this to always create a ukey, allowing the ukey's
mutex to be used as the basis for preventing a flow from being handled
twice. This improves code correctness and readability.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
2014-05-28 15:23:42 +12:00
|
|
|
|
if (already_dumped) {
|
|
|
|
|
/* The flow has already been dumped and handled by another
|
|
|
|
|
* revalidator during this flow dump operation. Skip it. */
|
|
|
|
|
COVERAGE_INC(upcall_duplicate_flow);
|
|
|
|
|
ovs_mutex_unlock(&ukey->mutex);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!used) {
|
|
|
|
|
used = ukey->created;
|
|
|
|
|
}
|
2014-05-20 11:37:02 -07:00
|
|
|
|
if (kill_them_all || (used && used < now - max_idle)) {
|
2014-05-14 16:17:25 +12:00
|
|
|
|
keep = false;
|
2014-05-20 11:37:02 -07:00
|
|
|
|
} else {
|
2014-05-14 16:17:25 +12:00
|
|
|
|
keep = revalidate_ukey(udpif, ukey, f);
|
2014-05-20 11:37:02 -07:00
|
|
|
|
}
|
2014-05-14 16:17:25 +12:00
|
|
|
|
ukey->dump_seq = dump_seq;
|
|
|
|
|
ukey->flow_exists = keep;
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-05-14 16:17:25 +12:00
|
|
|
|
if (!keep) {
|
2014-05-20 11:37:02 -07:00
|
|
|
|
dump_op_init(&ops[n_ops++], f->key, f->key_len, ukey);
|
|
|
|
|
}
|
revalidator: Eliminate duplicate flow handling.
A series of bugs have been identified recently that are caused by a
combination of the awkward flow dump API, possibility of duplicate flows
in a flow dump, and premature optimisation of the revalidator logic.
This patch attempts to simplify the revalidator logic by combining
multiple critical sections into one, which should make the state more
consistent.
The new flow of logic is:
+ Lookup the ukey.
+ If the ukey doesn't exist, create it.
+ Insert the ukey into the udpif. If we can't insert it, skip this flow.
+ Lock the ukey. If we can't lock it, skip it.
+ Determine if the ukey was already handled. If it has, skip it.
+ Revalidate.
+ Update ukey's fields (mark, flow_exists).
+ Unlock the ukey.
Previously, we would attempt process a flow without creating a ukey if
it hadn't been dumped before and it was due to be deleted. This patch
changes this to always create a ukey, allowing the ukey's
mutex to be used as the basis for preventing a flow from being handled
twice. This improves code correctness and readability.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Ethan Jackson <ethan@nicira.com>
2014-05-28 15:23:42 +12:00
|
|
|
|
ovs_mutex_unlock(&ukey->mutex);
|
2014-04-10 07:14:08 +00:00
|
|
|
|
}
|
2014-02-11 13:55:33 -08:00
|
|
|
|
|
2014-05-20 11:37:02 -07:00
|
|
|
|
if (n_ops) {
|
2014-04-10 07:14:08 +00:00
|
|
|
|
push_dump_ops__(udpif, ops, n_ops);
|
|
|
|
|
}
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
2014-05-20 11:37:02 -07:00
|
|
|
|
dpif_flow_dump_thread_destroy(dump_thread);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-07-08 07:04:05 +00:00
|
|
|
|
/* Called with exclusive access to 'revalidator' and 'ukey'. */
|
|
|
|
|
static bool
|
|
|
|
|
handle_missed_revalidation(struct revalidator *revalidator,
|
|
|
|
|
struct udpif_key *ukey)
|
|
|
|
|
OVS_NO_THREAD_SAFETY_ANALYSIS
|
|
|
|
|
{
|
|
|
|
|
struct udpif *udpif = revalidator->udpif;
|
|
|
|
|
struct dpif_flow flow;
|
2014-08-13 09:55:54 +12:00
|
|
|
|
struct ofpbuf buf;
|
|
|
|
|
uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
|
2014-07-08 07:04:05 +00:00
|
|
|
|
bool keep = false;
|
|
|
|
|
|
|
|
|
|
COVERAGE_INC(revalidate_missed_dp_flow);
|
|
|
|
|
|
2014-08-13 09:55:54 +12:00
|
|
|
|
ofpbuf_use_stub(&buf, &stub, sizeof stub);
|
2014-07-08 07:04:05 +00:00
|
|
|
|
if (!dpif_flow_get(udpif->dpif, ukey->key, ukey->key_len, &buf, &flow)) {
|
|
|
|
|
keep = revalidate_ukey(udpif, ukey, &flow);
|
|
|
|
|
}
|
2014-08-13 09:55:54 +12:00
|
|
|
|
ofpbuf_uninit(&buf);
|
2014-07-08 07:04:05 +00:00
|
|
|
|
|
|
|
|
|
return keep;
|
|
|
|
|
}
|
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
static void
|
2014-02-11 13:55:36 -08:00
|
|
|
|
revalidator_sweep__(struct revalidator *revalidator, bool purge)
|
2014-04-10 07:14:08 +00:00
|
|
|
|
OVS_NO_THREAD_SAFETY_ANALYSIS
|
2013-09-24 13:39:56 -07:00
|
|
|
|
{
|
2014-02-11 13:55:35 -08:00
|
|
|
|
struct dump_op ops[REVALIDATE_MAX_BATCH];
|
2013-09-24 13:39:56 -07:00
|
|
|
|
struct udpif_key *ukey, *next;
|
2014-02-11 13:55:35 -08:00
|
|
|
|
size_t n_ops;
|
2014-05-14 16:17:25 +12:00
|
|
|
|
uint64_t dump_seq;
|
2014-02-11 13:55:35 -08:00
|
|
|
|
|
|
|
|
|
n_ops = 0;
|
2014-05-14 16:17:25 +12:00
|
|
|
|
dump_seq = seq_read(revalidator->udpif->dump_seq);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
/* During garbage collection, this revalidator completely owns its ukeys
|
|
|
|
|
* map, and therefore doesn't need to do any locking. */
|
|
|
|
|
HMAP_FOR_EACH_SAFE (ukey, next, hmap_node, revalidator->ukeys) {
|
2014-07-08 07:04:05 +00:00
|
|
|
|
if (ukey->flow_exists
|
|
|
|
|
&& (purge
|
|
|
|
|
|| (ukey->dump_seq != dump_seq
|
|
|
|
|
&& revalidator->udpif->need_revalidate
|
|
|
|
|
&& !handle_missed_revalidation(revalidator, ukey)))) {
|
2014-02-11 13:55:35 -08:00
|
|
|
|
struct dump_op *op = &ops[n_ops++];
|
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
dump_op_init(op, ukey->key, ukey->key_len, ukey);
|
2014-02-11 13:55:35 -08:00
|
|
|
|
if (n_ops == REVALIDATE_MAX_BATCH) {
|
|
|
|
|
push_dump_ops(revalidator, ops, n_ops);
|
|
|
|
|
n_ops = 0;
|
|
|
|
|
}
|
2014-07-08 07:04:05 +00:00
|
|
|
|
} else if (!ukey->flow_exists) {
|
|
|
|
|
ukey_delete(revalidator, ukey);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
2013-06-25 14:45:43 -07:00
|
|
|
|
}
|
2014-02-11 13:55:35 -08:00
|
|
|
|
|
|
|
|
|
if (n_ops) {
|
|
|
|
|
push_dump_ops(revalidator, ops, n_ops);
|
|
|
|
|
}
|
2013-06-25 14:45:43 -07:00
|
|
|
|
}
|
2014-02-11 13:55:36 -08:00
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
revalidator_sweep(struct revalidator *revalidator)
|
|
|
|
|
{
|
|
|
|
|
revalidator_sweep__(revalidator, false);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
revalidator_purge(struct revalidator *revalidator)
|
|
|
|
|
{
|
|
|
|
|
revalidator_sweep__(revalidator, true);
|
|
|
|
|
}
|
2013-11-20 18:06:12 -08:00
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
|
|
|
|
|
const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
|
|
|
|
|
{
|
|
|
|
|
struct ds ds = DS_EMPTY_INITIALIZER;
|
|
|
|
|
struct udpif *udpif;
|
|
|
|
|
|
|
|
|
|
LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
|
2013-09-24 13:39:56 -07:00
|
|
|
|
unsigned int flow_limit;
|
2013-11-20 18:06:12 -08:00
|
|
|
|
size_t i;
|
|
|
|
|
|
2013-09-24 13:39:56 -07:00
|
|
|
|
atomic_read(&udpif->flow_limit, &flow_limit);
|
|
|
|
|
|
2013-11-20 18:06:12 -08:00
|
|
|
|
ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
|
2014-05-14 16:19:34 +09:00
|
|
|
|
ds_put_format(&ds, "\tflows : (current %lu)"
|
2013-09-24 13:39:56 -07:00
|
|
|
|
" (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif),
|
|
|
|
|
udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
|
|
|
|
|
ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration);
|
|
|
|
|
|
|
|
|
|
ds_put_char(&ds, '\n');
|
|
|
|
|
for (i = 0; i < n_revalidators; i++) {
|
|
|
|
|
struct revalidator *revalidator = &udpif->revalidators[i];
|
|
|
|
|
|
2014-04-10 07:14:08 +00:00
|
|
|
|
ovs_mutex_lock(&udpif->ukeys[i].mutex);
|
ovs-thread: Make caller provide thread name when creating a thread.
Thread names are occasionally very useful for debugging, but from time to
time we've forgotten to set one. This commit adds the new thread's name
as a parameter to the function to start a thread, to make that mistake
impossible. This also simplifies code, since two function calls become
only one.
This makes a few other changes to the thread creation function:
* Since it is no longer a direct wrapper around a pthread function,
rename it to avoid giving that impression.
* Remove 'pthread_attr_t *' param that every caller supplied as NULL.
* Change 'pthread *' parameter into a return value, for convenience.
The system-stats code hadn't set a thread name, so this fixes that issue.
This patch is a prerequisite for making RCU report the name of a thread
that is blocking RCU synchronization, because the easiest way to do that is
for ovsrcu_quiesce_end() to record the current thread's name.
ovsrcu_quiesce_end() is called before the thread function is called, so it
won't get a name set within the thread function itself. Setting the thread
name earlier, as in this patch, avoids the problem.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Alex Wang <alexw@nicira.com>
2014-04-25 17:46:21 -07:00
|
|
|
|
ds_put_format(&ds, "\t%u: (keys %"PRIuSIZE")\n",
|
|
|
|
|
revalidator->id, hmap_count(&udpif->ukeys[i].hmap));
|
2014-04-10 07:14:08 +00:00
|
|
|
|
ovs_mutex_unlock(&udpif->ukeys[i].mutex);
|
2013-09-24 13:39:56 -07:00
|
|
|
|
}
|
2013-11-20 18:06:12 -08:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unixctl_command_reply(conn, ds_cstr(&ds));
|
|
|
|
|
ds_destroy(&ds);
|
|
|
|
|
}
|
2013-09-24 13:39:56 -07:00
|
|
|
|
|
|
|
|
|
/* Disable using the megaflows.
|
|
|
|
|
*
|
|
|
|
|
* This command is only needed for advanced debugging, so it's not
|
|
|
|
|
* documented in the man page. */
|
|
|
|
|
static void
|
|
|
|
|
upcall_unixctl_disable_megaflows(struct unixctl_conn *conn,
|
|
|
|
|
int argc OVS_UNUSED,
|
|
|
|
|
const char *argv[] OVS_UNUSED,
|
|
|
|
|
void *aux OVS_UNUSED)
|
|
|
|
|
{
|
|
|
|
|
atomic_store(&enable_megaflows, false);
|
udpif: Bug fix updif_flush
Before this commit, all datapath flows are cleared with dpif_flush(),
but the revalidator thread still holds ukeys, which are caches of the
datapath flows in the revalidaor. Flushing ukeys causes flow_del
messages to be sent to the datapath again on flows that have been
deleted by the dpif_flush() already.
Double deletion by itself is not problem, per se, may an efficiency
issue. However, for ever flow_del message sent to the datapath, a log
message, at the warning level, will be generated in case datapath
failed to execute the command. In addition to cause spurious log
messages, Double deletion causes unit tests to report erroneous
failures as all warning messages are considered test failures.
The fix is to simply shut down the revalidator threads to flush all
ukeys, then flush the datapth before restarting the revalidator threads.
dpif_flush() was implemented as flush flows of all datapaths while
most of its invocation should only flush its local datapath.
Only megaflow on/off commands should flush all dapapaths. This bug is
also fixed.
Found during development.
Signed-off-by: Andy Zhou <azhou@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
2014-03-13 21:48:55 -07:00
|
|
|
|
udpif_flush_all_datapaths();
|
2013-09-24 13:39:56 -07:00
|
|
|
|
unixctl_command_reply(conn, "megaflows disabled");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Re-enable using megaflows.
|
|
|
|
|
*
|
|
|
|
|
* This command is only needed for advanced debugging, so it's not
|
|
|
|
|
* documented in the man page. */
|
|
|
|
|
static void
|
|
|
|
|
upcall_unixctl_enable_megaflows(struct unixctl_conn *conn,
|
|
|
|
|
int argc OVS_UNUSED,
|
|
|
|
|
const char *argv[] OVS_UNUSED,
|
|
|
|
|
void *aux OVS_UNUSED)
|
|
|
|
|
{
|
|
|
|
|
atomic_store(&enable_megaflows, true);
|
udpif: Bug fix updif_flush
Before this commit, all datapath flows are cleared with dpif_flush(),
but the revalidator thread still holds ukeys, which are caches of the
datapath flows in the revalidaor. Flushing ukeys causes flow_del
messages to be sent to the datapath again on flows that have been
deleted by the dpif_flush() already.
Double deletion by itself is not problem, per se, may an efficiency
issue. However, for ever flow_del message sent to the datapath, a log
message, at the warning level, will be generated in case datapath
failed to execute the command. In addition to cause spurious log
messages, Double deletion causes unit tests to report erroneous
failures as all warning messages are considered test failures.
The fix is to simply shut down the revalidator threads to flush all
ukeys, then flush the datapth before restarting the revalidator threads.
dpif_flush() was implemented as flush flows of all datapaths while
most of its invocation should only flush its local datapath.
Only megaflow on/off commands should flush all dapapaths. This bug is
also fixed.
Found during development.
Signed-off-by: Andy Zhou <azhou@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
2014-03-13 21:48:55 -07:00
|
|
|
|
udpif_flush_all_datapaths();
|
2013-09-24 13:39:56 -07:00
|
|
|
|
unixctl_command_reply(conn, "megaflows enabled");
|
|
|
|
|
}
|
2014-02-06 09:49:19 -08:00
|
|
|
|
|
|
|
|
|
/* Set the flow limit.
|
|
|
|
|
*
|
|
|
|
|
* This command is only needed for advanced debugging, so it's not
|
|
|
|
|
* documented in the man page. */
|
|
|
|
|
static void
|
|
|
|
|
upcall_unixctl_set_flow_limit(struct unixctl_conn *conn,
|
|
|
|
|
int argc OVS_UNUSED,
|
|
|
|
|
const char *argv[] OVS_UNUSED,
|
|
|
|
|
void *aux OVS_UNUSED)
|
|
|
|
|
{
|
|
|
|
|
struct ds ds = DS_EMPTY_INITIALIZER;
|
|
|
|
|
struct udpif *udpif;
|
|
|
|
|
unsigned int flow_limit = atoi(argv[1]);
|
|
|
|
|
|
|
|
|
|
LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
|
|
|
|
|
atomic_store(&udpif->flow_limit, flow_limit);
|
|
|
|
|
}
|
|
|
|
|
ds_put_format(&ds, "set flow_limit to %u\n", flow_limit);
|
|
|
|
|
unixctl_command_reply(conn, ds_cstr(&ds));
|
|
|
|
|
ds_destroy(&ds);
|
|
|
|
|
}
|
2014-06-25 14:02:45 +00:00
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
upcall_unixctl_dump_wait(struct unixctl_conn *conn,
|
|
|
|
|
int argc OVS_UNUSED,
|
|
|
|
|
const char *argv[] OVS_UNUSED,
|
|
|
|
|
void *aux OVS_UNUSED)
|
|
|
|
|
{
|
|
|
|
|
if (list_is_singleton(&all_udpifs)) {
|
|
|
|
|
struct udpif *udpif;
|
|
|
|
|
size_t len;
|
|
|
|
|
|
|
|
|
|
udpif = OBJECT_CONTAINING(list_front(&all_udpifs), udpif, list_node);
|
|
|
|
|
len = (udpif->n_conns + 1) * sizeof *udpif->conns;
|
|
|
|
|
udpif->conn_seq = seq_read(udpif->dump_seq);
|
|
|
|
|
udpif->conns = xrealloc(udpif->conns, len);
|
|
|
|
|
udpif->conns[udpif->n_conns++] = conn;
|
|
|
|
|
} else {
|
|
|
|
|
unixctl_command_reply_error(conn, "can't wait on multiple udpifs.");
|
|
|
|
|
}
|
|
|
|
|
}
|