2013-06-11 13:32:30 -07:00
|
|
|
|
/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
|
|
|
|
|
*
|
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
|
* You may obtain a copy of the License at:
|
|
|
|
|
*
|
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
*
|
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
|
* limitations under the License. */
|
|
|
|
|
|
|
|
|
|
#include <config.h>
|
|
|
|
|
|
|
|
|
|
#include "ofproto/ofproto-dpif-xlate.h"
|
|
|
|
|
|
2013-08-02 12:43:03 -07:00
|
|
|
|
#include <errno.h>
|
|
|
|
|
|
2013-06-17 17:56:54 -07:00
|
|
|
|
#include "bfd.h"
|
2013-06-11 13:32:30 -07:00
|
|
|
|
#include "bitmap.h"
|
|
|
|
|
#include "bond.h"
|
|
|
|
|
#include "bundle.h"
|
|
|
|
|
#include "byte-order.h"
|
2013-06-17 17:56:54 -07:00
|
|
|
|
#include "cfm.h"
|
2013-06-11 13:32:30 -07:00
|
|
|
|
#include "connmgr.h"
|
|
|
|
|
#include "coverage.h"
|
|
|
|
|
#include "dpif.h"
|
|
|
|
|
#include "dynamic-string.h"
|
2013-06-22 10:48:42 -07:00
|
|
|
|
#include "in-band.h"
|
2013-06-17 17:56:54 -07:00
|
|
|
|
#include "lacp.h"
|
2013-06-11 13:32:30 -07:00
|
|
|
|
#include "learn.h"
|
2013-06-13 18:38:24 -07:00
|
|
|
|
#include "list.h"
|
2013-06-11 13:32:30 -07:00
|
|
|
|
#include "mac-learning.h"
|
|
|
|
|
#include "meta-flow.h"
|
|
|
|
|
#include "multipath.h"
|
|
|
|
|
#include "netdev-vport.h"
|
|
|
|
|
#include "netlink.h"
|
|
|
|
|
#include "nx-match.h"
|
|
|
|
|
#include "odp-execute.h"
|
|
|
|
|
#include "ofp-actions.h"
|
|
|
|
|
#include "ofproto/ofproto-dpif-ipfix.h"
|
2013-06-20 13:00:27 -07:00
|
|
|
|
#include "ofproto/ofproto-dpif-mirror.h"
|
2013-12-20 14:53:52 -08:00
|
|
|
|
#include "ofproto/ofproto-dpif-monitor.h"
|
2013-06-11 13:32:30 -07:00
|
|
|
|
#include "ofproto/ofproto-dpif-sflow.h"
|
|
|
|
|
#include "ofproto/ofproto-dpif.h"
|
2013-09-09 13:05:52 -07:00
|
|
|
|
#include "ofproto/ofproto-provider.h"
|
2013-06-11 13:32:30 -07:00
|
|
|
|
#include "tunnel.h"
|
|
|
|
|
#include "vlog.h"
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
/* Coverage counters maintained by this module.  ('xlate_actions' counts flow
 * translations; 'xlate_actions_oversize' counts translations whose result was
 * considered oversize — see their uses later in this file.) */
COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
|
|
|
|
|
|
2013-06-13 18:10:00 -07:00
|
|
|
|
/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64

/* Maximum number of resubmit actions in a flow translation, whether they are
 * recursive or not. */
#define MAX_RESUBMITS (MAX_RESUBMIT_RECURSION * MAX_RESUBMIT_RECURSION)

/* Guards this module's translation state.  Translation paths (e.g.
 * xlate_receive(), xlate_actions__()) take it for reading; see the
 * OVS_REQ_RDLOCK annotations below. */
struct ovs_rwlock xlate_rwlock = OVS_RWLOCK_INITIALIZER;
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
/* Translation-module shadow of a bridge ("struct ofproto_dpif"): holds the
 * subset of bridge state that flow translation needs, kept in sync by
 * xlate_ofproto_set(). */
struct xbridge {
    struct hmap_node hmap_node;   /* Node in global 'xbridges' map. */
    struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */

    struct list xbundles;         /* Owned xbundles. */
    struct hmap xports;           /* Indexed by ofp_port. */

    char *name;                   /* Name used in log messages. */
    struct dpif *dpif;            /* Datapath interface. */

    /* Refcounted handles; xlate_ofproto_set() swaps them when they change
     * and xlate_remove_ofproto() releases them. */
    struct mac_learning *ml;      /* Mac learning handle. */
    struct mbridge *mbridge;      /* Mirroring. */
    struct dpif_sflow *sflow;     /* SFlow handle, or null. */
    struct dpif_ipfix *ipfix;     /* Ipfix handle, or null. */
    struct netflow *netflow;      /* Netflow handle, or null. */
    struct stp *stp;              /* STP or null if disabled. */

    /* Special rules installed by ofproto-dpif. */
    struct rule_dpif *miss_rule;
    struct rule_dpif *no_packet_in_rule;

    enum ofp_config_flags frag;   /* Fragmentation handling. */
    bool has_in_band;             /* Bridge has in band control? */
    bool forward_bpdu;            /* Bridge forwards STP BPDUs? */
};
|
|
|
|
|
|
|
|
|
|
/* Translation-module shadow of a port bundle ("struct ofbundle"), kept in
 * sync by xlate_bundle_set(). */
struct xbundle {
    struct hmap_node hmap_node;    /* In global 'xbundles' map. */
    struct ofbundle *ofbundle;     /* Key in global 'xbundles' map. */

    struct list list_node;         /* In parent 'xbridges' list. */
    struct xbridge *xbridge;       /* Parent xbridge. */

    struct list xports;            /* Contains "struct xport"s. */

    char *name;                    /* Name used in log messages. */
    struct bond *bond;             /* Nonnull iff more than one port. */
    struct lacp *lacp;             /* LACP handle or null. */

    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                      /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;         /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                    * NULL if all VLANs are trunked. */
    bool use_priority_tags;        /* Use 802.1p tag for frames in VLAN 0? */
    bool floodable;                /* No port has OFPUTIL_PC_NO_FLOOD set? */
};
|
|
|
|
|
|
|
|
|
|
/* Translation-module shadow of a port ("struct ofport_dpif"), kept in sync
 * by xlate_ofport_set(). */
struct xport {
    struct hmap_node hmap_node;      /* Node in global 'xports' map. */
    struct ofport_dpif *ofport;      /* Key in global 'xports' map. */

    struct hmap_node ofp_node;       /* Node in parent xbridge 'xports' map. */
    ofp_port_t ofp_port;             /* Key in parent xbridge 'xports' map. */

    odp_port_t odp_port;             /* Datapath port number or ODPP_NONE. */

    struct list bundle_node;         /* In parent xbundle (if it exists). */
    struct xbundle *xbundle;         /* Parent xbundle or null. */

    struct netdev *netdev;           /* 'ofport''s netdev. */

    struct xbridge *xbridge;         /* Parent bridge. */
    struct xport *peer;              /* Patch port peer or null. */

    enum ofputil_port_config config; /* OpenFlow port configuration. */
    enum ofputil_port_state state;   /* OpenFlow port state. */
    int stp_port_no;                 /* STP port number or -1 if not in use. */

    struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */

    bool may_enable;                 /* May be enabled in bonds. */
    bool is_tunnel;                  /* Is a tunnel port. */

    struct cfm *cfm;                 /* CFM handle or null. */
    struct bfd *bfd;                 /* BFD handle or null. */
};
|
|
|
|
|
|
2013-06-12 12:51:52 -07:00
|
|
|
|
/* Transient per-translation state, threaded through do_xlate_actions() and
 * friends while one flow's actions are translated into datapath actions. */
struct xlate_ctx {
    struct xlate_in *xin;       /* Translation input (caller-owned). */
    struct xlate_out *xout;     /* Translation output (caller-owned). */

    const struct xbridge *xbridge;  /* Bridge the packet arrived on. */

    /* Flow at the last commit. */
    struct flow base_flow;

    /* Tunnel IP destination address as received.  This is stored separately
     * as the base_flow.tunnel is cleared on init to reflect the datapath
     * behavior.  Used to make sure not to send tunneled output to ourselves,
     * which might lead to an infinite loop.  This could happen easily
     * if a tunnel is marked as 'ip_remote=flow', and the flow does not
     * actually set the tun_dst field. */
    ovs_be32 orig_tunnel_ip_dst;

    /* Stack for the push and pop actions.  Each stack element is of type
     * "union mf_subvalue". */
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
    struct ofpbuf stack;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    int mpls_depth_delta;       /* Delta of the mpls stack depth since
                                 * actions were last committed.
                                 * Must be between -1 and 1 inclusive. */
    ovs_be32 pre_push_mpls_lse; /* Used to record the top-most MPLS LSE
                                 * prior to an mpls_push so that it may be
                                 * used for a subsequent mpls_pop. */

    /* Resubmit statistics, via xlate_table_action(). */
    int recurse;                /* Current resubmit nesting depth. */
    int resubmits;              /* Total number of resubmits. */

    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    odp_port_t sflow_odp_port;  /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */

    /* OpenFlow 1.1+ action set.
     *
     * 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
     * When translation is otherwise complete, ofpacts_execute_action_set()
     * converts it to a set of "struct ofpact"s that can be translated into
     * datapath actions. */
    struct ofpbuf action_set;   /* Action set. */
    uint64_t action_set_stub[1024 / 8];
};
|
|
|
|
|
|
2013-06-11 13:32:30 -07:00
|
|
|
|
/* A controller may use OFPP_NONE as the ingress port to indicate that
 * it did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does
 * not have any 'port' structs, so care must be taken when dealing with it.
 * The bundle's name and vlan mode are initialized in lookup_input_bundle(). */
static struct xbundle ofpp_none_bundle;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
2013-07-06 10:25:06 -07:00
|
|
|
|
/* Node in 'xport''s 'skb_priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct skb_priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'skb_priorities'. */
    uint32_t skb_priority;      /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
/* Global maps from ofproto-dpif objects to their xlate-module shadows.
 * NOTE(review): readers such as xlate_receive() hold 'xlate_rwlock' while
 * consulting these maps; presumably writers hold it exclusively — confirm
 * with callers of the xlate_*_set()/remove() functions. */
static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
static struct hmap xports = HMAP_INITIALIZER(&xports);
|
|
|
|
|
|
|
|
|
|
static bool may_receive(const struct xport *, struct xlate_ctx *);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
|
|
|
|
|
struct xlate_ctx *);
|
2013-10-09 04:30:33 +00:00
|
|
|
|
static void xlate_actions__(struct xlate_in *, struct xlate_out *)
|
|
|
|
|
OVS_REQ_RDLOCK(xlate_rwlock);
|
2013-11-12 18:18:01 -08:00
|
|
|
|
static void xlate_normal(struct xlate_ctx *);
|
|
|
|
|
static void xlate_report(struct xlate_ctx *, const char *);
|
|
|
|
|
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
|
|
|
|
|
uint8_t table_id, bool may_packet_in);
|
2013-06-13 18:38:24 -07:00
|
|
|
|
static bool input_vid_is_valid(uint16_t vid, struct xbundle *, bool warn);
|
|
|
|
|
static uint16_t input_vid_to_vlan(const struct xbundle *, uint16_t vid);
|
|
|
|
|
static void output_normal(struct xlate_ctx *, const struct xbundle *,
|
2013-06-11 13:32:30 -07:00
|
|
|
|
uint16_t vlan);
|
2013-06-19 16:58:44 -07:00
|
|
|
|
static void compose_output_action(struct xlate_ctx *, ofp_port_t ofp_port);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
static struct xbridge *xbridge_lookup(const struct ofproto_dpif *);
|
|
|
|
|
static struct xbundle *xbundle_lookup(const struct ofbundle *);
|
2013-08-02 19:31:02 -07:00
|
|
|
|
static struct xport *xport_lookup(const struct ofport_dpif *);
|
2013-06-13 18:38:24 -07:00
|
|
|
|
static struct xport *get_ofp_port(const struct xbridge *, ofp_port_t ofp_port);
|
2013-07-06 10:25:06 -07:00
|
|
|
|
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
|
|
|
|
|
uint32_t skb_priority);
|
|
|
|
|
static void clear_skb_priorities(struct xport *);
|
|
|
|
|
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
|
|
|
|
|
uint8_t *dscp);
|
2013-06-13 18:38:24 -07:00
|
|
|
|
|
|
|
|
|
/* Registers bridge 'ofproto' with the xlate module, or updates its shadow
 * ('struct xbridge') if it is already registered.  Called whenever the
 * bridge's configuration changes so that translation sees current state.
 *
 * Each refcounted handle (ml, mbridge, sflow, ipfix, stp, netflow) is
 * swapped only when it differs from the stored one: the old reference is
 * released and a new one taken.  Plain-value fields are copied
 * unconditionally. */
void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                  struct dpif *dpif, struct rule_dpif *miss_rule,
                  struct rule_dpif *no_packet_in_rule,
                  const struct mac_learning *ml, struct stp *stp,
                  const struct mbridge *mbridge,
                  const struct dpif_sflow *sflow,
                  const struct dpif_ipfix *ipfix,
                  const struct netflow *netflow, enum ofp_config_flags frag,
                  bool forward_bpdu, bool has_in_band)
{
    struct xbridge *xbridge = xbridge_lookup(ofproto);

    if (!xbridge) {
        /* First sighting of this bridge: create and index its shadow. */
        xbridge = xzalloc(sizeof *xbridge);
        xbridge->ofproto = ofproto;

        hmap_insert(&xbridges, &xbridge->hmap_node, hash_pointer(ofproto, 0));
        hmap_init(&xbridge->xports);
        list_init(&xbridge->xbundles);
    }

    if (xbridge->ml != ml) {
        mac_learning_unref(xbridge->ml);
        xbridge->ml = mac_learning_ref(ml);
    }

    if (xbridge->mbridge != mbridge) {
        mbridge_unref(xbridge->mbridge);
        xbridge->mbridge = mbridge_ref(mbridge);
    }

    if (xbridge->sflow != sflow) {
        dpif_sflow_unref(xbridge->sflow);
        xbridge->sflow = dpif_sflow_ref(sflow);
    }

    if (xbridge->ipfix != ipfix) {
        dpif_ipfix_unref(xbridge->ipfix);
        xbridge->ipfix = dpif_ipfix_ref(ipfix);
    }

    if (xbridge->stp != stp) {
        stp_unref(xbridge->stp);
        xbridge->stp = stp_ref(stp);
    }

    if (xbridge->netflow != netflow) {
        netflow_unref(xbridge->netflow);
        xbridge->netflow = netflow_ref(netflow);
    }

    /* Name is heap-owned by the xbridge; replace the old copy. */
    free(xbridge->name);
    xbridge->name = xstrdup(name);

    xbridge->dpif = dpif;
    xbridge->forward_bpdu = forward_bpdu;
    xbridge->has_in_band = has_in_band;
    xbridge->frag = frag;
    xbridge->miss_rule = miss_rule;
    xbridge->no_packet_in_rule = no_packet_in_rule;
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
xlate_remove_ofproto(struct ofproto_dpif *ofproto)
|
|
|
|
|
{
|
|
|
|
|
struct xbridge *xbridge = xbridge_lookup(ofproto);
|
|
|
|
|
struct xbundle *xbundle, *next_xbundle;
|
|
|
|
|
struct xport *xport, *next_xport;
|
|
|
|
|
|
|
|
|
|
if (!xbridge) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
HMAP_FOR_EACH_SAFE (xport, next_xport, ofp_node, &xbridge->xports) {
|
|
|
|
|
xlate_ofport_remove(xport->ofport);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
LIST_FOR_EACH_SAFE (xbundle, next_xbundle, list_node, &xbridge->xbundles) {
|
|
|
|
|
xlate_bundle_remove(xbundle->ofbundle);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
hmap_remove(&xbridges, &xbridge->hmap_node);
|
2013-08-15 18:37:41 -07:00
|
|
|
|
mac_learning_unref(xbridge->ml);
|
|
|
|
|
mbridge_unref(xbridge->mbridge);
|
|
|
|
|
dpif_sflow_unref(xbridge->sflow);
|
|
|
|
|
dpif_ipfix_unref(xbridge->ipfix);
|
|
|
|
|
stp_unref(xbridge->stp);
|
|
|
|
|
hmap_destroy(&xbridge->xports);
|
2013-06-13 18:38:24 -07:00
|
|
|
|
free(xbridge->name);
|
|
|
|
|
free(xbridge);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Registers bundle 'ofbundle' on bridge 'ofproto' with the xlate module, or
 * updates its shadow ('struct xbundle') if already registered.  Plain-value
 * fields are copied unconditionally; the refcounted bond and lacp handles
 * are swapped only when they change. */
void
xlate_bundle_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 const char *name, enum port_vlan_mode vlan_mode, int vlan,
                 unsigned long *trunks, bool use_priority_tags,
                 const struct bond *bond, const struct lacp *lacp,
                 bool floodable)
{
    struct xbundle *xbundle = xbundle_lookup(ofbundle);

    if (!xbundle) {
        /* First sighting: create the shadow and link it to its bridge. */
        xbundle = xzalloc(sizeof *xbundle);
        xbundle->ofbundle = ofbundle;
        xbundle->xbridge = xbridge_lookup(ofproto);

        hmap_insert(&xbundles, &xbundle->hmap_node, hash_pointer(ofbundle, 0));
        list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
        list_init(&xbundle->xports);
    }

    /* The bridge must have been registered before any of its bundles. */
    ovs_assert(xbundle->xbridge);

    free(xbundle->name);
    xbundle->name = xstrdup(name);

    xbundle->vlan_mode = vlan_mode;
    xbundle->vlan = vlan;
    xbundle->trunks = trunks;
    xbundle->use_priority_tags = use_priority_tags;
    xbundle->floodable = floodable;

    if (xbundle->bond != bond) {
        bond_unref(xbundle->bond);
        xbundle->bond = bond_ref(bond);
    }

    if (xbundle->lacp != lacp) {
        lacp_unref(xbundle->lacp);
        xbundle->lacp = lacp_ref(lacp);
    }
}
|
|
|
|
|
|
|
|
|
|
/* Unregisters bundle 'ofbundle' from the xlate module: detaches its member
 * xports (they survive, just bundle-less), unlinks the shadow from the
 * global map and its bridge's list, releases bond/lacp references, and
 * frees it.  No-op if 'ofbundle' was never registered. */
void
xlate_bundle_remove(struct ofbundle *ofbundle)
{
    struct xbundle *xbundle = xbundle_lookup(ofbundle);
    struct xport *xport, *next;

    if (!xbundle) {
        return;
    }

    /* _SAFE iteration because each xport is unlinked as we go. */
    LIST_FOR_EACH_SAFE (xport, next, bundle_node, &xbundle->xports) {
        list_remove(&xport->bundle_node);
        xport->xbundle = NULL;
    }

    hmap_remove(&xbundles, &xbundle->hmap_node);
    list_remove(&xbundle->list_node);
    bond_unref(xbundle->bond);
    lacp_unref(xbundle->lacp);
    free(xbundle->name);
    free(xbundle);
}
|
|
|
|
|
|
|
|
|
|
/* Registers port 'ofport' (OpenFlow port 'ofp_port' on bundle 'ofbundle' of
 * bridge 'ofproto') with the xlate module, or updates its shadow
 * ('struct xport') if already registered.
 *
 * Refcounted handles (netdev, cfm, bfd) are swapped only when changed.
 * The patch-port peer link and the bundle membership are re-established
 * from scratch on every call, and the skb_priority->DSCP map is rebuilt
 * from 'qdscp_list'. */
void
xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                 struct ofport_dpif *ofport, ofp_port_t ofp_port,
                 odp_port_t odp_port, const struct netdev *netdev,
                 const struct cfm *cfm, const struct bfd *bfd,
                 struct ofport_dpif *peer, int stp_port_no,
                 const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
                 enum ofputil_port_config config,
                 enum ofputil_port_state state, bool is_tunnel,
                 bool may_enable)
{
    struct xport *xport = xport_lookup(ofport);
    size_t i;

    if (!xport) {
        /* First sighting: create the shadow and index it both globally and
         * in its parent bridge's per-ofp_port map. */
        xport = xzalloc(sizeof *xport);
        xport->ofport = ofport;
        xport->xbridge = xbridge_lookup(ofproto);
        xport->ofp_port = ofp_port;

        hmap_init(&xport->skb_priorities);
        hmap_insert(&xports, &xport->hmap_node, hash_pointer(ofport, 0));
        hmap_insert(&xport->xbridge->xports, &xport->ofp_node,
                    hash_ofp_port(xport->ofp_port));
    }

    /* An ofport's OpenFlow port number must never change. */
    ovs_assert(xport->ofp_port == ofp_port);

    xport->config = config;
    xport->state = state;
    xport->stp_port_no = stp_port_no;
    xport->is_tunnel = is_tunnel;
    xport->may_enable = may_enable;
    xport->odp_port = odp_port;

    if (xport->netdev != netdev) {
        netdev_close(xport->netdev);
        xport->netdev = netdev_ref(netdev);
    }

    if (xport->cfm != cfm) {
        cfm_unref(xport->cfm);
        xport->cfm = cfm_ref(cfm);
    }

    if (xport->bfd != bfd) {
        bfd_unref(xport->bfd);
        xport->bfd = bfd_ref(bfd);
    }

    /* Re-link the patch-port peer pointers symmetrically: break the old
     * link (if any) before establishing the new one. */
    if (xport->peer) {
        xport->peer->peer = NULL;
    }
    xport->peer = xport_lookup(peer);
    if (xport->peer) {
        xport->peer->peer = xport;
    }

    /* Move the port to its (possibly new) bundle. */
    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }
    xport->xbundle = xbundle_lookup(ofbundle);
    if (xport->xbundle) {
        list_insert(&xport->xbundle->xports, &xport->bundle_node);
    }

    /* Rebuild the skb_priority -> DSCP map from the configured queues. */
    clear_skb_priorities(xport);
    for (i = 0; i < n_qdscp; i++) {
        struct skb_priority_to_dscp *pdscp;
        uint32_t skb_priority;

        /* Skip queues the datapath cannot map to a priority. */
        if (dpif_queue_to_priority(xport->xbridge->dpif, qdscp_list[i].queue,
                                   &skb_priority)) {
            continue;
        }

        pdscp = xmalloc(sizeof *pdscp);
        pdscp->skb_priority = skb_priority;
        /* Shift the 6-bit DSCP value into its position in the IP TOS byte. */
        pdscp->dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        hmap_insert(&xport->skb_priorities, &pdscp->hmap_node,
                    hash_int(pdscp->skb_priority, 0));
    }
}
|
|
|
|
|
|
|
|
|
|
/* Unregisters port 'ofport' from the xlate module: breaks its patch-port
 * peer link, detaches it from its bundle, frees its DSCP map, unlinks it
 * from the global and per-bridge maps, releases its netdev/cfm/bfd
 * references, and frees the shadow.  No-op if never registered. */
void
xlate_ofport_remove(struct ofport_dpif *ofport)
{
    struct xport *xport = xport_lookup(ofport);

    if (!xport) {
        return;
    }

    /* Break the symmetric patch-port peer link. */
    if (xport->peer) {
        xport->peer->peer = NULL;
        xport->peer = NULL;
    }

    if (xport->xbundle) {
        list_remove(&xport->bundle_node);
    }

    clear_skb_priorities(xport);
    hmap_destroy(&xport->skb_priorities);

    hmap_remove(&xports, &xport->hmap_node);
    hmap_remove(&xport->xbridge->xports, &xport->ofp_node);

    netdev_close(xport->netdev);
    cfm_unref(xport->cfm);
    bfd_unref(xport->bfd);
    free(xport);
}
|
|
|
|
|
|
2013-08-02 12:43:03 -07:00
|
|
|
|
/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
 * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
 * returned by odp_flow_key_to_flow().  Also, optionally populates 'ofproto'
 * with the ofproto_dpif, 'odp_in_port' with the datapath in_port, that
 * 'packet' ingressed, and 'ipfix', 'sflow', and 'netflow' with the appropriate
 * handles for those protocols if they're enabled.  Caller is responsible for
 * unrefing them.
 *
 * If 'ofproto' is nonnull, requires 'flow''s in_port to exist.  Otherwise sets
 * 'flow''s in_port to OFPP_NONE.
 *
 * This function does post-processing on data returned from
 * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
 * of the upcall processing logic.  In particular, if the extracted in_port is
 * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
 * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
 * a VLAN header onto 'packet' (if it is nonnull).
 *
 * Similarly, this function also includes some logic to help with tunnels.  It
 * may modify 'flow' as necessary to make the tunneling implementation
 * transparent to the upcall processing logic.
 *
 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
 * or some other positive errno if there are other problems. */
int
xlate_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
              const struct nlattr *key, size_t key_len,
              struct flow *flow, enum odp_key_fitness *fitnessp,
              struct ofproto_dpif **ofproto, struct dpif_ipfix **ipfix,
              struct dpif_sflow **sflow, struct netflow **netflow,
              odp_port_t *odp_in_port)
{
    enum odp_key_fitness fitness;
    const struct xport *xport;
    int error = ENODEV;         /* Returned if no xport matches the flow. */

    /* Hold the read lock for the whole lookup so the xport and its bridge
     * cannot be torn down underneath us. */
    ovs_rwlock_rdlock(&xlate_rwlock);
    fitness = odp_flow_key_to_flow(key, key_len, flow);
    if (fitness == ODP_FIT_ERROR) {
        error = EINVAL;
        goto exit;
    }

    if (odp_in_port) {
        *odp_in_port = flow->in_port.odp_port;
    }

    /* Tunneled packets resolve to the tunnel's ofport; everything else maps
     * straight from the datapath port number. */
    xport = xport_lookup(tnl_port_should_receive(flow)
                         ? tnl_port_receive(flow)
                         : odp_port_to_ofport(backer, flow->in_port.odp_port));

    flow->in_port.ofp_port = xport ? xport->ofp_port : OFPP_NONE;
    if (!xport) {
        goto exit;
    }

    if (vsp_adjust_flow(xport->xbridge->ofproto, flow)) {
        if (packet) {
            /* Make the packet resemble the flow, so that it gets sent to
             * an OpenFlow controller properly, so that it looks correct
             * for sFlow, and so that flow_extract() will get the correct
             * vlan_tci if it is called on 'packet'. */
            eth_push_vlan(packet, flow->vlan_tci);
        }
        /* We can't reproduce 'key' from 'flow'. */
        fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
    }
    error = 0;

    if (ofproto) {
        *ofproto = xport->xbridge->ofproto;
    }

    if (ipfix) {
        *ipfix = dpif_ipfix_ref(xport->xbridge->ipfix);
    }

    if (sflow) {
        *sflow = dpif_sflow_ref(xport->xbridge->sflow);
    }

    if (netflow) {
        *netflow = netflow_ref(xport->xbridge->netflow);
    }

exit:
    if (fitnessp) {
        *fitnessp = fitness;
    }
    ovs_rwlock_unlock(&xlate_rwlock);
    return error;
}
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
static struct xbridge *
|
|
|
|
|
xbridge_lookup(const struct ofproto_dpif *ofproto)
|
|
|
|
|
{
|
|
|
|
|
struct xbridge *xbridge;
|
|
|
|
|
|
2013-08-02 19:31:02 -07:00
|
|
|
|
if (!ofproto) {
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
HMAP_FOR_EACH_IN_BUCKET (xbridge, hmap_node, hash_pointer(ofproto, 0),
|
|
|
|
|
&xbridges) {
|
|
|
|
|
if (xbridge->ofproto == ofproto) {
|
|
|
|
|
return xbridge;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct xbundle *
|
|
|
|
|
xbundle_lookup(const struct ofbundle *ofbundle)
|
|
|
|
|
{
|
|
|
|
|
struct xbundle *xbundle;
|
|
|
|
|
|
2013-08-02 19:31:02 -07:00
|
|
|
|
if (!ofbundle) {
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
HMAP_FOR_EACH_IN_BUCKET (xbundle, hmap_node, hash_pointer(ofbundle, 0),
|
|
|
|
|
&xbundles) {
|
|
|
|
|
if (xbundle->ofbundle == ofbundle) {
|
|
|
|
|
return xbundle;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct xport *
|
2013-08-02 19:31:02 -07:00
|
|
|
|
xport_lookup(const struct ofport_dpif *ofport)
|
2013-06-13 18:38:24 -07:00
|
|
|
|
{
|
|
|
|
|
struct xport *xport;
|
|
|
|
|
|
2013-08-02 19:31:02 -07:00
|
|
|
|
if (!ofport) {
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
HMAP_FOR_EACH_IN_BUCKET (xport, hmap_node, hash_pointer(ofport, 0),
|
|
|
|
|
&xports) {
|
|
|
|
|
if (xport->ofport == ofport) {
|
|
|
|
|
return xport;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2013-08-02 14:55:31 -07:00
|
|
|
|
static struct stp_port *
|
|
|
|
|
xport_get_stp_port(const struct xport *xport)
|
|
|
|
|
{
|
2013-09-04 15:21:15 -07:00
|
|
|
|
return xport->xbridge->stp && xport->stp_port_no != -1
|
2013-08-02 14:55:31 -07:00
|
|
|
|
? stp_get_port(xport->xbridge->stp, xport->stp_port_no)
|
|
|
|
|
: NULL;
|
|
|
|
|
}
|
2013-07-06 09:31:35 -07:00
|
|
|
|
|
|
|
|
|
static enum stp_state
|
|
|
|
|
xport_stp_learn_state(const struct xport *xport)
|
|
|
|
|
{
|
2013-08-02 14:55:31 -07:00
|
|
|
|
struct stp_port *sp = xport_get_stp_port(xport);
|
|
|
|
|
return stp_learn_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
|
2013-07-06 09:31:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
xport_stp_forward_state(const struct xport *xport)
|
|
|
|
|
{
|
2013-08-02 14:55:31 -07:00
|
|
|
|
struct stp_port *sp = xport_get_stp_port(xport);
|
|
|
|
|
return stp_forward_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
|
2013-07-06 09:31:35 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Returns true if STP should process 'flow'.  Sets fields in 'wc' that
 * were used to make the determination. */
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
    /* The decision depends on the destination MAC, so unwildcard it even
     * when the answer is "no". */
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}
|
|
|
|
|
|
|
|
|
|
/* Hands the BPDU in 'packet', received on 'xport', to the STP
 * implementation.  Operates on a shallow copy of 'packet' so the caller's
 * buffer pointers are unmodified. */
static void
stp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
{
    struct stp_port *sp = xport_get_stp_port(xport);
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload.  BPDUs are LLC-encapsulated, so
     * eth_type here is the 802.3 length field. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }

    /* Strip the Ethernet and LLC headers; what remains is the BPDU. */
    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
    }
}
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
static struct xport *
|
|
|
|
|
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
|
|
|
|
|
{
|
|
|
|
|
struct xport *xport;
|
|
|
|
|
|
|
|
|
|
HMAP_FOR_EACH_IN_BUCKET (xport, ofp_node, hash_ofp_port(ofp_port),
|
|
|
|
|
&xbridge->xports) {
|
|
|
|
|
if (xport->ofp_port == ofp_port) {
|
|
|
|
|
return xport;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static odp_port_t
|
|
|
|
|
ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
|
|
|
|
|
{
|
|
|
|
|
const struct xport *xport = get_ofp_port(xbridge, ofp_port);
|
|
|
|
|
return xport ? xport->odp_port : ODPP_NONE;
|
|
|
|
|
}
|
|
|
|
|
|
2013-10-30 18:17:18 +09:00
|
|
|
|
static bool
|
|
|
|
|
odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
|
|
|
|
|
{
|
|
|
|
|
struct xport *xport;
|
|
|
|
|
|
|
|
|
|
xport = get_ofp_port(ctx->xbridge, ofp_port);
|
|
|
|
|
if (!xport || xport->config & OFPUTIL_PC_PORT_DOWN ||
|
|
|
|
|
xport->state & OFPUTIL_PS_LINK_DOWN) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static const struct ofputil_bucket *
|
|
|
|
|
group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
|
|
|
|
|
int depth);
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
|
|
|
|
|
{
|
|
|
|
|
struct group_dpif *group;
|
|
|
|
|
bool hit;
|
|
|
|
|
|
|
|
|
|
hit = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
|
|
|
|
|
if (!hit) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
hit = group_first_live_bucket(ctx, group, depth) != NULL;
|
|
|
|
|
|
|
|
|
|
group_dpif_release(group);
|
|
|
|
|
return hit;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
bucket_is_alive(const struct xlate_ctx *ctx,
|
|
|
|
|
const struct ofputil_bucket *bucket, int depth)
|
|
|
|
|
{
|
|
|
|
|
if (depth >= MAX_LIVENESS_RECURSION) {
|
|
|
|
|
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
|
|
|
|
|
|
|
|
|
|
VLOG_WARN_RL(&rl, "bucket chaining exceeded %d links",
|
|
|
|
|
MAX_LIVENESS_RECURSION);
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2013-10-30 18:17:19 +09:00
|
|
|
|
return !ofputil_bucket_has_liveness(bucket) ||
|
2013-11-12 18:18:01 -08:00
|
|
|
|
(bucket->watch_port != OFPP_ANY &&
|
|
|
|
|
odp_port_is_alive(ctx, bucket->watch_port)) ||
|
|
|
|
|
(bucket->watch_group != OFPG_ANY &&
|
|
|
|
|
group_is_alive(ctx, bucket->watch_group, depth + 1));
|
2013-10-30 18:17:18 +09:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static const struct ofputil_bucket *
|
|
|
|
|
group_first_live_bucket(const struct xlate_ctx *ctx,
|
|
|
|
|
const struct group_dpif *group, int depth)
|
|
|
|
|
{
|
|
|
|
|
struct ofputil_bucket *bucket;
|
|
|
|
|
const struct list *buckets;
|
|
|
|
|
|
|
|
|
|
group_dpif_get_buckets(group, &buckets);
|
|
|
|
|
LIST_FOR_EACH (bucket, list_node, buckets) {
|
|
|
|
|
if (bucket_is_alive(ctx, bucket, depth)) {
|
|
|
|
|
return bucket;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2013-10-30 18:17:19 +09:00
|
|
|
|
static const struct ofputil_bucket *
|
|
|
|
|
group_best_live_bucket(const struct xlate_ctx *ctx,
|
|
|
|
|
const struct group_dpif *group,
|
|
|
|
|
uint32_t basis)
|
|
|
|
|
{
|
|
|
|
|
const struct ofputil_bucket *best_bucket = NULL;
|
|
|
|
|
uint32_t best_score = 0;
|
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
|
|
const struct ofputil_bucket *bucket;
|
|
|
|
|
const struct list *buckets;
|
|
|
|
|
|
|
|
|
|
group_dpif_get_buckets(group, &buckets);
|
|
|
|
|
LIST_FOR_EACH (bucket, list_node, buckets) {
|
|
|
|
|
if (bucket_is_alive(ctx, bucket, 0)) {
|
2013-10-30 18:17:20 +09:00
|
|
|
|
uint32_t score = (hash_int(i, basis) & 0xffff) * bucket->weight;
|
2013-10-30 18:17:19 +09:00
|
|
|
|
if (score >= best_score) {
|
|
|
|
|
best_bucket = bucket;
|
|
|
|
|
best_score = score;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
i++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return best_bucket;
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-11 13:32:30 -07:00
|
|
|
|
static bool
|
2013-06-13 18:38:24 -07:00
|
|
|
|
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
|
|
|
|
return (bundle->vlan_mode != PORT_VLAN_ACCESS
|
|
|
|
|
&& (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
2013-06-13 18:38:24 -07:00
|
|
|
|
xbundle_includes_vlan(const struct xbundle *xbundle, uint16_t vlan)
|
|
|
|
|
{
|
|
|
|
|
return vlan == xbundle->vlan || xbundle_trunks_vlan(xbundle, vlan);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static mirror_mask_t
|
|
|
|
|
xbundle_mirror_out(const struct xbridge *xbridge, struct xbundle *xbundle)
|
|
|
|
|
{
|
|
|
|
|
return xbundle != &ofpp_none_bundle
|
|
|
|
|
? mirror_bundle_out(xbridge->mbridge, xbundle->ofbundle)
|
|
|
|
|
: 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static mirror_mask_t
|
|
|
|
|
xbundle_mirror_src(const struct xbridge *xbridge, struct xbundle *xbundle)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
2013-06-13 18:38:24 -07:00
|
|
|
|
return xbundle != &ofpp_none_bundle
|
|
|
|
|
? mirror_bundle_src(xbridge->mbridge, xbundle->ofbundle)
|
|
|
|
|
: 0;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
static mirror_mask_t
|
|
|
|
|
xbundle_mirror_dst(const struct xbridge *xbridge, struct xbundle *xbundle)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
2013-06-13 18:38:24 -07:00
|
|
|
|
return xbundle != &ofpp_none_bundle
|
|
|
|
|
? mirror_bundle_dst(xbridge->mbridge, xbundle->ofbundle)
|
|
|
|
|
: 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Returns the bundle on 'xbridge' that received a packet on OpenFlow port
 * 'in_port', or NULL if the port is unknown or not part of a bundle.  If
 * 'in_xportp' is nonnull, stores the xport (possibly NULL) through it.
 * When 'warn' is true, logs a rate-limited warning for unknown ports. */
static struct xbundle *
lookup_input_bundle(const struct xbridge *xbridge, ofp_port_t in_port,
                    bool warn, struct xport **in_xportp)
{
    struct xport *xport;

    /* Find the port and bundle for the received packet. */
    xport = get_ofp_port(xbridge, in_port);
    if (in_xportp) {
        *in_xportp = xport;
    }
    if (xport && xport->xbundle) {
        return xport->xbundle;
    }

    /* Special-case OFPP_NONE, which a controller may use as the ingress
     * port for traffic that it is sourcing. */
    if (in_port == OFPP_NONE) {
        /* NOTE: initializes the shared global bundle in place; every caller
         * sees the same object. */
        ofpp_none_bundle.name = "OFPP_NONE";
        ofpp_none_bundle.vlan_mode = PORT_VLAN_TRUNK;
        return &ofpp_none_bundle;
    }

    /* Odd. A few possible reasons here:
     *
     * - We deleted a port but there are still a few packets queued up
     *   from it.
     *
     * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
     *   we don't know about.
     *
     * - The ofproto client didn't configure the port as part of a bundle.
     *   This is particularly likely to happen if a packet was received on the
     *   port after it was created, but before the client had a chance to
     *   configure its bundle.
     */
    if (warn) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
                     "port %"PRIu16, xbridge->name, in_port);
    }
    return NULL;
}
|
|
|
|
|
|
|
|
|
|
/* Appends output actions for every mirror that should receive a copy of the
 * packet described by 'orig_flow'.  Consumes and rebuilds ctx->xout->mirrors
 * so that each mirror (and its duplicates) is emitted at most once. */
static void
add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
    const struct xbridge *xbridge = ctx->xbridge;
    mirror_mask_t mirrors;
    struct xbundle *in_xbundle;
    uint16_t vlan;
    uint16_t vid;

    /* Take ownership of the already-accumulated mirror set; bits are added
     * back below as mirrors are actually emitted. */
    mirrors = ctx->xout->mirrors;
    ctx->xout->mirrors = 0;

    in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, NULL);
    if (!in_xbundle) {
        return;
    }
    mirrors |= xbundle_mirror_src(xbridge, in_xbundle);

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        /* Discard any actions composed so far: the packet must not go
         * anywhere at all. */
        ofpbuf_clear(&ctx->xout->odp_actions);
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(orig_flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    if (!mirrors) {
        return;
    }

    /* Restore the original packet before adding the mirror actions. */
    ctx->xin->flow = *orig_flow;

    /* Process one mirror per iteration, clearing its bit (and the bits of
     * any mirrors that would duplicate its output) from 'mirrors'. */
    while (mirrors) {
        mirror_mask_t dup_mirrors;
        struct ofbundle *out;
        unsigned long *vlans;
        bool vlan_mirrored;
        bool has_mirror;
        int out_vlan;

        has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
                                &vlans, &dup_mirrors, &out, &out_vlan);
        ovs_assert(has_mirror);

        if (vlans) {
            /* This mirror selects by VLAN, so the VLAN fields affect the
             * outcome and must be unwildcarded. */
            ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
        }
        vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
        free(vlans);  /* mirror_get() transfers ownership of 'vlans'. */

        if (!vlan_mirrored) {
            mirrors = zero_rightmost_1bit(mirrors);
            continue;
        }

        mirrors &= ~dup_mirrors;
        ctx->xout->mirrors |= dup_mirrors;
        if (out) {
            /* Mirror to a specific output bundle. */
            struct xbundle *out_xbundle = xbundle_lookup(out);
            if (out_xbundle) {
                output_normal(ctx, out_xbundle, vlan);
            }
        } else if (vlan != out_vlan
                   && !eth_addr_is_reserved(orig_flow->dl_dst)) {
            /* Output-VLAN mirror: flood on every bundle that carries
             * 'out_vlan', except bundles reserved for mirroring. */
            struct xbundle *xbundle;

            LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
                if (xbundle_includes_vlan(xbundle, out_vlan)
                    && !xbundle_mirror_out(xbridge, xbundle)) {
                    output_normal(ctx, xbundle, out_vlan);
                }
            }
        }
    }
}
|
|
|
|
|
|
|
|
|
|
/* Given 'vid', the VID obtained from the 802.1Q header that was received as
|
2013-06-13 18:38:24 -07:00
|
|
|
|
* part of a packet (specify 0 if there was no 802.1Q header), and 'in_xbundle',
|
2013-06-11 13:32:30 -07:00
|
|
|
|
* the bundle on which the packet was received, returns the VLAN to which the
|
|
|
|
|
* packet belongs.
|
|
|
|
|
*
|
|
|
|
|
* Both 'vid' and the return value are in the range 0...4095. */
|
|
|
|
|
static uint16_t
|
2013-06-13 18:38:24 -07:00
|
|
|
|
input_vid_to_vlan(const struct xbundle *in_xbundle, uint16_t vid)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
2013-06-13 18:38:24 -07:00
|
|
|
|
switch (in_xbundle->vlan_mode) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
case PORT_VLAN_ACCESS:
|
2013-06-13 18:38:24 -07:00
|
|
|
|
return in_xbundle->vlan;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case PORT_VLAN_TRUNK:
|
|
|
|
|
return vid;
|
|
|
|
|
|
|
|
|
|
case PORT_VLAN_NATIVE_UNTAGGED:
|
|
|
|
|
case PORT_VLAN_NATIVE_TAGGED:
|
2013-06-13 18:38:24 -07:00
|
|
|
|
return vid ? vid : in_xbundle->vlan;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
|
|
|
|
default:
|
2013-12-17 10:32:12 -08:00
|
|
|
|
OVS_NOT_REACHED();
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
/* Checks whether a packet with the given 'vid' may ingress on 'in_xbundle'.
 * If so, returns true.  Otherwise, returns false and, if 'warn' is true, logs
 * a warning.
 *
 * 'vid' should be the VID obtained from the 802.1Q header that was received as
 * part of a packet (specify 0 if there was no 802.1Q header), in the range
 * 0...4095. */
static bool
input_vid_is_valid(uint16_t vid, struct xbundle *in_xbundle, bool warn)
{
    /* Allow any VID on the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return true;
    }

    switch (in_xbundle->vlan_mode) {
    case PORT_VLAN_ACCESS:
        /* Access ports only accept untagged packets. */
        if (vid) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" tagged "
                             "packet received on port %s configured as VLAN "
                             "%"PRIu16" access port", vid, in_xbundle->name,
                             in_xbundle->vlan);
            }
            return false;
        }
        return true;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (!vid) {
            /* Port must always carry its native VLAN. */
            return true;
        }
        /* Tagged packets on native ports are subject to the same trunk
         * check as trunk ports. */
        /* Fall through. */
    case PORT_VLAN_TRUNK:
        if (!xbundle_includes_vlan(in_xbundle, vid)) {
            if (warn) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_WARN_RL(&rl, "dropping VLAN %"PRIu16" packet "
                             "received on port %s not configured for trunking "
                             "VLAN %"PRIu16, vid, in_xbundle->name, vid);
            }
            return false;
        }
        return true;

    default:
        OVS_NOT_REACHED();
    }

}
|
|
|
|
|
|
|
|
|
|
/* Given 'vlan', the VLAN that a packet belongs to, and
|
2013-06-13 18:38:24 -07:00
|
|
|
|
* 'out_xbundle', a bundle on which the packet is to be output, returns the VID
|
2013-06-11 13:32:30 -07:00
|
|
|
|
* that should be included in the 802.1Q header. (If the return value is 0,
|
|
|
|
|
* then the 802.1Q header should only be included in the packet if there is a
|
|
|
|
|
* nonzero PCP.)
|
|
|
|
|
*
|
|
|
|
|
* Both 'vlan' and the return value are in the range 0...4095. */
|
|
|
|
|
static uint16_t
|
2013-06-13 18:38:24 -07:00
|
|
|
|
output_vlan_to_vid(const struct xbundle *out_xbundle, uint16_t vlan)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
2013-06-13 18:38:24 -07:00
|
|
|
|
switch (out_xbundle->vlan_mode) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
case PORT_VLAN_ACCESS:
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
case PORT_VLAN_TRUNK:
|
|
|
|
|
case PORT_VLAN_NATIVE_TAGGED:
|
|
|
|
|
return vlan;
|
|
|
|
|
|
|
|
|
|
case PORT_VLAN_NATIVE_UNTAGGED:
|
2013-06-13 18:38:24 -07:00
|
|
|
|
return vlan == out_xbundle->vlan ? 0 : vlan;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
|
|
|
|
default:
|
2013-12-17 10:32:12 -08:00
|
|
|
|
OVS_NOT_REACHED();
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Composes an output action to 'out_xbundle' for a packet classified into
 * 'vlan', choosing a bond slave if the bundle is bonded and rewriting the
 * flow's VLAN TCI to the egress tag for the duration of the output. */
static void
output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
              uint16_t vlan)
{
    ovs_be16 *flow_tci = &ctx->xin->flow.vlan_tci;
    uint16_t vid;
    ovs_be16 tci, old_tci;
    struct xport *xport;

    vid = output_vlan_to_vid(out_xbundle, vlan);
    if (list_is_empty(&out_xbundle->xports)) {
        /* Partially configured bundle with no slaves. Drop the packet. */
        return;
    } else if (!out_xbundle->bond) {
        /* Non-bonded bundle: always use its single (first) member port. */
        xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
                             bundle_node);
    } else {
        struct ofport_dpif *ofport;

        /* Let the bond module pick the slave; this may also unwildcard the
         * fields it hashed on via 'wc'. */
        ofport = bond_choose_output_slave(out_xbundle->bond, &ctx->xin->flow,
                                          &ctx->xout->wc, vid);
        xport = xport_lookup(ofport);

        if (!xport) {
            /* No slaves enabled, so drop packet. */
            return;
        }

        if (ctx->xin->resubmit_stats) {
            bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
                         ctx->xin->resubmit_stats->n_bytes);
        }
    }

    /* Temporarily rewrite the flow's TCI to the egress tag, compose the
     * output, then restore it so later translation sees the ingress TCI. */
    old_tci = *flow_tci;
    tci = htons(vid);
    if (tci || out_xbundle->use_priority_tags) {
        /* Preserve the priority bits; set CFI only if the tag is nonempty. */
        tci |= *flow_tci & htons(VLAN_PCP_MASK);
        if (tci) {
            tci |= htons(VLAN_CFI);
        }
    }
    *flow_tci = tci;

    compose_output_action(ctx, xport->ofp_port);
    *flow_tci = old_tci;
}
|
|
|
|
|
|
|
|
|
|
/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
|
|
|
|
|
* migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
|
|
|
|
|
* indicate this; newer upstream kernels use gratuitous ARP requests. */
|
|
|
|
|
static bool
|
|
|
|
|
is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
|
|
|
|
|
{
|
|
|
|
|
if (flow->dl_type != htons(ETH_TYPE_ARP)) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
|
|
|
|
|
if (!eth_addr_is_broadcast(flow->dl_dst)) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
|
|
|
|
|
if (flow->nw_proto == ARP_OP_REPLY) {
|
|
|
|
|
return true;
|
|
|
|
|
} else if (flow->nw_proto == ARP_OP_REQUEST) {
|
|
|
|
|
memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
|
|
|
|
|
memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
|
|
|
|
|
|
|
|
|
|
return flow->nw_src == flow->nw_dst;
|
|
|
|
|
} else {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-08-06 20:35:29 -07:00
|
|
|
|
/* Checks whether a MAC learning update is necessary for MAC learning table
 * 'ml' given that a packet matching 'flow' was received on 'in_xbundle' in
 * 'vlan'.
 *
 * Most packets processed through the MAC learning table do not actually
 * change it in any way.  This function requires only a read lock on the MAC
 * learning table, so it is much cheaper in this common case.
 *
 * Keep the code here synchronized with that in update_learning_table__()
 * below. */
static bool
is_mac_learning_update_needed(const struct mac_learning *ml,
                              const struct flow *flow,
                              struct flow_wildcards *wc,
                              int vlan, struct xbundle *in_xbundle)
OVS_REQ_RDLOCK(ml->rwlock)
{
    struct mac_entry *mac;

    /* Learning disabled (or address not learnable) for this MAC/VLAN:
     * nothing to update. */
    if (!mac_learning_may_learn(ml, flow->dl_src, vlan)) {
        return false;
    }

    mac = mac_learning_lookup(ml, flow->dl_src, vlan);
    if (!mac || mac_entry_age(ml, mac)) {
        /* Unknown or expiring entry: the write path must (re)insert it. */
        return true;
    }

    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_xbundle->bond) {
            return true;
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return false;
        }
    }

    /* Update needed only if the MAC has moved to a different bundle. */
    return mac->port.p != in_xbundle->ofbundle;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Updates MAC learning table 'ml' given that a packet matching 'flow' was
 * received on 'in_xbundle' in 'vlan'.
 *
 * This code repeats all the checks in is_mac_learning_update_needed() because
 * the lock was released between there and here and thus the MAC learning state
 * could have changed.
 *
 * Keep the code here synchronized with that in is_mac_learning_update_needed()
 * above. */
static void
update_learning_table__(const struct xbridge *xbridge,
                        const struct flow *flow, struct flow_wildcards *wc,
                        int vlan, struct xbundle *in_xbundle)
OVS_REQ_WRLOCK(xbridge->ml->rwlock)
{
    struct mac_entry *mac;

    if (!mac_learning_may_learn(xbridge->ml, flow->dl_src, vlan)) {
        return;
    }

    /* Inserts a fresh entry or refreshes an existing one. */
    mac = mac_learning_insert(xbridge->ml, flow->dl_src, vlan);
    if (is_gratuitous_arp(flow, wc)) {
        /* We don't want to learn from gratuitous ARP packets that are
         * reflected back over bond slaves so we lock the learning table. */
        if (!in_xbundle->bond) {
            mac_entry_set_grat_arp_lock(mac);
        } else if (mac_entry_is_grat_arp_locked(mac)) {
            return;
        }
    }

    if (mac->port.p != in_xbundle->ofbundle) {
        /* The log messages here could actually be useful in debugging,
         * so keep the rate limit relatively high. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);

        VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
                    "on port %s in VLAN %d",
                    xbridge->name, ETH_ADDR_ARGS(flow->dl_src),
                    in_xbundle->name, vlan);

        mac->port.p = in_xbundle->ofbundle;
        mac_learning_changed(xbridge->ml);
    }
}
|
|
|
|
|
|
|
|
|
|
/* Updates the MAC learning table for a packet matching 'flow' received on
 * 'in_xbundle' in 'vlan'.  Takes a read lock for the common no-change case
 * and upgrades to a write lock only when an update appears necessary. */
static void
update_learning_table(const struct xbridge *xbridge,
                      const struct flow *flow, struct flow_wildcards *wc,
                      int vlan, struct xbundle *in_xbundle)
{
    bool need_update;

    /* Don't learn the OFPP_NONE port. */
    if (in_xbundle == &ofpp_none_bundle) {
        return;
    }

    /* First try the common case: no change to MAC learning table. */
    ovs_rwlock_rdlock(&xbridge->ml->rwlock);
    need_update = is_mac_learning_update_needed(xbridge->ml, flow, wc, vlan,
                                                in_xbundle);
    ovs_rwlock_unlock(&xbridge->ml->rwlock);

    if (need_update) {
        /* Slow path: MAC learning table might need an update.  The state may
         * have changed between the locks; the write path re-checks. */
        ovs_rwlock_wrlock(&xbridge->ml->rwlock);
        update_learning_table__(xbridge, flow, wc, vlan, in_xbundle);
        ovs_rwlock_unlock(&xbridge->ml->rwlock);
    }
}
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
/* Determines whether packets in 'flow' within 'xbridge' should be forwarded or
 * dropped.  Returns true if they may be forwarded, false if they should be
 * dropped.
 *
 * 'in_port' must be the xport that corresponds to flow->in_port.
 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
 *
 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
 * returned by input_vid_to_vlan().  It must be a valid VLAN for 'in_port', as
 * checked by input_vid_is_valid().
 *
 * May also add tags to '*tags', although the current implementation only does
 * so in one special case.
 */
static bool
is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
              uint16_t vlan)
{
    struct xbundle *in_xbundle = in_port->xbundle;
    const struct xbridge *xbridge = ctx->xbridge;
    struct flow *flow = &ctx->xin->flow;

    /* Drop frames for reserved multicast addresses
     * only if forward_bpdu option is absent. */
    if (!xbridge->forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
        xlate_report(ctx, "packet has reserved destination MAC, dropping");
        return false;
    }

    if (in_xbundle->bond) {
        struct mac_entry *mac;

        switch (bond_check_admissibility(in_xbundle->bond, in_port->ofport,
                                         flow->dl_dst)) {
        case BV_ACCEPT:
            break;

        case BV_DROP:
            xlate_report(ctx, "bonding refused admissibility, dropping");
            return false;

        case BV_DROP_IF_MOVED:
            /* SLB bonding: drop if the source MAC is learned on another
             * bundle, unless an unlocked gratuitous ARP legitimizes the
             * move.  The read lock is held across the whole check. */
            ovs_rwlock_rdlock(&xbridge->ml->rwlock);
            mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
            if (mac && mac->port.p != in_xbundle->ofbundle &&
                (!is_gratuitous_arp(flow, &ctx->xout->wc)
                 || mac_entry_is_grat_arp_locked(mac))) {
                ovs_rwlock_unlock(&xbridge->ml->rwlock);
                xlate_report(ctx, "SLB bond thinks this packet looped back, "
                             "dropping");
                return false;
            }
            ovs_rwlock_unlock(&xbridge->ml->rwlock);
            break;
        }
    }

    return true;
}
|
|
|
|
|
|
|
|
|
|
/* Implements the OFPP_NORMAL action: classify the packet's VLAN, apply
 * admission checks, learn the source MAC, and forward to the learned port
 * or flood the VLAN when the destination is unknown. */
static void
xlate_normal(struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    struct xbundle *in_xbundle;
    struct xport *in_port;
    struct mac_entry *mac;
    void *mac_port;
    uint16_t vlan;
    uint16_t vid;

    ctx->xout->has_normal = true;

    /* NORMAL processing depends on the MAC addresses and VLAN tag, so
     * unwildcard them up front. */
    memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
    wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);

    in_xbundle = lookup_input_bundle(ctx->xbridge, flow->in_port.ofp_port,
                                     ctx->xin->packet != NULL, &in_port);
    if (!in_xbundle) {
        xlate_report(ctx, "no input bundle, dropping");
        return;
    }

    /* Drop malformed frames. */
    if (flow->dl_type == htons(ETH_TYPE_VLAN) &&
        !(flow->vlan_tci & htons(VLAN_CFI))) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
                         "VLAN tag received on port %s",
                         ctx->xbridge->name, in_xbundle->name);
        }
        xlate_report(ctx, "partial VLAN tag, dropping");
        return;
    }

    /* Drop frames on bundles reserved for mirroring. */
    if (xbundle_mirror_out(ctx->xbridge, in_xbundle)) {
        if (ctx->xin->packet != NULL) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
            VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
                         "%s, which is reserved exclusively for mirroring",
                         ctx->xbridge->name, in_xbundle->name);
        }
        xlate_report(ctx, "input port is mirror output port, dropping");
        return;
    }

    /* Check VLAN. */
    vid = vlan_tci_to_vid(flow->vlan_tci);
    if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
        xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
        return;
    }
    vlan = input_vid_to_vlan(in_xbundle, vid);

    /* Check other admissibility requirements. */
    if (in_port && !is_admissible(ctx, in_port, vlan)) {
        return;
    }

    /* Learn source MAC. */
    if (ctx->xin->may_learn) {
        update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
    }

    /* Determine output bundle.  Copy mac->port.p out under the read lock so
     * the entry is not dereferenced after the lock is released. */
    ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
    mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
    mac_port = mac ? mac->port.p : NULL;
    ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);

    if (mac_port) {
        struct xbundle *mac_xbundle = xbundle_lookup(mac_port);
        if (mac_xbundle && mac_xbundle != in_xbundle) {
            xlate_report(ctx, "forwarding to learned port");
            output_normal(ctx, mac_xbundle, vlan);
        } else if (!mac_xbundle) {
            xlate_report(ctx, "learned port is unknown, dropping");
        } else {
            xlate_report(ctx, "learned port is input port, dropping");
        }
    } else {
        /* Unknown destination: flood to every other floodable bundle that
         * carries 'vlan' and is not reserved for mirroring. */
        struct xbundle *xbundle;

        xlate_report(ctx, "no learned MAC for destination, flooding");
        LIST_FOR_EACH (xbundle, list_node, &ctx->xbridge->xbundles) {
            if (xbundle != in_xbundle
                && xbundle_includes_vlan(xbundle, vlan)
                && xbundle->floodable
                && !xbundle_mirror_out(ctx->xbridge, xbundle)) {
                output_normal(ctx, xbundle, vlan);
            }
        }
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    }
}
|
|
|
|
|
|
|
|
|
|
/* Compose SAMPLE action for sFlow or IPFIX.  The given probability is
 * the number of packets out of UINT32_MAX to sample.  The given
 * cookie is passed back in the callback for each sampled packet.
 *
 * Emits a nested OVS_ACTION_ATTR_SAMPLE into 'odp_actions' whose inner
 * action list holds a single USERSPACE action carrying 'cookie'.  Returns
 * the offset of that cookie within 'odp_actions' so the caller can patch
 * it later (see fix_sflow_action()). */
static size_t
compose_sample_action(const struct xbridge *xbridge,
                      struct ofpbuf *odp_actions,
                      const struct flow *flow,
                      const uint32_t probability,
                      const union user_action_cookie *cookie,
                      const size_t cookie_size)
{
    size_t sample_offset, actions_offset;
    odp_port_t odp_port;
    int cookie_offset;
    uint32_t pid;

    /* Outer SAMPLE attribute: probability plus a nested action list. */
    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);

    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);

    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);

    /* The upcall PID is looked up per input port so sampled packets reach
     * the handler responsible for that port. */
    odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
    pid = dpif_port_get_pid(xbridge->dpif, odp_port);
    cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size, odp_actions);

    nl_msg_end_nested(odp_actions, actions_offset);
    nl_msg_end_nested(odp_actions, sample_offset);
    return cookie_offset;
}
|
|
|
|
|
|
|
|
|
|
/* Initializes 'cookie' as an sFlow userspace cookie for a packet entering on
 * 'odp_port' with VLAN tag 'vlan_tci' that was output to 'n_outputs' ports.
 * The cookie's 'output' field encodes the destination per the sFlow v5
 * "Input/output port information" rules. */
static void
compose_sflow_cookie(const struct xbridge *xbridge, ovs_be16 vlan_tci,
                     odp_port_t odp_port, unsigned int n_outputs,
                     union user_action_cookie *cookie)
{
    int ifindex;

    cookie->type = USER_ACTION_COOKIE_SFLOW;
    cookie->sflow.vlan_tci = vlan_tci;

    /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
     * port information") for the interpretation of cookie->output. */
    switch (n_outputs) {
    case 0:
        /* 0x40000000 | 256 means "packet dropped for unknown reason". */
        cookie->sflow.output = 0x40000000 | 256;
        break;

    case 1:
        /* Exactly one output: report its ifindex if we can map it. */
        ifindex = dpif_sflow_odp_port_to_ifindex(xbridge->sflow, odp_port);
        if (ifindex) {
            cookie->sflow.output = ifindex;
            break;
        }
        /* Fall through. */
    default:
        /* 0x80000000 means "multiple output ports". */
        cookie->sflow.output = 0x80000000 | n_outputs;
        break;
    }
}
|
|
|
|
|
|
|
|
|
|
/* Compose SAMPLE action for sFlow bridge sampling.
 *
 * Returns the offset of the sFlow cookie within the composed actions, or 0
 * when sFlow is not configured on 'xbridge' or the flow has no real input
 * port.  The cookie is provisional (0-or-1 outputs) and is corrected later
 * by fix_sflow_action(). */
static size_t
compose_sflow_action(const struct xbridge *xbridge,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow,
                     odp_port_t odp_port)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!xbridge->sflow || flow->in_port.ofp_port == OFPP_NONE) {
        return 0;
    }

    probability = dpif_sflow_get_probability(xbridge->sflow);
    compose_sflow_cookie(xbridge, htons(0), odp_port,
                         odp_port == ODPP_NONE ? 0 : 1, &cookie);

    return compose_sample_action(xbridge, odp_actions, flow, probability,
                                 &cookie, sizeof cookie.sflow);
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
|
|
|
|
|
uint32_t obs_domain_id, uint32_t obs_point_id,
|
|
|
|
|
union user_action_cookie *cookie)
|
|
|
|
|
{
|
|
|
|
|
cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
|
|
|
|
|
cookie->flow_sample.probability = probability;
|
|
|
|
|
cookie->flow_sample.collector_set_id = collector_set_id;
|
|
|
|
|
cookie->flow_sample.obs_domain_id = obs_domain_id;
|
|
|
|
|
cookie->flow_sample.obs_point_id = obs_point_id;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
compose_ipfix_cookie(union user_action_cookie *cookie)
|
|
|
|
|
{
|
|
|
|
|
cookie->type = USER_ACTION_COOKIE_IPFIX;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Compose SAMPLE action for IPFIX bridge sampling.
 *
 * Does nothing when IPFIX is not configured on 'xbridge' or the flow has no
 * real input port. */
static void
compose_ipfix_action(const struct xbridge *xbridge,
                     struct ofpbuf *odp_actions,
                     const struct flow *flow)
{
    uint32_t probability;
    union user_action_cookie cookie;

    if (!xbridge->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
        return;
    }

    probability = dpif_ipfix_get_bridge_exporter_probability(xbridge->ipfix);
    compose_ipfix_cookie(&cookie);

    compose_sample_action(xbridge, odp_actions, flow, probability,
                          &cookie, sizeof cookie.ipfix);
}
|
|
|
|
|
|
|
|
|
|
/* SAMPLE action for sFlow must be first action in any given list of
 * actions.  At this point we do not have all information required to
 * build it.  So try to build sample action as complete as possible.
 *
 * Saves the cookie offset in ctx->user_cookie_offset and resets the
 * per-translation sFlow output counters; fix_sflow_action() patches the
 * cookie once the real outputs are known. */
static void
add_sflow_action(struct xlate_ctx *ctx)
{
    ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
                                                   &ctx->xout->odp_actions,
                                                   &ctx->xin->flow, ODPP_NONE);
    ctx->sflow_odp_port = 0;
    ctx->sflow_n_outputs = 0;
}
|
|
|
|
|
|
|
|
|
|
/* SAMPLE action for IPFIX must be 1st or 2nd action in any given list
 * of actions, eventually after the SAMPLE action for sFlow. */
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
    compose_ipfix_action(ctx->xbridge, &ctx->xout->odp_actions,
                         &ctx->xin->flow);
}
|
|
|
|
|
|
|
|
|
|
/* Fix SAMPLE action according to data collected while composing ODP actions.
 * We need to fix SAMPLE actions OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. nested
 * USERSPACE action's user-cookie which is required for sflow.
 *
 * No-op when add_sflow_action() did not emit a cookie (offset 0).  The
 * cookie is rewritten in place inside the already-composed odp_actions
 * buffer with the final output port and output count. */
static void
fix_sflow_action(struct xlate_ctx *ctx)
{
    const struct flow *base = &ctx->base_flow;
    union user_action_cookie *cookie;

    if (!ctx->user_cookie_offset) {
        return;
    }

    cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
                       sizeof cookie->sflow);
    ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);

    compose_sflow_cookie(ctx->xbridge, base->vlan_tci,
                         ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}
|
|
|
|
|
|
2013-06-17 17:56:54 -07:00
|
|
|
|
/* Checks whether 'flow' arriving on 'xport' is one of the control protocols
 * (CFM, BFD, LACP, STP) that OVS handles itself rather than forwarding.
 *
 * If so, processes 'packet' (when non-null) with the matching protocol
 * module and returns the corresponding SLOW_* reason; returns 0 for
 * ordinary traffic or a null 'xport'.  The branch order is significant:
 * CFM is checked before BFD, then LACP, then bridge-level STP. */
static enum slow_path_reason
process_special(struct xlate_ctx *ctx, const struct flow *flow,
                const struct xport *xport, const struct ofpbuf *packet)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    const struct xbridge *xbridge = ctx->xbridge;

    if (!xport) {
        return 0;
    } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
        if (packet) {
            cfm_process_heartbeat(xport->cfm, packet);
        }
        return SLOW_CFM;
    } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
        if (packet) {
            bfd_process_packet(xport->bfd, flow, packet);
            /* If POLL received, immediately sends FINAL back. */
            if (bfd_should_send_packet(xport->bfd)) {
                if (xport->peer) {
                    /* Patch port: safe to send directly. */
                    ofproto_dpif_monitor_port_send_soon(xport->ofport);
                } else {
                    ofproto_dpif_monitor_port_send_soon_safe(xport->ofport);
                }
            }
        }
        return SLOW_BFD;
    } else if (xport->xbundle && xport->xbundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
        }
        return SLOW_LACP;
    } else if (xbridge->stp && stp_should_process_flow(flow, wc)) {
        if (packet) {
            stp_process_packet(xport, packet);
        }
        return SLOW_STP;
    } else {
        return 0;
    }
}
|
|
|
|
|
|
2013-06-11 13:32:30 -07:00
|
|
|
|
/* Composes the datapath actions needed to output the current flow to
 * OpenFlow port 'ofp_port'.
 *
 * Handles, in order: administrative drops (nonexistent port, OFPPC_NO_FWD,
 * and — when 'check_stp' — STP non-forwarding state); mirror bookkeeping;
 * patch ports (recursive translation in the peer bridge); QoS DSCP
 * rewriting; tunnel ports; and VLAN-splinter port remapping.  Saves and
 * restores the flow fields it temporarily modifies (vlan_tci, pkt_mark,
 * nw_tos, tunnel metadata) so the caller's flow is unchanged on return. */
static void
compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                        bool check_stp)
{
    const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    ovs_be16 flow_vlan_tci;
    uint32_t flow_pkt_mark;
    uint8_t flow_nw_tos;
    odp_port_t out_port, odp_port;
    uint8_t dscp;

    /* If 'struct flow' gets additional metadata, we'll need to zero it out
     * before traversing a patch port. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 23);

    if (!xport) {
        xlate_report(ctx, "Nonexistent output port");
        return;
    } else if (xport->config & OFPUTIL_PC_NO_FWD) {
        xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
        return;
    } else if (check_stp && !xport_stp_forward_state(xport)) {
        xlate_report(ctx, "STP not in forwarding state, skipping output");
        return;
    }

    if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
        ctx->xout->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
                                                 xport->xbundle);
    }

    if (xport->peer) {
        /* Patch port: translate the flow as if it arrived on the peer port
         * of the peer bridge, with metadata cleared, then restore. */
        const struct xport *peer = xport->peer;
        struct flow old_flow = ctx->xin->flow;
        enum slow_path_reason special;

        ctx->xbridge = peer->xbridge;
        flow->in_port.ofp_port = peer->ofp_port;
        flow->metadata = htonll(0);
        memset(&flow->tunnel, 0, sizeof flow->tunnel);
        memset(flow->regs, 0, sizeof flow->regs);

        special = process_special(ctx, &ctx->xin->flow, peer,
                                  ctx->xin->packet);
        if (special) {
            ctx->xout->slow |= special;
        } else if (may_receive(peer, ctx)) {
            if (xport_stp_forward_state(peer)) {
                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
            } else {
                /* Forwarding is disabled by STP.  Let OFPP_NORMAL and the
                 * learning action look at the packet, then drop it. */
                struct flow old_base_flow = ctx->base_flow;
                size_t old_size = ctx->xout->odp_actions.size;
                mirror_mask_t old_mirrors = ctx->xout->mirrors;
                xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
                ctx->xout->mirrors = old_mirrors;
                ctx->base_flow = old_base_flow;
                /* Discard any actions the dropped translation emitted. */
                ctx->xout->odp_actions.size = old_size;
            }
        }

        ctx->xin->flow = old_flow;
        ctx->xbridge = xport->xbridge;

        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
            netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
            if (peer->bfd) {
                bfd_account_rx(peer->bfd, ctx->xin->resubmit_stats);
            }
        }

        return;
    }

    /* Save the flow fields that may be rewritten below. */
    flow_vlan_tci = flow->vlan_tci;
    flow_pkt_mark = flow->pkt_mark;
    flow_nw_tos = flow->nw_tos;

    if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
        /* QoS remaps the DSCP bits; keep the ECN bits intact. */
        wc->masks.nw_tos |= IP_ECN_MASK;
        flow->nw_tos &= ~IP_DSCP_MASK;
        flow->nw_tos |= dscp;
    }

    if (xport->is_tunnel) {
         /* Save tunnel metadata so that changes made due to
          * the Logical (tunnel) Port are not visible for any further
          * matches, while explicit set actions on tunnel metadata are.
          */
        struct flow_tnl flow_tnl = flow->tunnel;
        odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
        if (odp_port == ODPP_NONE) {
            xlate_report(ctx, "Tunneling decided against output");
            goto out; /* restore flow_nw_tos */
        }
        if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
            xlate_report(ctx, "Not tunneling to our own address");
            goto out; /* restore flow_nw_tos */
        }
        if (ctx->xin->resubmit_stats) {
            netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
        }
        out_port = odp_port;
        commit_odp_tunnel_action(flow, &ctx->base_flow,
                                 &ctx->xout->odp_actions);
        flow->tunnel = flow_tnl; /* Restore tunnel metadata */
    } else {
        ofp_port_t vlandev_port;

        odp_port = xport->odp_port;
        if (ofproto_has_vlan_splinters(ctx->xbridge->ofproto)) {
            wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
        }
        vlandev_port = vsp_realdev_to_vlandev(ctx->xbridge->ofproto, ofp_port,
                                              flow->vlan_tci);
        if (vlandev_port == ofp_port) {
            out_port = odp_port;
        } else {
            /* VLAN splinter: output on the VLAN device, untagged. */
            out_port = ofp_port_to_odp_port(ctx->xbridge, vlandev_port);
            flow->vlan_tci = htons(0);
        }
    }

    if (out_port != ODPP_NONE) {
        ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
                                              &ctx->xout->odp_actions,
                                              &ctx->xout->wc,
                                              &ctx->mpls_depth_delta);
        nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
                            out_port);

        ctx->sflow_odp_port = odp_port;
        ctx->sflow_n_outputs++;
        ctx->xout->nf_output_iface = ofp_port;
    }

 out:
    /* Restore flow */
    flow->vlan_tci = flow_vlan_tci;
    flow->pkt_mark = flow_pkt_mark;
    flow->nw_tos = flow_nw_tos;
}
|
|
|
|
|
|
|
|
|
|
/* Outputs to 'ofp_port', respecting the port's STP forwarding state
 * (compose_output_action__() with check_stp == true). */
static void
compose_output_action(struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
    compose_output_action__(ctx, ofp_port, true);
}
|
|
|
|
|
|
2013-08-23 11:03:55 -07:00
|
|
|
|
/* Translates the actions of 'rule' in the context of 'ctx', crediting stats
 * if requested.  Bumps the resubmit and recursion counters around the nested
 * do_xlate_actions() call and restores ctx->rule afterwards. */
static void
xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
    struct rule_dpif *old_rule = ctx->rule;
    struct rule_actions *actions;

    if (ctx->xin->resubmit_stats) {
        rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
    }

    ctx->resubmits++;
    ctx->recurse++;
    ctx->rule = rule;
    actions = rule_dpif_get_actions(rule);
    do_xlate_actions(actions->ofpacts, actions->ofpacts_len, ctx);
    rule_actions_unref(actions);
    ctx->rule = old_rule;
    ctx->recurse--;
}
|
|
|
|
|
|
2013-10-30 18:17:13 +09:00
|
|
|
|
/* Returns true if translation may continue with another resubmit or group
 * traversal; false (after rate-limited logging) once any of the resource
 * limits — recursion depth, total resubmits, emitted action bytes, or stack
 * size — has been exceeded. */
static bool
xlate_resubmit_resource_check(struct xlate_ctx *ctx)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

    if (ctx->recurse >= MAX_RESUBMIT_RECURSION) {
        VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
                    MAX_RESUBMIT_RECURSION);
    } else if (ctx->resubmits >= MAX_RESUBMITS) {
        VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
    } else if (ctx->xout->odp_actions.size > UINT16_MAX) {
        VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
    } else if (ctx->stack.size >= 65536) {
        VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
    } else {
        return true;
    }

    return false;
}
|
|
|
|
|
|
|
|
|
|
/* Resubmits the flow into OpenFlow table 'table_id', looking it up with
 * 'in_port' temporarily substituted as the input port.
 *
 * On a miss with 'may_packet_in', falls back to the bridge's miss or
 * no-packet-in rule (per the port's config).  If the resource check fails,
 * sets ctx->exit to abort further translation. */
static void
xlate_table_action(struct xlate_ctx *ctx,
                   ofp_port_t in_port, uint8_t table_id, bool may_packet_in)
{
    if (xlate_resubmit_resource_check(ctx)) {
        ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
        bool skip_wildcards = ctx->xin->skip_wildcards;
        uint8_t old_table_id = ctx->table_id;
        struct rule_dpif *rule;

        ctx->table_id = table_id;

        /* Look up a flow with 'in_port' as the input port.  Then restore the
         * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
         * have surprising behavior). */
        ctx->xin->flow.in_port.ofp_port = in_port;
        rule_dpif_lookup_in_table(ctx->xbridge->ofproto, &ctx->xin->flow,
                                  !skip_wildcards ? &ctx->xout->wc : NULL,
                                  table_id, &rule);
        ctx->xin->flow.in_port.ofp_port = old_in_port;

        if (ctx->xin->resubmit_hook) {
            ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
        }

        if (!rule && may_packet_in) {
            struct xport *xport;

            /* XXX
             * check if table configuration flags
             * OFPTC11_TABLE_MISS_CONTROLLER, default.
             * OFPTC11_TABLE_MISS_CONTINUE,
             * OFPTC11_TABLE_MISS_DROP
             * When OF1.0, OFPTC11_TABLE_MISS_CONTINUE is used. What to do? */
            xport = get_ofp_port(ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
            choose_miss_rule(xport ? xport->config : 0,
                             ctx->xbridge->miss_rule,
                             ctx->xbridge->no_packet_in_rule, &rule);
        }
        if (rule) {
            xlate_recursively(ctx, rule);
            rule_dpif_unref(rule);
        }

        ctx->table_id = old_table_id;
        return;
    }

    /* Resource limits exceeded: stop translating. */
    ctx->exit = true;
}
|
|
|
|
|
|
2013-10-30 18:17:14 +09:00
|
|
|
|
/* Translates a single group 'bucket': converts its action set into an
 * action list (on a stack-allocated stub buffer) and executes it at one
 * extra level of recursion. */
static void
xlate_group_bucket(struct xlate_ctx *ctx, const struct ofputil_bucket *bucket)
{
    uint64_t action_list_stub[1024 / 8];
    struct ofpbuf action_list, action_set;

    ofpbuf_use_const(&action_set, bucket->ofpacts, bucket->ofpacts_len);
    ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);

    ofpacts_execute_action_set(&action_list, &action_set);
    ctx->recurse++;
    do_xlate_actions(action_list.data, action_list.size, ctx);
    ctx->recurse--;

    ofpbuf_uninit(&action_set);
    ofpbuf_uninit(&action_list);
}
|
|
|
|
|
|
|
|
|
|
/* Translates an OFPGT11_ALL (or INDIRECT) group: executes every bucket,
 * restoring the flow between buckets so each one sees the pre-group state. */
static void
xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
    const struct ofputil_bucket *bucket;
    const struct list *buckets;
    struct flow old_flow = ctx->xin->flow;

    group_dpif_get_buckets(group, &buckets);

    LIST_FOR_EACH (bucket, list_node, buckets) {
        xlate_group_bucket(ctx, bucket);
        /* Roll back flow to previous state.
         * This is equivalent to cloning the packet for each bucket.
         *
         * As a side effect any subsequently applied actions will
         * also effectively be applied to a clone of the packet taken
         * just before applying the all or indirect group. */
        ctx->xin->flow = old_flow;
    }
}
|
|
|
|
|
|
2013-10-30 18:17:18 +09:00
|
|
|
|
/* Translates an OFPGT11_FF (fast failover) group: executes only the first
 * live bucket, or nothing at all when no bucket is live. */
static void
xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
    const struct ofputil_bucket *live = group_first_live_bucket(ctx, group, 0);

    if (live) {
        xlate_group_bucket(ctx, live);
    }
}
|
|
|
|
|
|
2013-10-30 18:17:19 +09:00
|
|
|
|
/* Translates an OFPGT11_SELECT group: hashes the flow's Ethernet destination
 * to pick the best live bucket and executes it.  Unwildcards dl_dst only
 * when a bucket is actually chosen, since the selection depended on it. */
static void
xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    const struct ofputil_bucket *bucket;
    uint32_t basis;

    basis = hash_bytes(ctx->xin->flow.dl_dst, sizeof ctx->xin->flow.dl_dst, 0);
    bucket = group_best_live_bucket(ctx, group, basis);
    if (bucket) {
        memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
        xlate_group_bucket(ctx, bucket);
    }
}
|
|
|
|
|
|
2013-10-30 18:17:14 +09:00
|
|
|
|
/* Dispatches group translation by group type and releases the caller's
 * reference to 'group' when done. */
static void
xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
{
    switch (group_dpif_get_type(group)) {
    case OFPGT11_ALL:
    case OFPGT11_INDIRECT:
        xlate_all_group(ctx, group);
        break;
    case OFPGT11_SELECT:
        xlate_select_group(ctx, group);
        break;
    case OFPGT11_FF:
        xlate_ff_group(ctx, group);
        break;
    default:
        OVS_NOT_REACHED();
    }
    group_dpif_release(group);
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
|
|
|
|
|
{
|
|
|
|
|
if (xlate_resubmit_resource_check(ctx)) {
|
|
|
|
|
struct group_dpif *group;
|
|
|
|
|
bool got_group;
|
|
|
|
|
|
|
|
|
|
got_group = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
|
|
|
|
|
if (got_group) {
|
|
|
|
|
xlate_group_action__(ctx, group);
|
|
|
|
|
} else {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-11 13:32:30 -07:00
|
|
|
|
static void
|
|
|
|
|
xlate_ofpact_resubmit(struct xlate_ctx *ctx,
|
|
|
|
|
const struct ofpact_resubmit *resubmit)
|
|
|
|
|
{
|
2013-06-19 16:58:44 -07:00
|
|
|
|
ofp_port_t in_port;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
uint8_t table_id;
|
|
|
|
|
|
|
|
|
|
in_port = resubmit->in_port;
|
|
|
|
|
if (in_port == OFPP_IN_PORT) {
|
2013-06-19 16:58:44 -07:00
|
|
|
|
in_port = ctx->xin->flow.in_port.ofp_port;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
table_id = resubmit->table_id;
|
|
|
|
|
if (table_id == 255) {
|
|
|
|
|
table_id = ctx->table_id;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
xlate_table_action(ctx, in_port, table_id, false);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Outputs the packet to every port on the bridge except the input port.
 * With 'all' set, even OFPPC_NO_FLOOD ports are included and STP state is
 * ignored; otherwise NO_FLOOD ports are skipped and STP is respected. */
static void
flood_packets(struct xlate_ctx *ctx, bool all)
{
    const struct xport *xport;

    HMAP_FOR_EACH (xport, ofp_node, &ctx->xbridge->xports) {
        if (xport->ofp_port == ctx->xin->flow.in_port.ofp_port) {
            continue;
        }

        if (all) {
            compose_output_action__(ctx, xport->ofp_port, false);
        } else if (!(xport->config & OFPUTIL_PC_NO_FLOOD)) {
            compose_output_action(ctx, xport->ofp_port);
        }
    }

    ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
|
|
|
|
|
|
|
|
|
|
/* Sends (up to 'len' bytes of) the current packet to controller
 * 'controller_id' with the given packet-in 'reason'.
 *
 * Marks the flow slow-pathed.  A clone of the packet is run through the
 * actions composed so far so the controller sees the packet as modified,
 * then a packet-in is built and handed off (ownership of the packet data
 * transfers to the packet-in via ofpbuf_steal_data()).  No-op beyond the
 * SLOW_CONTROLLER flag when there is no actual packet. */
static void
execute_controller_action(struct xlate_ctx *ctx, int len,
                          enum ofp_packet_in_reason reason,
                          uint16_t controller_id)
{
    struct ofproto_packet_in *pin;
    struct ofpbuf *packet;
    struct flow key;

    ctx->xout->slow |= SLOW_CONTROLLER;
    if (!ctx->xin->packet) {
        return;
    }

    packet = ofpbuf_clone(ctx->xin->packet);

    /* Execution key: only the metadata fields initialized below are used. */
    key.skb_priority = 0;
    key.pkt_mark = 0;
    memset(&key.tunnel, 0, sizeof key.tunnel);

    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                                          &ctx->xout->odp_actions,
                                          &ctx->xout->wc,
                                          &ctx->mpls_depth_delta);

    odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
                        ctx->xout->odp_actions.size, NULL, NULL);

    pin = xmalloc(sizeof *pin);
    pin->up.packet_len = packet->size;
    pin->up.packet = ofpbuf_steal_data(packet);
    pin->up.reason = reason;
    pin->up.table_id = ctx->table_id;
    pin->up.cookie = (ctx->rule
                      ? rule_dpif_get_flow_cookie(ctx->rule)
                      : OVS_BE64_MAX);

    flow_get_metadata(&ctx->xin->flow, &pin->up.fmd);

    pin->controller_id = controller_id;
    pin->send_len = len;
    pin->generated_by_table_miss = (ctx->rule
                                    && rule_dpif_is_table_miss(ctx->rule));
    ofproto_dpif_send_packet_in(ctx->xbridge->ofproto, pin);
    ofpbuf_delete(packet);
}
|
|
|
|
|
|
2013-09-27 06:55:19 +09:00
|
|
|
|
/* Composes an MPLS push of Ethertype 'eth_type' (which must be an MPLS
 * Ethertype) onto the flow being translated.
 *
 * Returns true if translation must stop because the push cannot be composed
 * without recirculation support; returns false on success. */
static bool
compose_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;

    ovs_assert(eth_type_mpls(eth_type));

    /* If mpls_depth_delta is negative then an MPLS POP action has been
     * composed and the resulting MPLS label stack is unknown.  This means
     * an MPLS PUSH action can't be composed as it needs to know either the
     * top-most MPLS LSE to use as a template for the new MPLS LSE, or that
     * there is no MPLS label stack present.  Thus, stop processing.
     *
     * If mpls_depth_delta is positive then an MPLS PUSH action has been
     * composed and no further MPLS PUSH action may be performed without
     * losing MPLS LSE and ether type information held in xtx->xin->flow.
     * Thus, stop processing.
     *
     * If the MPLS LSE of the flow and base_flow differ then the MPLS LSE
     * has been updated.  Performing an MPLS PUSH action would result in
     * losing MPLS LSE and ether type information held in xtx->xin->flow.
     * Thus, stop processing.
     *
     * It is planned that in the future this case will be handled
     * by recirculation */
    if (ctx->mpls_depth_delta ||
        ctx->xin->flow.mpls_lse != ctx->base_flow.mpls_lse) {
        return true;
    }

    /* The resulting datapath flow depends on the current top LSE value. */
    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);

    /* Remember the LSE being covered so a later pop can restore it. */
    ctx->pre_push_mpls_lse = ctx->xin->flow.mpls_lse;

    if (eth_type_mpls(ctx->xin->flow.dl_type)) {
        /* Pushing onto an existing stack: the old top LSE is no longer
         * bottom-of-stack. */
        flow->mpls_lse &= ~htonl(MPLS_BOS_MASK);
    } else {
        ovs_be32 label;
        uint8_t tc, ttl;

        /* Pushing the first label: build a fresh bottom-of-stack LSE,
         * inheriting TC and TTL from the IP header where available. */
        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            label = htonl(0x2); /* IPV6 Explicit Null. */
        } else {
            label = htonl(0x0); /* IPV4 Explicit Null. */
        }
        wc->masks.nw_tos |= IP_DSCP_MASK;
        wc->masks.nw_ttl = 0xff;
        tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
        ttl = flow->nw_ttl ? flow->nw_ttl : 0x40;
        flow->mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
    }
    flow->dl_type = eth_type;
    ctx->mpls_depth_delta++;

    return false;
}
|
|
|
|
|
|
2013-09-27 06:55:19 +09:00
|
|
|
|
static bool
|
2013-06-12 14:33:17 -07:00
|
|
|
|
compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
2013-06-12 14:37:18 -07:00
|
|
|
|
struct flow_wildcards *wc = &ctx->xout->wc;
|
|
|
|
|
|
2013-09-27 06:55:19 +09:00
|
|
|
|
if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* If mpls_depth_delta is negative then an MPLS POP action has been
|
|
|
|
|
* composed. Performing another MPLS POP action
|
|
|
|
|
* would result in losing ether type that results from
|
|
|
|
|
* the already composed MPLS POP. Thus, stop processing.
|
|
|
|
|
*
|
|
|
|
|
* It is planned that in the future this case will be handled
|
|
|
|
|
* by recirculation */
|
|
|
|
|
if (ctx->mpls_depth_delta < 0) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
2013-06-12 14:37:18 -07:00
|
|
|
|
memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
|
|
|
|
|
|
2013-09-27 06:55:19 +09:00
|
|
|
|
/* If mpls_depth_delta is positive then an MPLS PUSH action has been
|
|
|
|
|
* executed and the previous MPLS LSE saved in ctx->pre_push_mpls_lse. The
|
|
|
|
|
* flow's MPLS LSE should be restored to that value to allow any
|
|
|
|
|
* subsequent actions that update of the LSE to be executed correctly.
|
|
|
|
|
*/
|
|
|
|
|
if (ctx->mpls_depth_delta > 0) {
|
|
|
|
|
ctx->xin->flow.mpls_lse = ctx->pre_push_mpls_lse;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
2013-09-27 06:55:19 +09:00
|
|
|
|
|
|
|
|
|
ctx->xin->flow.dl_type = eth_type;
|
|
|
|
|
ctx->mpls_depth_delta--;
|
|
|
|
|
|
|
|
|
|
return false;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
|
|
|
|
|
{
|
2013-06-12 14:37:18 -07:00
|
|
|
|
struct flow *flow = &ctx->xin->flow;
|
|
|
|
|
|
|
|
|
|
if (!is_ip_any(flow)) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-18 23:55:47 -07:00
|
|
|
|
ctx->xout->wc.masks.nw_ttl = 0xff;
|
2013-06-12 14:37:18 -07:00
|
|
|
|
if (flow->nw_ttl > 1) {
|
|
|
|
|
flow->nw_ttl--;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
return false;
|
|
|
|
|
} else {
|
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < ids->n_controllers; i++) {
|
|
|
|
|
execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
|
|
|
|
|
ids->cnt_ids[i]);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Stop processing for current table. */
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-10-24 13:19:34 -07:00
|
|
|
|
static bool
|
|
|
|
|
compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
|
|
|
|
|
{
|
|
|
|
|
if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* If mpls_depth_delta is negative then an MPLS POP action has been
|
|
|
|
|
* executed and the resulting MPLS label stack is unknown. This means
|
|
|
|
|
* a SET MPLS LABEL action can't be executed as it needs to manipulate
|
|
|
|
|
* the top-most MPLS LSE. Thus, stop processing.
|
|
|
|
|
*
|
|
|
|
|
* It is planned that in the future this case will be handled
|
|
|
|
|
* by recirculation.
|
|
|
|
|
*/
|
|
|
|
|
if (ctx->mpls_depth_delta < 0) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ctx->xout->wc.masks.mpls_lse |= htonl(MPLS_LABEL_MASK);
|
|
|
|
|
set_mpls_lse_label(&ctx->xin->flow.mpls_lse, label);
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
|
|
|
|
|
{
|
|
|
|
|
if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* If mpls_depth_delta is negative then an MPLS POP action has been
|
|
|
|
|
* executed and the resulting MPLS label stack is unknown. This means
|
|
|
|
|
* a SET MPLS TC action can't be executed as it needs to manipulate
|
|
|
|
|
* the top-most MPLS LSE. Thus, stop processing.
|
|
|
|
|
*
|
|
|
|
|
* It is planned that in the future this case will be handled
|
|
|
|
|
* by recirculation.
|
|
|
|
|
*/
|
|
|
|
|
if (ctx->mpls_depth_delta < 0) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ctx->xout->wc.masks.mpls_lse |= htonl(MPLS_TC_MASK);
|
|
|
|
|
set_mpls_lse_tc(&ctx->xin->flow.mpls_lse, tc);
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-11 13:32:30 -07:00
|
|
|
|
static bool
|
2013-06-12 14:33:17 -07:00
|
|
|
|
compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
|
|
|
|
if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2013-09-27 06:55:19 +09:00
|
|
|
|
/* If mpls_depth_delta is negative then an MPLS POP action has been
|
|
|
|
|
* executed and the resulting MPLS label stack is unknown. This means
|
|
|
|
|
* a SET MPLS TTL push action can't be executed as it needs to manipulate
|
|
|
|
|
* the top-most MPLS LSE. Thus, stop processing.
|
|
|
|
|
*
|
|
|
|
|
* It is planned that in the future this case will be handled
|
|
|
|
|
* by recirculation.
|
|
|
|
|
*/
|
|
|
|
|
if (ctx->mpls_depth_delta < 0) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2013-08-02 21:17:31 -07:00
|
|
|
|
ctx->xout->wc.masks.mpls_lse |= htonl(MPLS_TTL_MASK);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Decrements the TTL of the flow's top-most MPLS LSE, handing the packet to
 * the controller with reason OFPR_INVALID_TTL when the TTL would expire.
 *
 * Returns true to stop processing the current table, false to continue. */
static bool
compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
    struct flow *flow = &ctx->xin->flow;
    uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse);
    struct flow_wildcards *wc = &ctx->xout->wc;

    /* NOTE(review): the LSE mask is widened even for non-MPLS packets (the
     * eth_type check comes after this memset).  This looks conservative but
     * harmless; confirm against how mpls_lse is populated for non-MPLS
     * flows. */
    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);

    /* dec_mpls_ttl is a no-op for packets without an MPLS label stack. */
    if (!eth_type_mpls(flow->dl_type)) {
        return false;
    }

    if (ttl > 1) {
        ttl--;
        set_mpls_lse_ttl(&flow->mpls_lse, ttl);
        return false;
    } else {
        /* TTL exhausted: hand the packet to the controller. */
        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);

        /* Stop processing for current table. */
        return true;
    }
}
|
|
|
|
|
|
|
|
|
|
/* Translates an OpenFlow "output" action to 'port', where 'max_len' bounds
 * the bytes sent to the controller for OFPP_CONTROLLER, and 'may_packet_in'
 * controls whether an OFPP_TABLE resubmission may generate packet-ins.
 * Also maintains the NetFlow output-interface bookkeeping in ctx->xout. */
static void
xlate_output_action(struct xlate_ctx *ctx,
                    ofp_port_t port, uint16_t max_len, bool may_packet_in)
{
    ofp_port_t prev_nf_output_iface = ctx->xout->nf_output_iface;

    ctx->xout->nf_output_iface = NF_OUT_DROP;

    switch (port) {
    case OFPP_IN_PORT:
        compose_output_action(ctx, ctx->xin->flow.in_port.ofp_port);
        break;
    case OFPP_TABLE:
        xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
                           0, may_packet_in);
        break;
    case OFPP_NORMAL:
        xlate_normal(ctx);
        break;
    case OFPP_FLOOD:
        /* Flood excludes the in_port; OFPP_ALL includes it. */
        flood_packets(ctx, false);
        break;
    case OFPP_ALL:
        flood_packets(ctx, true);
        break;
    case OFPP_CONTROLLER:
        execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
        break;
    case OFPP_NONE:
        break;
    case OFPP_LOCAL:
    default:
        if (port != ctx->xin->flow.in_port.ofp_port) {
            compose_output_action(ctx, port);
        } else {
            xlate_report(ctx, "skipping output to input port");
        }
        break;
    }

    /* Fold this output into the NetFlow output interface: flooding wins
     * outright, a single output is reported as-is, and multiple distinct
     * outputs collapse to NF_OUT_MULTI. */
    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
xlate_output_reg_action(struct xlate_ctx *ctx,
|
|
|
|
|
const struct ofpact_output_reg *or)
|
|
|
|
|
{
|
|
|
|
|
uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
|
|
|
|
|
if (port <= UINT16_MAX) {
|
|
|
|
|
union mf_subvalue value;
|
|
|
|
|
|
|
|
|
|
memset(&value, 0xff, sizeof value);
|
|
|
|
|
mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
|
2013-06-19 16:58:44 -07:00
|
|
|
|
xlate_output_action(ctx, u16_to_ofp(port),
|
|
|
|
|
or->max_len, false);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Translates an NXAST_ENQUEUE action: outputs to 'enqueue->port' with the
 * datapath priority corresponding to OpenFlow queue 'enqueue->queue'.
 * Falls back to a plain output action if the queue cannot be mapped to a
 * priority.  Updates the NetFlow output interface bookkeeping. */
static void
xlate_enqueue_action(struct xlate_ctx *ctx,
                     const struct ofpact_enqueue *enqueue)
{
    ofp_port_t ofp_port = enqueue->port;
    uint32_t queue_id = enqueue->queue;
    uint32_t flow_priority, priority;
    int error;

    /* Translate queue to priority. */
    error = dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &priority);
    if (error) {
        /* Fall back to ordinary output action. */
        xlate_output_action(ctx, enqueue->port, 0, false);
        return;
    }

    /* Check output port.  OFPP_IN_PORT is resolved to the actual input
     * port; outputting back to the input port by number is skipped. */
    if (ofp_port == OFPP_IN_PORT) {
        ofp_port = ctx->xin->flow.in_port.ofp_port;
    } else if (ofp_port == ctx->xin->flow.in_port.ofp_port) {
        return;
    }

    /* Add datapath actions.  Temporarily override skb_priority so the
     * composed output carries the queue's priority, then restore the
     * original value for subsequent actions. */
    flow_priority = ctx->xin->flow.skb_priority;
    ctx->xin->flow.skb_priority = priority;
    compose_output_action(ctx, ofp_port);
    ctx->xin->flow.skb_priority = flow_priority;

    /* Update NetFlow output port. */
    if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
        ctx->xout->nf_output_iface = ofp_port;
    } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
        ctx->xout->nf_output_iface = NF_OUT_MULTI;
    }
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
|
|
|
|
|
{
|
|
|
|
|
uint32_t skb_priority;
|
|
|
|
|
|
2013-07-06 11:46:48 -07:00
|
|
|
|
if (!dpif_queue_to_priority(ctx->xbridge->dpif, queue_id, &skb_priority)) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
ctx->xin->flow.skb_priority = skb_priority;
|
|
|
|
|
} else {
|
|
|
|
|
/* Couldn't translate queue to a priority. Nothing to do. A warning
|
|
|
|
|
* has already been logged. */
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
2013-06-13 18:38:24 -07:00
|
|
|
|
slave_enabled_cb(ofp_port_t ofp_port, void *xbridge_)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
2013-06-13 18:38:24 -07:00
|
|
|
|
const struct xbridge *xbridge = xbridge_;
|
|
|
|
|
struct xport *port;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
|
|
|
|
switch (ofp_port) {
|
|
|
|
|
case OFPP_IN_PORT:
|
|
|
|
|
case OFPP_TABLE:
|
|
|
|
|
case OFPP_NORMAL:
|
|
|
|
|
case OFPP_FLOOD:
|
|
|
|
|
case OFPP_ALL:
|
|
|
|
|
case OFPP_NONE:
|
|
|
|
|
return true;
|
|
|
|
|
case OFPP_CONTROLLER: /* Not supported by the bundle action. */
|
|
|
|
|
return false;
|
|
|
|
|
default:
|
2013-06-13 18:38:24 -07:00
|
|
|
|
port = get_ofp_port(xbridge, ofp_port);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
return port ? port->may_enable : false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
xlate_bundle_action(struct xlate_ctx *ctx,
|
|
|
|
|
const struct ofpact_bundle *bundle)
|
|
|
|
|
{
|
2013-06-19 16:58:44 -07:00
|
|
|
|
ofp_port_t port;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
|
|
|
|
port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
|
2013-06-13 18:38:24 -07:00
|
|
|
|
slave_enabled_cb,
|
|
|
|
|
CONST_CAST(struct xbridge *, ctx->xbridge));
|
2013-06-11 13:32:30 -07:00
|
|
|
|
if (bundle->dst.field) {
|
2013-08-02 21:17:31 -07:00
|
|
|
|
nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow,
|
|
|
|
|
&ctx->xout->wc);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
} else {
|
|
|
|
|
xlate_output_action(ctx, port, 0, false);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
xlate_learn_action(struct xlate_ctx *ctx,
|
|
|
|
|
const struct ofpact_learn *learn)
|
|
|
|
|
{
|
2013-09-12 20:45:31 -07:00
|
|
|
|
uint64_t ofpacts_stub[1024 / 8];
|
|
|
|
|
struct ofputil_flow_mod fm;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
struct ofpbuf ofpacts;
|
|
|
|
|
|
|
|
|
|
ctx->xout->has_learn = true;
|
|
|
|
|
|
|
|
|
|
learn_mask(learn, &ctx->xout->wc);
|
|
|
|
|
|
|
|
|
|
if (!ctx->xin->may_learn) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2013-09-12 20:45:31 -07:00
|
|
|
|
ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
|
|
|
|
|
learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
|
|
|
|
|
ofproto_dpif_flow_mod(ctx->xbridge->ofproto, &fm);
|
|
|
|
|
ofpbuf_uninit(&ofpacts);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
xlate_fin_timeout(struct xlate_ctx *ctx,
|
|
|
|
|
const struct ofpact_fin_timeout *oft)
|
|
|
|
|
{
|
|
|
|
|
if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
|
2013-08-27 13:17:11 -07:00
|
|
|
|
rule_dpif_reduce_timeouts(ctx->rule, oft->fin_idle_timeout,
|
|
|
|
|
oft->fin_hard_timeout);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Translates an NXAST_SAMPLE action into a datapath sample action carrying
 * an IPFIX flow-sample cookie with the configured probability and
 * observation IDs. */
static void
xlate_sample_action(struct xlate_ctx *ctx,
                    const struct ofpact_sample *os)
{
    union user_action_cookie cookie;
    /* Scale the probability from 16-bit to 32-bit while representing
     * the same percentage. */
    uint32_t probability = (os->probability << 16) | os->probability;

    /* Commit pending flow changes to datapath actions first so the sampled
     * packet reflects all preceding set-field actions. */
    ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
                                          &ctx->xout->odp_actions,
                                          &ctx->xout->wc,
                                          &ctx->mpls_depth_delta);

    compose_flow_sample_cookie(os->probability, os->collector_set_id,
                               os->obs_domain_id, os->obs_point_id, &cookie);
    compose_sample_action(ctx->xbridge, &ctx->xout->odp_actions, &ctx->xin->flow,
                          probability, &cookie, sizeof cookie.flow_sample);
}
|
|
|
|
|
|
|
|
|
|
static bool
|
2013-06-13 18:38:24 -07:00
|
|
|
|
may_receive(const struct xport *xport, struct xlate_ctx *ctx)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
2013-06-13 18:38:24 -07:00
|
|
|
|
if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
|
|
|
|
|
? OFPUTIL_PC_NO_RECV_STP
|
|
|
|
|
: OFPUTIL_PC_NO_RECV)) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Only drop packets here if both forwarding and learning are
|
|
|
|
|
* disabled. If just learning is enabled, we need to have
|
|
|
|
|
* OFPP_NORMAL and the learning action have a look at the packet
|
|
|
|
|
* before we can drop it. */
|
2013-07-06 09:31:35 -07:00
|
|
|
|
if (!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2013-10-11 13:23:29 +09:00
|
|
|
|
static void
|
|
|
|
|
xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact *a)
|
|
|
|
|
{
|
|
|
|
|
struct ofpact_nest *on = ofpact_get_WRITE_ACTIONS(a);
|
|
|
|
|
ofpbuf_put(&ctx->action_set, on->actions, ofpact_nest_get_action_len(on));
|
|
|
|
|
ofpact_pad(&ctx->action_set);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
xlate_action_set(struct xlate_ctx *ctx)
|
|
|
|
|
{
|
|
|
|
|
uint64_t action_list_stub[1024 / 64];
|
|
|
|
|
struct ofpbuf action_list;
|
|
|
|
|
|
|
|
|
|
ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
|
|
|
|
|
ofpacts_execute_action_set(&action_list, &ctx->action_set);
|
|
|
|
|
do_xlate_actions(action_list.data, action_list.size, ctx);
|
|
|
|
|
ofpbuf_uninit(&action_list);
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-11 13:32:30 -07:00
|
|
|
|
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at
 * 'ofpacts' into datapath actions, accumulating the result in
 * 'ctx->xout->odp_actions' and updating 'ctx->xout->wc' (the wildcard
 * masks) to record every flow field each action reads or writes.
 *
 * Invariant maintained throughout: whenever an action's effect depends on
 * or modifies a flow field, the corresponding mask bits are set in 'wc' so
 * the resulting datapath flow is not applied to packets with other values
 * in that field. */
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                 struct xlate_ctx *ctx)
{
    struct flow_wildcards *wc = &ctx->xout->wc;
    struct flow *flow = &ctx->xin->flow;
    const struct ofpact *a;

    /* dl_type already in the mask, not set below. */

    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        struct ofpact_controller *controller;
        const struct ofpact_metadata *metadata;
        const struct ofpact_set_field *set_field;
        const struct mf_field *mf;

        /* An OFPACT_EXIT earlier in the list stops all further
         * translation. */
        if (ctx->exit) {
            break;
        }

        switch (a->type) {
        case OFPACT_OUTPUT:
            xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
                                ofpact_get_OUTPUT(a)->max_len, true);
            break;

        case OFPACT_GROUP:
            /* A nonzero return means the group could not be translated;
             * abandon the whole action list. */
            if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id)) {
                return;
            }
            break;

        case OFPACT_CONTROLLER:
            controller = ofpact_get_CONTROLLER(a);
            execute_controller_action(ctx, controller->max_len,
                                      controller->reason,
                                      controller->controller_id);
            break;

        case OFPACT_ENQUEUE:
            xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
            break;

        case OFPACT_SET_VLAN_VID:
            wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
            /* Only rewrite the VID if a VLAN tag is present, or if this
             * action is allowed to push one. */
            if (flow->vlan_tci & htons(VLAN_CFI) ||
                ofpact_get_SET_VLAN_VID(a)->push_vlan_if_needed) {
                flow->vlan_tci &= ~htons(VLAN_VID_MASK);
                flow->vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
                                   | htons(VLAN_CFI));
            }
            break;

        case OFPACT_SET_VLAN_PCP:
            wc->masks.vlan_tci |= htons(VLAN_PCP_MASK | VLAN_CFI);
            /* Same tag-presence rule as SET_VLAN_VID above. */
            if (flow->vlan_tci & htons(VLAN_CFI) ||
                ofpact_get_SET_VLAN_PCP(a)->push_vlan_if_needed) {
                flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
                flow->vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
                                         << VLAN_PCP_SHIFT) | VLAN_CFI);
            }
            break;

        case OFPACT_STRIP_VLAN:
            memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
            flow->vlan_tci = htons(0);
            break;

        case OFPACT_PUSH_VLAN:
            /* XXX 802.1AD(QinQ) */
            memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
            flow->vlan_tci = htons(VLAN_CFI);
            break;

        case OFPACT_SET_ETH_SRC:
            memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
            memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN);
            break;

        case OFPACT_SET_ETH_DST:
            memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
            memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN);
            break;

        case OFPACT_SET_IPV4_SRC:
            /* IPv4-only action: a no-op on non-IPv4 packets. */
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
                flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
            }
            break;

        case OFPACT_SET_IPV4_DST:
            if (flow->dl_type == htons(ETH_TYPE_IP)) {
                memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
                flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
            }
            break;

        case OFPACT_SET_IP_DSCP:
            /* is_ip_any() covers IPv4 and IPv6. */
            if (is_ip_any(flow)) {
                wc->masks.nw_tos |= IP_DSCP_MASK;
                flow->nw_tos &= ~IP_DSCP_MASK;
                flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp;
            }
            break;

        case OFPACT_SET_IP_ECN:
            if (is_ip_any(flow)) {
                wc->masks.nw_tos |= IP_ECN_MASK;
                flow->nw_tos &= ~IP_ECN_MASK;
                flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn;
            }
            break;

        case OFPACT_SET_IP_TTL:
            if (is_ip_any(flow)) {
                wc->masks.nw_ttl = 0xff;
                flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
            }
            break;

        case OFPACT_SET_L4_SRC_PORT:
            if (is_ip_any(flow)) {
                /* The L4 port only has meaning for a specific transport
                 * protocol, so nw_proto must be matched too. */
                memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
                memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
                flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
            }
            break;

        case OFPACT_SET_L4_DST_PORT:
            if (is_ip_any(flow)) {
                memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
                memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
                flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
            }
            break;

        case OFPACT_RESUBMIT:
            xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
            break;

        case OFPACT_SET_TUNNEL:
            /* Takes effect only if a later action outputs to a tunnel. */
            flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
            break;

        case OFPACT_SET_QUEUE:
            xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
            break;

        case OFPACT_POP_QUEUE:
            /* Restore the skb_priority saved at translation start. */
            flow->skb_priority = ctx->orig_skb_priority;
            break;

        case OFPACT_REG_MOVE:
            nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc);
            break;

        case OFPACT_REG_LOAD:
            nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow, wc);
            break;

        case OFPACT_SET_FIELD:
            set_field = ofpact_get_SET_FIELD(a);
            mf = set_field->field;
            mf_mask_field_and_prereqs(mf, &wc->masks);

            /* Set field action only ever overwrites packet's outermost
             * applicable header fields.  Do nothing if no header exists. */
            if ((mf->id != MFF_VLAN_VID || flow->vlan_tci & htons(VLAN_CFI))
                && ((mf->id != MFF_MPLS_LABEL && mf->id != MFF_MPLS_TC)
                    || flow->mpls_lse)) {
                mf_set_flow_value(mf, &set_field->value, flow);
            }
            break;

        case OFPACT_STACK_PUSH:
            nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
                                   &ctx->stack);
            break;

        case OFPACT_STACK_POP:
            nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, wc,
                                  &ctx->stack);
            break;

        case OFPACT_PUSH_MPLS:
            /* Nonzero return indicates the MPLS operation could not be
             * composed; abort translation of the rest of the list. */
            if (compose_mpls_push_action(ctx,
                                         ofpact_get_PUSH_MPLS(a)->ethertype)) {
                return;
            }
            break;

        case OFPACT_POP_MPLS:
            if (compose_mpls_pop_action(ctx,
                                        ofpact_get_POP_MPLS(a)->ethertype)) {
                return;
            }
            break;

        case OFPACT_SET_MPLS_LABEL:
            if (compose_set_mpls_label_action(ctx,
                                              ofpact_get_SET_MPLS_LABEL(a)->label)) {
                return;
            }
            break;

        case OFPACT_SET_MPLS_TC:
            if (compose_set_mpls_tc_action(ctx,
                                           ofpact_get_SET_MPLS_TC(a)->tc)) {
                return;
            }
            break;

        case OFPACT_SET_MPLS_TTL:
            if (compose_set_mpls_ttl_action(ctx,
                                            ofpact_get_SET_MPLS_TTL(a)->ttl)) {
                return;
            }
            break;

        case OFPACT_DEC_MPLS_TTL:
            if (compose_dec_mpls_ttl_action(ctx)) {
                return;
            }
            break;

        case OFPACT_DEC_TTL:
            wc->masks.nw_ttl = 0xff;
            /* compose_dec_ttl() returns nonzero when the packet is handed
             * to the controller (TTL exceeded); stop translating. */
            if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
                return;
            }
            break;

        case OFPACT_NOTE:
            /* Nothing to do. */
            break;

        case OFPACT_MULTIPATH:
            multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
            break;

        case OFPACT_BUNDLE:
            xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
            break;

        case OFPACT_OUTPUT_REG:
            xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
            break;

        case OFPACT_LEARN:
            xlate_learn_action(ctx, ofpact_get_LEARN(a));
            break;

        case OFPACT_EXIT:
            /* Checked at the top of the loop (and by callers). */
            ctx->exit = true;
            break;

        case OFPACT_FIN_TIMEOUT:
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            ctx->xout->has_fin_timeout = true;
            xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
            break;

        case OFPACT_CLEAR_ACTIONS:
            ofpbuf_clear(&ctx->action_set);
            break;

        case OFPACT_WRITE_ACTIONS:
            xlate_write_actions(ctx, a);
            break;

        case OFPACT_WRITE_METADATA:
            metadata = ofpact_get_WRITE_METADATA(a);
            flow->metadata &= ~metadata->mask;
            flow->metadata |= metadata->metadata & metadata->mask;
            break;

        case OFPACT_METER:
            /* Not implemented yet. */
            break;

        case OFPACT_GOTO_TABLE: {
            struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);

            /* OpenFlow only permits goto-table to a later table. */
            ovs_assert(ctx->table_id < ogt->table_id);
            xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
                               ogt->table_id, true);
            break;
        }

        case OFPACT_SAMPLE:
            xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
            break;
        }
    }
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
|
|
|
|
|
const struct flow *flow, struct rule_dpif *rule,
|
2013-10-28 13:54:39 -07:00
|
|
|
|
uint16_t tcp_flags, const struct ofpbuf *packet)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
|
|
|
|
xin->ofproto = ofproto;
|
|
|
|
|
xin->flow = *flow;
|
|
|
|
|
xin->packet = packet;
|
|
|
|
|
xin->may_learn = packet != NULL;
|
|
|
|
|
xin->rule = rule;
|
|
|
|
|
xin->ofpacts = NULL;
|
|
|
|
|
xin->ofpacts_len = 0;
|
|
|
|
|
xin->tcp_flags = tcp_flags;
|
|
|
|
|
xin->resubmit_hook = NULL;
|
|
|
|
|
xin->report_hook = NULL;
|
|
|
|
|
xin->resubmit_stats = NULL;
|
2013-12-05 13:09:27 -08:00
|
|
|
|
xin->skip_wildcards = false;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
xlate_out_uninit(struct xlate_out *xout)
|
|
|
|
|
{
|
|
|
|
|
if (xout) {
|
|
|
|
|
ofpbuf_uninit(&xout->odp_actions);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
|
|
|
|
|
* into datapath actions, using 'ctx', and discards the datapath actions. */
|
|
|
|
|
void
|
|
|
|
|
xlate_actions_for_side_effects(struct xlate_in *xin)
|
|
|
|
|
{
|
|
|
|
|
struct xlate_out xout;
|
|
|
|
|
|
|
|
|
|
xlate_actions(xin, &xout);
|
|
|
|
|
xlate_out_uninit(&xout);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
xlate_report(struct xlate_ctx *ctx, const char *s)
|
|
|
|
|
{
|
|
|
|
|
if (ctx->xin->report_hook) {
|
2013-06-12 12:51:52 -07:00
|
|
|
|
ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
|
|
|
|
|
{
|
|
|
|
|
dst->wc = src->wc;
|
|
|
|
|
dst->slow = src->slow;
|
|
|
|
|
dst->has_learn = src->has_learn;
|
|
|
|
|
dst->has_normal = src->has_normal;
|
|
|
|
|
dst->has_fin_timeout = src->has_fin_timeout;
|
|
|
|
|
dst->nf_output_iface = src->nf_output_iface;
|
|
|
|
|
dst->mirrors = src->mirrors;
|
|
|
|
|
|
|
|
|
|
ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
|
|
|
|
|
sizeof dst->odp_actions_stub);
|
|
|
|
|
ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
|
|
|
|
|
src->odp_actions.size);
|
|
|
|
|
}
|
|
|
|
|
|
2013-07-06 10:25:06 -07:00
|
|
|
|
static struct skb_priority_to_dscp *
|
|
|
|
|
get_skb_priority(const struct xport *xport, uint32_t skb_priority)
|
|
|
|
|
{
|
|
|
|
|
struct skb_priority_to_dscp *pdscp;
|
|
|
|
|
uint32_t hash;
|
|
|
|
|
|
|
|
|
|
hash = hash_int(skb_priority, 0);
|
|
|
|
|
HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &xport->skb_priorities) {
|
|
|
|
|
if (pdscp->skb_priority == skb_priority) {
|
|
|
|
|
return pdscp;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
dscp_from_skb_priority(const struct xport *xport, uint32_t skb_priority,
|
|
|
|
|
uint8_t *dscp)
|
|
|
|
|
{
|
|
|
|
|
struct skb_priority_to_dscp *pdscp = get_skb_priority(xport, skb_priority);
|
|
|
|
|
*dscp = pdscp ? pdscp->dscp : 0;
|
|
|
|
|
return pdscp != NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
clear_skb_priorities(struct xport *xport)
|
|
|
|
|
{
|
|
|
|
|
struct skb_priority_to_dscp *pdscp, *next;
|
|
|
|
|
|
|
|
|
|
HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &xport->skb_priorities) {
|
|
|
|
|
hmap_remove(&xport->skb_priorities, &pdscp->hmap_node);
|
|
|
|
|
free(pdscp);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-26 14:44:39 -07:00
|
|
|
|
static bool
|
|
|
|
|
actions_output_to_local_port(const struct xlate_ctx *ctx)
|
|
|
|
|
{
|
2013-06-13 18:38:24 -07:00
|
|
|
|
odp_port_t local_odp_port = ofp_port_to_odp_port(ctx->xbridge, OFPP_LOCAL);
|
2013-06-26 14:44:39 -07:00
|
|
|
|
const struct nlattr *a;
|
|
|
|
|
unsigned int left;
|
|
|
|
|
|
|
|
|
|
NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions.data,
|
|
|
|
|
ctx->xout->odp_actions.size) {
|
|
|
|
|
if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
|
|
|
|
|
&& nl_attr_get_odp_port(a) == local_odp_port) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return false;
|
|
|
|
|
}
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
2013-10-09 04:30:33 +00:00
|
|
|
|
/* Thread safe call to xlate_actions__(). */
void
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
{
    /* Hold the xlate read lock for the whole translation so the xbridge /
     * xport structures looked up inside xlate_actions__() cannot be torn
     * down concurrently. */
    ovs_rwlock_rdlock(&xlate_rwlock);
    xlate_actions__(xin, xout);
    ovs_rwlock_unlock(&xlate_rwlock);
}
|
|
|
|
|
|
2013-06-11 13:32:30 -07:00
|
|
|
|
/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
|
2013-08-20 11:16:14 -07:00
|
|
|
|
* into datapath actions in 'odp_actions', using 'ctx'.
|
|
|
|
|
*
|
|
|
|
|
* The caller must take responsibility for eventually freeing 'xout', with
|
|
|
|
|
* xlate_out_uninit(). */
|
2013-10-09 04:30:33 +00:00
|
|
|
|
static void
|
|
|
|
|
xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
|
|
|
|
|
OVS_REQ_RDLOCK(xlate_rwlock)
|
2013-06-11 13:32:30 -07:00
|
|
|
|
{
|
2013-06-12 14:37:18 -07:00
|
|
|
|
struct flow_wildcards *wc = &xout->wc;
|
|
|
|
|
struct flow *flow = &xin->flow;
|
2013-10-09 13:23:31 -07:00
|
|
|
|
struct rule_dpif *rule = NULL;
|
2013-06-12 14:37:18 -07:00
|
|
|
|
|
2013-09-09 13:05:52 -07:00
|
|
|
|
struct rule_actions *actions = NULL;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
enum slow_path_reason special;
|
|
|
|
|
const struct ofpact *ofpacts;
|
2013-06-13 18:38:24 -07:00
|
|
|
|
struct xport *in_port;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
struct flow orig_flow;
|
|
|
|
|
struct xlate_ctx ctx;
|
|
|
|
|
size_t ofpacts_len;
|
2013-08-06 12:57:14 -07:00
|
|
|
|
bool tnl_may_send;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
COVERAGE_INC(xlate_actions);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
|
|
|
|
/* Flow initialization rules:
|
|
|
|
|
* - 'base_flow' must match the kernel's view of the packet at the
|
|
|
|
|
* time that action processing starts. 'flow' represents any
|
|
|
|
|
* transformations we wish to make through actions.
|
|
|
|
|
* - By default 'base_flow' and 'flow' are the same since the input
|
|
|
|
|
* packet matches the output before any actions are applied.
|
|
|
|
|
* - When using VLAN splinters, 'base_flow''s VLAN is set to the value
|
|
|
|
|
* of the received packet as seen by the kernel. If we later output
|
|
|
|
|
* to another device without any modifications this will cause us to
|
|
|
|
|
* insert a new tag since the original one was stripped off by the
|
|
|
|
|
* VLAN device.
|
|
|
|
|
* - Tunnel metadata as received is retained in 'flow'. This allows
|
|
|
|
|
* tunnel metadata matching also in later tables.
|
|
|
|
|
* Since a kernel action for setting the tunnel metadata will only be
|
|
|
|
|
* generated with actual tunnel output, changing the tunnel metadata
|
|
|
|
|
* values in 'flow' (such as tun_id) will only have effect with a later
|
|
|
|
|
* tunnel output action.
|
|
|
|
|
* - Tunnel 'base_flow' is completely cleared since that is what the
|
|
|
|
|
* kernel does. If we wish to maintain the original values an action
|
|
|
|
|
* needs to be generated. */
|
|
|
|
|
|
|
|
|
|
ctx.xin = xin;
|
|
|
|
|
ctx.xout = xout;
|
2013-06-13 18:38:24 -07:00
|
|
|
|
ctx.xout->slow = 0;
|
|
|
|
|
ctx.xout->has_learn = false;
|
|
|
|
|
ctx.xout->has_normal = false;
|
|
|
|
|
ctx.xout->has_fin_timeout = false;
|
|
|
|
|
ctx.xout->nf_output_iface = NF_OUT_DROP;
|
|
|
|
|
ctx.xout->mirrors = 0;
|
|
|
|
|
ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
|
|
|
|
|
sizeof ctx.xout->odp_actions_stub);
|
|
|
|
|
ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);
|
|
|
|
|
|
|
|
|
|
ctx.xbridge = xbridge_lookup(xin->ofproto);
|
|
|
|
|
if (!ctx.xbridge) {
|
2013-07-21 11:31:32 -07:00
|
|
|
|
goto out;
|
2013-06-13 18:38:24 -07:00
|
|
|
|
}
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
|
|
|
|
ctx.rule = xin->rule;
|
|
|
|
|
|
2013-06-12 14:37:18 -07:00
|
|
|
|
ctx.base_flow = *flow;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
|
2013-06-12 14:37:18 -07:00
|
|
|
|
ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
2013-06-12 14:37:18 -07:00
|
|
|
|
flow_wildcards_init_catchall(wc);
|
|
|
|
|
memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
|
2013-06-18 23:55:47 -07:00
|
|
|
|
memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
|
2013-06-25 16:55:36 -07:00
|
|
|
|
memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
|
2013-06-18 23:55:47 -07:00
|
|
|
|
wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
2013-08-06 12:57:14 -07:00
|
|
|
|
tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);
|
2013-10-30 16:29:58 -07:00
|
|
|
|
if (ctx.xbridge->netflow) {
|
2013-06-26 17:13:33 -07:00
|
|
|
|
netflow_mask_wc(flow, wc);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ctx.recurse = 0;
|
2013-10-04 08:47:16 -07:00
|
|
|
|
ctx.resubmits = 0;
|
2013-06-12 14:37:18 -07:00
|
|
|
|
ctx.orig_skb_priority = flow->skb_priority;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
ctx.table_id = 0;
|
|
|
|
|
ctx.exit = false;
|
2013-09-27 06:55:19 +09:00
|
|
|
|
ctx.mpls_depth_delta = 0;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
2013-10-09 13:23:31 -07:00
|
|
|
|
if (!xin->ofpacts && !ctx.rule) {
|
2013-12-05 13:09:27 -08:00
|
|
|
|
rule_dpif_lookup(ctx.xbridge->ofproto, flow,
|
|
|
|
|
!xin->skip_wildcards ? wc : NULL, &rule);
|
2013-10-09 13:23:31 -07:00
|
|
|
|
if (ctx.xin->resubmit_stats) {
|
|
|
|
|
rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
|
|
|
|
|
}
|
|
|
|
|
ctx.rule = rule;
|
|
|
|
|
}
|
2013-10-22 21:36:22 -07:00
|
|
|
|
xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
|
2013-10-09 13:23:31 -07:00
|
|
|
|
|
2013-06-11 13:32:30 -07:00
|
|
|
|
if (xin->ofpacts) {
|
|
|
|
|
ofpacts = xin->ofpacts;
|
|
|
|
|
ofpacts_len = xin->ofpacts_len;
|
2013-10-09 13:23:31 -07:00
|
|
|
|
} else if (ctx.rule) {
|
|
|
|
|
actions = rule_dpif_get_actions(ctx.rule);
|
2013-09-09 13:05:52 -07:00
|
|
|
|
ofpacts = actions->ofpacts;
|
|
|
|
|
ofpacts_len = actions->ofpacts_len;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
} else {
|
2013-12-17 10:32:12 -08:00
|
|
|
|
OVS_NOT_REACHED();
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
|
2013-10-11 13:23:29 +09:00
|
|
|
|
ofpbuf_use_stub(&ctx.action_set,
|
|
|
|
|
ctx.action_set_stub, sizeof ctx.action_set_stub);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
2013-08-01 20:52:01 -07:00
|
|
|
|
if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
/* Do this conditionally because the copy is expensive enough that it
|
|
|
|
|
* shows up in profiles. */
|
2013-06-12 14:37:18 -07:00
|
|
|
|
orig_flow = *flow;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-06-12 14:37:18 -07:00
|
|
|
|
if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
|
2013-06-13 18:38:24 -07:00
|
|
|
|
switch (ctx.xbridge->frag) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
case OFPC_FRAG_NORMAL:
|
|
|
|
|
/* We must pretend that transport ports are unavailable. */
|
2013-06-12 14:37:18 -07:00
|
|
|
|
flow->tp_src = ctx.base_flow.tp_src = htons(0);
|
|
|
|
|
flow->tp_dst = ctx.base_flow.tp_dst = htons(0);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OFPC_FRAG_DROP:
|
2013-07-21 11:31:32 -07:00
|
|
|
|
goto out;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
|
|
|
|
case OFPC_FRAG_REASM:
|
2013-12-17 10:32:12 -08:00
|
|
|
|
OVS_NOT_REACHED();
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
|
|
|
|
case OFPC_FRAG_NX_MATCH:
|
|
|
|
|
/* Nothing to do. */
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OFPC_INVALID_TTL_TO_CONTROLLER:
|
2013-12-17 10:32:12 -08:00
|
|
|
|
OVS_NOT_REACHED();
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
|
2013-11-12 18:18:01 -08:00
|
|
|
|
if (in_port && in_port->is_tunnel && ctx.xin->resubmit_stats) {
|
|
|
|
|
netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
|
|
|
|
|
if (in_port->bfd) {
|
|
|
|
|
bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-17 18:07:33 -07:00
|
|
|
|
special = process_special(&ctx, flow, in_port, ctx.xin->packet);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
if (special) {
|
2013-09-20 12:54:51 -07:00
|
|
|
|
ctx.xout->slow |= special;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
} else {
|
|
|
|
|
size_t sample_actions_len;
|
|
|
|
|
|
2013-06-19 16:58:44 -07:00
|
|
|
|
if (flow->in_port.ofp_port
|
2013-06-13 18:38:24 -07:00
|
|
|
|
!= vsp_realdev_to_vlandev(ctx.xbridge->ofproto,
|
|
|
|
|
flow->in_port.ofp_port,
|
2013-06-12 14:37:18 -07:00
|
|
|
|
flow->vlan_tci)) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
ctx.base_flow.vlan_tci = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
add_sflow_action(&ctx);
|
|
|
|
|
add_ipfix_action(&ctx);
|
|
|
|
|
sample_actions_len = ctx.xout->odp_actions.size;
|
|
|
|
|
|
2013-08-06 12:57:14 -07:00
|
|
|
|
if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
do_xlate_actions(ofpacts, ofpacts_len, &ctx);
|
|
|
|
|
|
|
|
|
|
/* We've let OFPP_NORMAL and the learning action look at the
|
|
|
|
|
* packet, so drop it now if forwarding is disabled. */
|
2013-07-06 09:31:35 -07:00
|
|
|
|
if (in_port && !xport_stp_forward_state(in_port)) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
ctx.xout->odp_actions.size = sample_actions_len;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-10-11 13:23:29 +09:00
|
|
|
|
if (ctx.action_set.size) {
|
|
|
|
|
xlate_action_set(&ctx);
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
if (ctx.xbridge->has_in_band
|
2013-06-26 14:44:39 -07:00
|
|
|
|
&& in_band_must_output_to_local_port(flow)
|
|
|
|
|
&& !actions_output_to_local_port(&ctx)) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
compose_output_action(&ctx, OFPP_LOCAL);
|
|
|
|
|
}
|
2013-07-07 03:52:16 -07:00
|
|
|
|
|
|
|
|
|
fix_sflow_action(&ctx);
|
|
|
|
|
|
2013-06-13 18:38:24 -07:00
|
|
|
|
if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
|
2013-06-11 13:32:30 -07:00
|
|
|
|
add_mirror_actions(&ctx, &orig_flow);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-10-04 08:48:48 -07:00
|
|
|
|
if (nl_attr_oversized(ctx.xout->odp_actions.size)) {
|
|
|
|
|
/* These datapath actions are too big for a Netlink attribute, so we
|
2013-11-02 08:43:14 -07:00
|
|
|
|
* can't hand them to the kernel directly. dpif_execute() can execute
|
|
|
|
|
* them one by one with help, so just mark the result as SLOW_ACTION to
|
|
|
|
|
* prevent the flow from being installed. */
|
|
|
|
|
COVERAGE_INC(xlate_actions_oversize);
|
|
|
|
|
ctx.xout->slow |= SLOW_ACTION;
|
2013-10-04 08:48:48 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-11-12 18:18:01 -08:00
|
|
|
|
if (ctx.xin->resubmit_stats) {
|
|
|
|
|
mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
|
|
|
|
|
ctx.xin->resubmit_stats->n_packets,
|
|
|
|
|
ctx.xin->resubmit_stats->n_bytes);
|
|
|
|
|
|
|
|
|
|
if (ctx.xbridge->netflow) {
|
|
|
|
|
const struct ofpact *ofpacts;
|
|
|
|
|
size_t ofpacts_len;
|
|
|
|
|
|
|
|
|
|
ofpacts_len = actions->ofpacts_len;
|
|
|
|
|
ofpacts = actions->ofpacts;
|
|
|
|
|
if (ofpacts_len == 0
|
|
|
|
|
|| ofpacts->type != OFPACT_CONTROLLER
|
|
|
|
|
|| ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
|
|
|
|
|
/* Only update netflow if we don't have controller flow. We don't
|
|
|
|
|
* report NetFlow expiration messages for such facets because they
|
|
|
|
|
* are just part of the control logic for the network, not real
|
|
|
|
|
* traffic. */
|
|
|
|
|
netflow_flow_update(ctx.xbridge->netflow, flow,
|
|
|
|
|
xout->nf_output_iface,
|
|
|
|
|
ctx.xin->resubmit_stats);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-11 13:32:30 -07:00
|
|
|
|
ofpbuf_uninit(&ctx.stack);
|
2013-10-11 13:23:29 +09:00
|
|
|
|
ofpbuf_uninit(&ctx.action_set);
|
2013-06-11 13:32:30 -07:00
|
|
|
|
|
|
|
|
|
/* Clear the metadata and register wildcard masks, because we won't
|
|
|
|
|
* use non-header fields as part of the cache. */
|
2013-12-10 23:32:51 -08:00
|
|
|
|
flow_wildcards_clear_non_packet_fields(wc);
|
2013-07-21 11:31:32 -07:00
|
|
|
|
|
|
|
|
|
out:
|
2013-10-09 04:30:33 +00:00
|
|
|
|
rule_actions_unref(actions);
|
2013-10-09 13:23:31 -07:00
|
|
|
|
rule_dpif_unref(rule);
|
2013-10-09 04:30:33 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Sends 'packet' out 'ofport'.
|
|
|
|
|
* May modify 'packet'.
|
|
|
|
|
* Returns 0 if successful, otherwise a positive errno value. */
|
|
|
|
|
int
|
|
|
|
|
xlate_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
|
|
|
|
|
{
|
|
|
|
|
struct xport *xport;
|
|
|
|
|
struct ofpact_output output;
|
|
|
|
|
struct flow flow;
|
|
|
|
|
union flow_in_port in_port_;
|
|
|
|
|
int error;
|
|
|
|
|
|
|
|
|
|
ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
|
|
|
|
|
/* Use OFPP_NONE as the in_port to avoid special packet processing. */
|
|
|
|
|
in_port_.ofp_port = OFPP_NONE;
|
|
|
|
|
flow_extract(packet, 0, 0, NULL, &in_port_, &flow);
|
|
|
|
|
|
|
|
|
|
ovs_rwlock_rdlock(&xlate_rwlock);
|
|
|
|
|
xport = xport_lookup(ofport);
|
|
|
|
|
if (!xport) {
|
|
|
|
|
ovs_rwlock_unlock(&xlate_rwlock);
|
2013-10-11 14:17:13 -07:00
|
|
|
|
return EINVAL;
|
2013-10-09 04:30:33 +00:00
|
|
|
|
}
|
|
|
|
|
output.port = xport->ofp_port;
|
|
|
|
|
output.max_len = 0;
|
2013-10-11 14:58:36 -07:00
|
|
|
|
error = ofproto_dpif_execute_actions(xport->xbridge->ofproto, &flow, NULL,
|
|
|
|
|
&output.ofpact, sizeof output,
|
|
|
|
|
packet);
|
2013-07-21 11:31:32 -07:00
|
|
|
|
ovs_rwlock_unlock(&xlate_rwlock);
|
2013-10-09 04:30:33 +00:00
|
|
|
|
return error;
|
2013-06-11 13:32:30 -07:00
|
|
|
|
}
|