2009-07-08 13:19:16 -07:00
|
|
|
/*
|
2014-04-08 11:13:42 +00:00
|
|
|
* Copyright (c) 2007-2014 Nicira, Inc.
|
2009-06-15 15:11:30 -07:00
|
|
|
*
|
2011-11-16 13:39:40 -08:00
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
|
* modify it under the terms of version 2 of the GNU General Public
|
|
|
|
|
* License as published by the Free Software Foundation.
|
|
|
|
|
*
|
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
|
* General Public License for more details.
|
|
|
|
|
*
|
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
|
|
|
|
* 02110-1301, USA
|
2009-07-08 13:19:16 -07:00
|
|
|
*/
|
|
|
|
|
|
2011-10-06 21:52:39 -07:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <linux/skbuff.h>
|
|
|
|
|
#include <linux/in.h>
|
|
|
|
|
#include <linux/ip.h>
|
2011-10-05 10:50:58 -07:00
|
|
|
#include <linux/openvswitch.h>
|
2013-08-22 20:24:43 +12:00
|
|
|
#include <linux/sctp.h>
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <linux/tcp.h>
|
|
|
|
|
#include <linux/udp.h>
|
|
|
|
|
#include <linux/in6.h>
|
2010-08-24 16:00:27 -07:00
|
|
|
#include <linux/if_arp.h>
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <linux/if_vlan.h>
|
|
|
|
|
#include <net/ip.h>
|
2012-11-05 15:53:32 +02:00
|
|
|
#include <net/ipv6.h>
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <net/checksum.h>
|
2011-11-02 23:34:15 -07:00
|
|
|
#include <net/dsfield.h>
|
2013-08-22 20:24:43 +12:00
|
|
|
#include <net/sctp/checksum.h>
|
2010-04-12 15:53:39 -04:00
|
|
|
|
|
|
|
|
#include "datapath.h"
|
2014-06-24 20:56:57 +09:00
|
|
|
#include "gso.h"
|
|
|
|
|
#include "mpls.h"
|
2010-12-29 22:13:15 -08:00
|
|
|
#include "vlan.h"
|
2010-04-12 15:53:39 -04:00
|
|
|
#include "vport.h"
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2014-08-11 00:14:05 -07:00
|
|
|
/* One entry in the per-CPU deferred-action fifo: a packet plus the
 * action list that must still be executed on it. */
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};
|
|
|
|
|
|
|
|
|
|
/* Maximum number of deferred actions queued per CPU at any time. */
#define DEFERRED_ACTION_FIFO_SIZE 10
/* Simple fixed-size fifo of deferred actions; head is the producer
 * index, tail the consumer index (no wrap-around: drained then reset). */
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};
|
|
|
|
|
|
|
|
|
|
/* Per-CPU deferred-action queues; allocated at module init. */
static struct action_fifo __percpu *action_fifos;
#define EXEC_ACTIONS_LEVEL_LIMIT 4 /* limit used to detect packet
				      looping by the network stack */
/* Per-CPU recursion depth of do_execute_actions(). */
static DEFINE_PER_CPU(int, exec_actions_level);
|
|
|
|
|
|
|
|
|
|
static void action_fifo_init(struct action_fifo *fifo)
|
|
|
|
|
{
|
|
|
|
|
fifo->head = 0;
|
|
|
|
|
fifo->tail = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool action_fifo_is_empty(struct action_fifo *fifo)
|
|
|
|
|
{
|
|
|
|
|
return (fifo->head == fifo->tail);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct deferred_action *
|
|
|
|
|
action_fifo_get(struct action_fifo *fifo)
|
|
|
|
|
{
|
|
|
|
|
if (action_fifo_is_empty(fifo))
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
return &fifo->fifo[fifo->tail++];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct deferred_action *
|
|
|
|
|
action_fifo_put(struct action_fifo *fifo)
|
|
|
|
|
{
|
|
|
|
|
if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
return &fifo->fifo[fifo->head++];
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-11 14:02:45 -07:00
|
|
|
/* Copy the skb's current packet key into @new_key and repoint the skb at
 * the copy, so later key updates do not modify the original key. */
static void flow_key_clone(struct sk_buff *skb, struct sw_flow_key *new_key)
{
	*new_key = *OVS_CB(skb)->pkt_key;
	OVS_CB(skb)->pkt_key = new_key;
}
|
|
|
|
|
|
2014-08-11 00:14:05 -07:00
|
|
|
/* Return true if fifo is not full */
|
|
|
|
|
static bool add_deferred_actions(struct sk_buff *skb,
|
|
|
|
|
const struct nlattr *attr)
|
|
|
|
|
{
|
|
|
|
|
struct action_fifo *fifo;
|
|
|
|
|
struct deferred_action *da;
|
|
|
|
|
|
|
|
|
|
fifo = this_cpu_ptr(action_fifos);
|
|
|
|
|
da = action_fifo_put(fifo);
|
|
|
|
|
if (da) {
|
|
|
|
|
da->skb = skb;
|
|
|
|
|
da->actions = attr;
|
|
|
|
|
flow_key_clone(skb, &da->pkt_key);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return (da != NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2014-08-11 14:02:45 -07:00
|
|
|
/* Keep the cached flow key's recirculation id in sync with the packet. */
static void flow_key_set_recirc_id(struct sk_buff *skb, u32 recirc_id)
{
	OVS_CB(skb)->pkt_key->recirc_id = recirc_id;
}
|
|
|
|
|
|
2014-08-07 15:46:19 -07:00
|
|
|
/* Keep the cached flow key's QoS priority in sync with the packet. */
static void flow_key_set_priority(struct sk_buff *skb, u32 priority)
{
	OVS_CB(skb)->pkt_key->phy.priority = priority;
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's skb mark in sync with the packet. */
static void flow_key_set_skb_mark(struct sk_buff *skb, u32 skb_mark)
{
	OVS_CB(skb)->pkt_key->phy.skb_mark = skb_mark;
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's ethernet source address in sync. */
static void flow_key_set_eth_src(struct sk_buff *skb, const u8 addr[])
{
	ether_addr_copy(OVS_CB(skb)->pkt_key->eth.src, addr);
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's ethernet destination address in sync. */
static void flow_key_set_eth_dst(struct sk_buff *skb, const u8 addr[])
{
	ether_addr_copy(OVS_CB(skb)->pkt_key->eth.dst, addr);
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's VLAN TCI in sync with the packet. */
static void flow_key_set_vlan_tci(struct sk_buff *skb, __be16 tci)
{
	OVS_CB(skb)->pkt_key->eth.tci = tci;
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's top MPLS label stack entry in sync. */
static void flow_key_set_mpls_top_lse(struct sk_buff *skb, __be32 top_lse)
{
	OVS_CB(skb)->pkt_key->mpls.top_lse = top_lse;
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's IPv4 source address in sync. */
static void flow_key_set_ipv4_src(struct sk_buff *skb, __be32 addr)
{
	OVS_CB(skb)->pkt_key->ipv4.addr.src = addr;
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's IPv4 destination address in sync.
 *
 * Bug fix: this previously wrote ipv4.addr.src (copy-paste from the
 * _src setter), leaving the cached key's destination stale — and its
 * source corrupted — after an OVS_KEY_ATTR_IPV4 dst rewrite. */
static void flow_key_set_ipv4_dst(struct sk_buff *skb, __be32 addr)
{
	OVS_CB(skb)->pkt_key->ipv4.addr.dst = addr;
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's IP ToS/traffic-class byte in sync. */
static void flow_key_set_ip_tos(struct sk_buff *skb, u8 tos)
{
	OVS_CB(skb)->pkt_key->ip.tos = tos;
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's TTL / hop limit in sync. */
static void flow_key_set_ip_ttl(struct sk_buff *skb, u8 ttl)
{
	OVS_CB(skb)->pkt_key->ip.ttl = ttl;
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's IPv6 source address (128 bits) in sync. */
static void flow_key_set_ipv6_src(struct sk_buff *skb,
				  const __be32 addr[4])
{
	memcpy(&OVS_CB(skb)->pkt_key->ipv6.addr.src, addr, sizeof(__be32[4]));
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's IPv6 destination address (128 bits) in sync. */
static void flow_key_set_ipv6_dst(struct sk_buff *skb,
				  const __be32 addr[4])
{
	memcpy(&OVS_CB(skb)->pkt_key->ipv6.addr.dst, addr, sizeof(__be32[4]));
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's IPv6 flow label in sync, extracting the
 * 20-bit label from the first word of the IPv6 header @nh. */
static void flow_key_set_ipv6_fl(struct sk_buff *skb,
				 const struct ipv6hdr *nh)
{
	OVS_CB(skb)->pkt_key->ipv6.label = *(__be32 *)nh &
					   htonl(IPV6_FLOWINFO_FLOWLABEL);
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's transport source port in sync. */
static void flow_key_set_tp_src(struct sk_buff *skb, __be16 port)
{
	OVS_CB(skb)->pkt_key->tp.src = port;
}
|
|
|
|
|
|
|
|
|
|
/* Keep the cached flow key's transport destination port in sync. */
static void flow_key_set_tp_dst(struct sk_buff *skb, __be16 port)
{
	OVS_CB(skb)->pkt_key->tp.dst = port;
}
|
|
|
|
|
|
|
|
|
|
/* Mark the cached flow key stale by zeroing its ethertype; a zero
 * ethertype is the sentinel tested by is_skb_flow_key_valid().  Used
 * after actions that restructure the packet (e.g. MPLS/VLAN push/pop). */
static void invalidate_skb_flow_key(struct sk_buff *skb)
{
	OVS_CB(skb)->pkt_key->eth.type = htons(0);
}
|
|
|
|
|
|
|
|
|
|
/* True while the cached flow key still matches the packet contents
 * (i.e. the ethertype sentinel has not been zeroed). */
static bool is_skb_flow_key_valid(struct sk_buff *skb)
{
	return !!OVS_CB(skb)->pkt_key->eth.type;
}
|
|
|
|
|
|
2011-09-28 10:43:07 -07:00
|
|
|
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
|
2014-05-15 09:05:03 +09:00
|
|
|
const struct nlattr *attr, int len);
|
2010-12-23 09:35:15 -08:00
|
|
|
|
2011-06-06 19:17:25 -07:00
|
|
|
/* Ensure the first @write_len bytes of @skb are in the linear data area
 * and safe to modify (not shared with a clone).
 *
 * Returns 0 on success, negative errno on failure.  May reallocate the
 * skb head, so cached header pointers must be re-derived afterwards. */
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	/* Cloned and not clone-writable: unshare the header. */
	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
|
|
|
|
|
|
2014-06-24 20:56:57 +09:00
|
|
|
/* The end of the mac header.
|
|
|
|
|
*
|
|
|
|
|
* For non-MPLS skbs this will correspond to the network header.
|
|
|
|
|
* For MPLS skbs it will be before the network_header as the MPLS
|
|
|
|
|
* label stack lies between the end of the mac header and the network
|
|
|
|
|
* header. That is, for MPLS skbs the end of the mac header
|
|
|
|
|
* is the top of the MPLS label stack.
|
|
|
|
|
*/
|
|
|
|
|
static unsigned char *mac_header_end(const struct sk_buff *skb)
|
|
|
|
|
{
|
|
|
|
|
return skb_mac_header(skb) + skb->mac_len;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Push a new outermost MPLS label stack entry onto @skb
 * (OVS_ACTION_ATTR_PUSH_MPLS): open MPLS_HLEN bytes of headroom, slide
 * the mac header down, write the new LSE between mac and network
 * headers, and switch the ethertype to the MPLS ethertype.
 *
 * Returns 0 on success or -ENOMEM.  Invalidates the cached flow key
 * because the packet structure changed. */
static int push_mpls(struct sk_buff *skb,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)mac_header_end(skb);
	*new_mpls_lse = mpls->mpls_lse;

	/* Keep CHECKSUM_COMPLETE valid: account for the inserted bytes. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;
	/* Preserve the pre-MPLS protocol for later GSO handling. */
	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;
	invalidate_skb_flow_key(skb);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* Pop the outermost MPLS label stack entry from @skb and set the
 * ethernet header's protocol to @ethertype
 * (OVS_ACTION_ATTR_POP_MPLS).  Returns 0 or a negative errno.
 * Invalidates the cached flow key. */
static int pop_mpls(struct sk_buff *skb, const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	/* Keep CHECKSUM_COMPLETE valid: subtract the removed bytes. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(mac_header_end(skb),
						  MPLS_HLEN, 0));

	/* Slide the mac header up over the popped LSE. */
	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* mac_header_end() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(mac_header_end(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;
	invalidate_skb_flow_key(skb);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* Overwrite the outermost MPLS label stack entry in place with
 * *@mpls_lse, fixing up CHECKSUM_COMPLETE via a difference checksum.
 * Returns 0 or a negative errno; updates the cached flow key. */
static int set_mpls(struct sk_buff *skb, const __be32 *mpls_lse)
{
	__be32 *stack = (__be32 *)mac_header_end(skb);
	int err;

	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		/* Fold out the old LSE and fold in the new one. */
		__be32 diff[] = { ~(*stack), *mpls_lse };
		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = *mpls_lse;
	flow_key_set_mpls_top_lse(skb, *stack);
	return 0;
}
|
|
|
|
|
|
2012-08-31 15:35:49 +12:00
|
|
|
/* remove VLAN header from packet and update csum accordingly.
 * Stores the removed tag's TCI (network order) in *@current_tci and
 * adjusts mac_header/mac_len for the 4 removed bytes.
 * Returns 0 or a negative errno. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	/* Keep CHECKSUM_COMPLETE valid: subtract the removed tag bytes. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	/* Slide the two MAC addresses over the removed tag. */
	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;
	/* Update mac_len for subsequent MPLS actions */
	skb->mac_len -= VLAN_HLEN;

	return 0;
}
|
|
|
|
|
|
2011-09-09 18:13:26 -07:00
|
|
|
/* OVS_ACTION_ATTR_POP_VLAN: remove the outermost VLAN tag.  Clears the
 * hardware-accelerated tag if present, otherwise strips an in-packet
 * 802.1Q header; a following in-packet tag, if any, is promoted to the
 * hw-accel slot.  Returns 0 or a negative errno. */
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		vlan_set_tci(skb, 0);
	} else {
		/* No tag at all (or truncated frame): nothing to pop. */
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN)) {
		/* Single-tagged packet: key now has no VLAN. */
		flow_key_set_vlan_tci(skb, 0);
		return 0;
	}

	/* Double-tagged: the inner tag becomes visible, key is stale. */
	invalidate_skb_flow_key(skb);
	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
	return 0;
}
|
|
|
|
|
|
2011-11-14 15:56:43 -08:00
|
|
|
/* OVS_ACTION_ATTR_PUSH_VLAN: install @vlan as the hw-accelerated tag.
 * If a hw-accel tag is already present it is first written into the
 * packet data so the new tag becomes the outer one.
 * Returns 0 or -ENOMEM. */
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
			return -ENOMEM;

		/* Update mac_len for subsequent MPLS actions */
		skb->mac_len += VLAN_HLEN;

		/* Keep CHECKSUM_COMPLETE valid: add the inserted tag bytes. */
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

		/* Packet structure changed: the cached key is stale. */
		invalidate_skb_flow_key(skb);
	} else {
		flow_key_set_vlan_tci(skb, vlan->vlan_tci);
	}
	/* TCI is stored without the userspace-only TAG_PRESENT bit. */
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Rewrite the ethernet source and destination addresses from @eth_key,
 * fixing up any CHECKSUM_COMPLETE value around the rewrite and keeping
 * the cached flow key in sync.  Returns 0 or a negative errno. */
static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;
	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	/* Remove the old addresses' contribution to the checksum... */
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	/* ...and add back the new addresses' contribution. */
	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	flow_key_set_eth_src(skb, eth_key->eth_src);
	flow_key_set_eth_dst(skb, eth_key->eth_dst);
	return 0;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Rewrite one IPv4 address (*@addr -> @new_addr) in header @nh,
 * updating the TCP/UDP pseudo-header checksum (when the transport
 * header is present) and the IP header checksum.  The skb hash is
 * cleared because it may depend on the address. */
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			/* UDP checksum 0 means "none"; only update a real one. */
			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}
|
|
|
|
|
|
2012-11-05 15:53:32 +02:00
|
|
|
/* Fix up the L4 checksum (TCP, UDP or ICMPv6) after an IPv6 address
 * change from @addr to @new_addr, when enough of the transport header
 * is present to reach the checksum field. */
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			/* UDP checksum 0 means "none"; only update a real one. */
			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}
|
|
|
|
|
|
|
|
|
|
/* Replace a 128-bit IPv6 address in the packet, optionally fixing the
 * L4 checksum (skipped e.g. when a routing header makes the final
 * destination differ).  Clears the skb hash. */
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (likely(recalculate_csum))
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
|
|
|
|
|
|
|
|
|
|
/* Write the 8-bit traffic class @tc, which straddles the first two
 * bytes of the IPv6 header: high nibble into 'priority', low nibble
 * into the top of flow_lbl[0]. */
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
	u8 low_nibble = tc & 0x0F;

	nh->priority = tc >> 4;
	nh->flow_lbl[0] = (low_nibble << 4) | (nh->flow_lbl[0] & 0x0F);
}
|
|
|
|
|
|
|
|
|
|
/* Write the 20-bit flow label @fl across flow_lbl[0..2], preserving
 * the traffic-class nibble in the top half of flow_lbl[0]. */
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | ((fl >> 16) & 0x0F);
	nh->flow_lbl[1] = (fl >> 8) & 0xFF;
	nh->flow_lbl[2] = fl & 0xFF;
}
|
|
|
|
|
|
2011-11-05 15:48:12 -07:00
|
|
|
/* Rewrite the IPv4 TTL, updating the header checksum incrementally
 * (ttl shares a 16-bit checksum word, hence the << 8). */
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Apply an OVS_KEY_ATTR_IPV4 set action: rewrite src/dst address, ToS
 * and TTL where they differ from the packet, updating checksums and the
 * cached flow key for each field changed.  Returns 0 or negative errno. */
static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr) {
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
		flow_key_set_ipv4_src(skb, ipv4_key->ipv4_src);
	}

	if (ipv4_key->ipv4_dst != nh->daddr) {
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
		flow_key_set_ipv4_dst(skb, ipv4_key->ipv4_dst);
	}

	if (ipv4_key->ipv4_tos != nh->tos) {
		/* ipv4_change_dsfield also fixes the header checksum. */
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
		flow_key_set_ip_tos(skb, nh->tos);
	}

	if (ipv4_key->ipv4_ttl != nh->ttl) {
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
		flow_key_set_ip_ttl(skb, ipv4_key->ipv4_ttl);
	}

	return 0;
}
|
|
|
|
|
|
2012-11-05 15:53:32 +02:00
|
|
|
/* Apply an OVS_KEY_ATTR_IPV6 set action: rewrite addresses (with L4
 * checksum fixups), traffic class, flow label and hop limit, keeping
 * the cached flow key in sync.  Returns 0 or a negative errno. */
static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
	struct ipv6hdr *nh;
	int err;
	__be32 *saddr;
	__be32 *daddr;

	err = make_writable(skb, skb_network_offset(skb) +
			    sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	saddr = (__be32 *)&nh->saddr;
	daddr = (__be32 *)&nh->daddr;

	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
			      ipv6_key->ipv6_src, true);
		flow_key_set_ipv6_src(skb, ipv6_key->ipv6_src);
	}

	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
		unsigned int offset = 0;
		int flags = OVS_IP6T_FH_F_SKIP_RH;
		bool recalc_csum = true;

		/* With a routing header the pseudo-header uses the final
		 * destination, not this field, so skip the csum fixup. */
		if (ipv6_ext_hdr(nh->nexthdr))
			recalc_csum = ipv6_find_hdr(skb, &offset,
						    NEXTHDR_ROUTING, NULL,
						    &flags) != NEXTHDR_ROUTING;

		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
			      ipv6_key->ipv6_dst, recalc_csum);
		flow_key_set_ipv6_dst(skb, ipv6_key->ipv6_dst);
	}

	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
	flow_key_set_ip_tos(skb, ipv6_get_dsfield(nh));

	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
	flow_key_set_ipv6_fl(skb, nh);

	nh->hop_limit = ipv6_key->ipv6_hlimit;
	flow_key_set_ip_ttl(skb, ipv6_key->ipv6_hlimit);
	return 0;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Must follow make_writable() since that can move the skb data.
 * Rewrite a TCP/UDP port and incrementally update the checksum at
 * *@check; clears the skb hash, which may depend on the port. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_hash(skb);
}
|
2011-06-06 19:17:25 -07:00
|
|
|
|
2012-03-06 13:09:13 -08:00
|
|
|
/* Rewrite a UDP port.  A zero UDP checksum means "no checksum": only
 * maintain it when one is actually in use (or being offloaded). */
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		/* A computed 0 must be sent as 0xffff (RFC 768 semantics). */
		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb_clear_hash(skb);
	}
}
|
|
|
|
|
|
|
|
|
|
/* Apply an OVS_KEY_ATTR_UDP set action: rewrite source/destination
 * ports that differ and sync the cached flow key.
 * Returns 0 or a negative errno. */
static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source) {
		set_udp_port(skb, &uh->source, udp_port_key->udp_src);
		flow_key_set_tp_src(skb, udp_port_key->udp_src);
	}

	if (udp_port_key->udp_dst != uh->dest) {
		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
		flow_key_set_tp_dst(skb, udp_port_key->udp_dst);
	}

	return 0;
}
|
|
|
|
|
|
2012-03-06 13:09:13 -08:00
|
|
|
/* Apply an OVS_KEY_ATTR_TCP set action: rewrite source/destination
 * ports that differ (with checksum fixup) and sync the cached flow
 * key.  Returns 0 or a negative errno. */
static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source) {
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
		flow_key_set_tp_src(skb, tcp_port_key->tcp_src);
	}

	if (tcp_port_key->tcp_dst != th->dest) {
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
		flow_key_set_tp_dst(skb, tcp_port_key->tcp_dst);
	}

	return 0;
}
|
|
|
|
|
|
2013-08-22 20:24:43 +12:00
|
|
|
/* Apply an OVS_KEY_ATTR_SCTP set action: rewrite both ports when
 * either differs, recomputing the CRC32c checksum while preserving any
 * pre-existing checksum error.  Returns 0 or a negative errno. */
static int set_sctp(struct sk_buff *skb,
		    const struct ovs_key_sctp *sctp_port_key)
{
	struct sctphdr *sh;
	int err;
	unsigned int sctphoff = skb_transport_offset(skb);

	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	if (sctp_port_key->sctp_src != sh->source ||
	    sctp_port_key->sctp_dst != sh->dest) {
		__le32 old_correct_csum, new_csum, old_csum;

		old_csum = sh->checksum;
		old_correct_csum = sctp_compute_cksum(skb, sctphoff);

		sh->source = sctp_port_key->sctp_src;
		sh->dest = sctp_port_key->sctp_dst;

		new_csum = sctp_compute_cksum(skb, sctphoff);

		/* Carry any checksum errors through. */
		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

		skb_clear_hash(skb);
		flow_key_set_tp_src(skb, sctp_port_key->sctp_src);
		flow_key_set_tp_dst(skb, sctp_port_key->sctp_dst);
	}

	return 0;
}
|
|
|
|
|
|
2014-07-10 00:50:26 -07:00
|
|
|
/* OVS_ACTION_ATTR_OUTPUT: transmit @skb on port @out_port of @dp.
 * Consumes the skb in all cases — it is freed if the port has gone away. */
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport))
		ovs_vport_send(vport, skb);
	else
		kfree_skb(skb);
}
|
|
|
|
|
|
2011-10-12 16:24:54 -07:00
|
|
|
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
|
const struct nlattr *attr)
|
2009-07-08 13:19:16 -07:00
|
|
|
{
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
struct dp_upcall_info upcall;
|
2011-10-12 16:24:54 -07:00
|
|
|
const struct nlattr *a;
|
|
|
|
|
int rem;
|
2014-08-17 20:19:36 -07:00
|
|
|
struct ovs_tunnel_info info;
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
2011-08-18 10:35:40 -07:00
|
|
|
upcall.cmd = OVS_PACKET_CMD_ACTION;
|
2011-10-12 16:24:54 -07:00
|
|
|
upcall.userdata = NULL;
|
2012-12-19 17:43:09 +09:00
|
|
|
upcall.portid = 0;
|
2014-08-17 20:19:36 -07:00
|
|
|
upcall.egress_tun_info = NULL;
|
2011-10-12 16:24:54 -07:00
|
|
|
|
|
|
|
|
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
|
|
|
|
|
a = nla_next(a, &rem)) {
|
|
|
|
|
switch (nla_type(a)) {
|
|
|
|
|
case OVS_USERSPACE_ATTR_USERDATA:
|
|
|
|
|
upcall.userdata = a;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_USERSPACE_ATTR_PID:
|
2012-12-19 17:43:09 +09:00
|
|
|
upcall.portid = nla_get_u32(a);
|
2011-10-12 16:24:54 -07:00
|
|
|
break;
|
2014-08-17 20:19:36 -07:00
|
|
|
|
|
|
|
|
case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
|
|
|
|
|
/* Get out tunnel info. */
|
|
|
|
|
struct vport *vport;
|
|
|
|
|
|
|
|
|
|
vport = ovs_vport_rcu(dp, nla_get_u32(a));
|
|
|
|
|
if (vport) {
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
err = ovs_vport_get_egress_tun_info(vport, skb,
|
|
|
|
|
&info);
|
|
|
|
|
if (!err)
|
|
|
|
|
upcall.egress_tun_info = &info;
|
|
|
|
|
}
|
|
|
|
|
break;
|
2011-10-12 16:24:54 -07:00
|
|
|
}
|
2014-08-17 20:19:36 -07:00
|
|
|
|
|
|
|
|
} /* End of switch. */
|
2011-10-12 16:24:54 -07:00
|
|
|
}
|
|
|
|
|
|
2011-11-21 17:15:20 -08:00
|
|
|
return ovs_dp_upcall(dp, skb, &upcall);
|
2009-07-08 13:19:16 -07:00
|
|
|
}
|
|
|
|
|
|
2014-05-15 09:05:03 +09:00
|
|
|
static bool last_action(const struct nlattr *a, int rem)
|
|
|
|
|
{
|
|
|
|
|
return a->nla_len == rem;
|
|
|
|
|
}
|
|
|
|
|
|
2011-09-28 10:43:07 -07:00
|
|
|
static int sample(struct datapath *dp, struct sk_buff *skb,
|
2013-03-04 13:00:25 -08:00
|
|
|
const struct nlattr *attr)
|
2011-09-28 10:43:07 -07:00
|
|
|
{
|
|
|
|
|
const struct nlattr *acts_list = NULL;
|
|
|
|
|
const struct nlattr *a;
|
|
|
|
|
int rem;
|
|
|
|
|
|
|
|
|
|
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
|
|
|
|
|
a = nla_next(a, &rem)) {
|
|
|
|
|
switch (nla_type(a)) {
|
|
|
|
|
case OVS_SAMPLE_ATTR_PROBABILITY:
|
2014-05-01 15:50:48 -07:00
|
|
|
if (prandom_u32() >= nla_get_u32(a))
|
2011-09-28 10:43:07 -07:00
|
|
|
return 0;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_SAMPLE_ATTR_ACTIONS:
|
|
|
|
|
acts_list = a;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-05-15 09:05:03 +09:00
|
|
|
rem = nla_len(acts_list);
|
|
|
|
|
a = nla_data(acts_list);
|
|
|
|
|
|
2014-08-29 13:20:23 -07:00
|
|
|
/* Actions list is empty, do nothing */
|
|
|
|
|
if (unlikely(!rem))
|
|
|
|
|
return 0;
|
2014-08-07 15:46:19 -07:00
|
|
|
|
2014-08-29 13:20:23 -07:00
|
|
|
/* The only known usage of sample action is having a single user-space
|
|
|
|
|
* action. Treat this usage as a special case.
|
|
|
|
|
* The output_userspace() should clone the skb to be sent to the
|
|
|
|
|
* user space. This skb will be consumed by its caller. */
|
|
|
|
|
if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
|
|
|
|
|
last_action(a, rem)))
|
|
|
|
|
return output_userspace(dp, skb, a);
|
|
|
|
|
|
|
|
|
|
skb = skb_clone(skb, GFP_ATOMIC);
|
|
|
|
|
if (!skb)
|
|
|
|
|
/* Skip the sample action when out of memory. */
|
|
|
|
|
return 0;
|
|
|
|
|
|
2014-08-11 00:14:05 -07:00
|
|
|
if (!add_deferred_actions(skb, a)) {
|
|
|
|
|
if (net_ratelimit())
|
|
|
|
|
pr_warn("%s: deferred actions limit reached, dropping sample action\n",
|
|
|
|
|
ovs_dp_name(dp));
|
2014-05-15 09:05:03 +09:00
|
|
|
|
2014-08-11 00:14:05 -07:00
|
|
|
kfree_skb(skb);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
2011-09-28 10:43:07 -07:00
|
|
|
}
|
|
|
|
|
|
2014-04-11 01:41:18 -07:00
|
|
|
static void execute_hash(struct sk_buff *skb, const struct nlattr *attr)
|
|
|
|
|
{
|
|
|
|
|
struct sw_flow_key *key = OVS_CB(skb)->pkt_key;
|
|
|
|
|
struct ovs_action_hash *hash_act = nla_data(attr);
|
|
|
|
|
u32 hash = 0;
|
|
|
|
|
|
|
|
|
|
/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
|
2014-05-01 15:50:48 -07:00
|
|
|
hash = skb_get_hash(skb);
|
2014-04-11 01:41:18 -07:00
|
|
|
hash = jhash_1word(hash, hash_act->hash_basis);
|
|
|
|
|
if (!hash)
|
|
|
|
|
hash = 0x1;
|
|
|
|
|
|
|
|
|
|
key->ovs_flow_hash = hash;
|
|
|
|
|
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
static int execute_set_action(struct sk_buff *skb,
|
2014-08-11 00:14:05 -07:00
|
|
|
const struct nlattr *nested_attr)
|
2011-10-21 14:38:54 -07:00
|
|
|
{
|
2011-10-21 15:19:33 -07:00
|
|
|
int err = 0;
|
2011-10-21 14:38:54 -07:00
|
|
|
|
|
|
|
|
switch (nla_type(nested_attr)) {
|
2011-11-01 10:13:16 -07:00
|
|
|
case OVS_KEY_ATTR_PRIORITY:
|
|
|
|
|
skb->priority = nla_get_u32(nested_attr);
|
2014-08-07 15:46:19 -07:00
|
|
|
flow_key_set_priority(skb, skb->priority);
|
2011-11-01 10:13:16 -07:00
|
|
|
break;
|
|
|
|
|
|
2012-11-13 19:19:36 +02:00
|
|
|
case OVS_KEY_ATTR_SKB_MARK:
|
2013-08-26 13:27:02 -07:00
|
|
|
skb->mark = nla_get_u32(nested_attr);
|
2014-08-07 15:46:19 -07:00
|
|
|
flow_key_set_skb_mark(skb, skb->mark);
|
2012-11-13 19:19:36 +02:00
|
|
|
break;
|
|
|
|
|
|
2014-05-27 18:15:59 -07:00
|
|
|
case OVS_KEY_ATTR_TUNNEL_INFO:
|
2014-08-05 13:49:57 -07:00
|
|
|
OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
|
2011-10-21 14:38:54 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_KEY_ATTR_ETHERNET:
|
|
|
|
|
err = set_eth_addr(skb, nla_data(nested_attr));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_KEY_ATTR_IPV4:
|
|
|
|
|
err = set_ipv4(skb, nla_data(nested_attr));
|
|
|
|
|
break;
|
|
|
|
|
|
2012-11-05 15:53:32 +02:00
|
|
|
case OVS_KEY_ATTR_IPV6:
|
|
|
|
|
err = set_ipv6(skb, nla_data(nested_attr));
|
|
|
|
|
break;
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
case OVS_KEY_ATTR_TCP:
|
2012-03-06 13:09:13 -08:00
|
|
|
err = set_tcp(skb, nla_data(nested_attr));
|
2011-10-21 14:38:54 -07:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_KEY_ATTR_UDP:
|
2012-03-06 13:09:13 -08:00
|
|
|
err = set_udp(skb, nla_data(nested_attr));
|
2011-10-21 14:38:54 -07:00
|
|
|
break;
|
2013-08-22 20:24:43 +12:00
|
|
|
|
|
|
|
|
case OVS_KEY_ATTR_SCTP:
|
|
|
|
|
err = set_sctp(skb, nla_data(nested_attr));
|
|
|
|
|
break;
|
2014-06-24 20:56:57 +09:00
|
|
|
|
|
|
|
|
case OVS_KEY_ATTR_MPLS:
|
|
|
|
|
err = set_mpls(skb, nla_data(nested_attr));
|
|
|
|
|
break;
|
2011-10-21 14:38:54 -07:00
|
|
|
}
|
2011-10-21 15:19:33 -07:00
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-08 11:13:42 +00:00
|
|
|
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
|
2014-08-07 15:46:19 -07:00
|
|
|
const struct nlattr *a, int rem)
|
2014-04-08 11:13:42 +00:00
|
|
|
{
|
2014-08-25 15:18:19 -07:00
|
|
|
if (!is_skb_flow_key_valid(skb)) {
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
err = ovs_flow_key_update(skb, OVS_CB(skb)->pkt_key);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
BUG_ON(!is_skb_flow_key_valid(skb));
|
2014-04-08 11:13:42 +00:00
|
|
|
|
2014-08-07 15:46:19 -07:00
|
|
|
if (!last_action(a, rem)) {
|
|
|
|
|
/* Recirc action is the not the last action
|
2014-08-11 00:14:05 -07:00
|
|
|
* of the action list, need to clone the skb. */
|
2014-08-07 15:46:19 -07:00
|
|
|
skb = skb_clone(skb, GFP_ATOMIC);
|
|
|
|
|
|
|
|
|
|
/* Skip the recirc action when out of memory, but
|
|
|
|
|
* continue on with the rest of the action list. */
|
|
|
|
|
if (!skb)
|
|
|
|
|
return 0;
|
2014-08-11 00:14:05 -07:00
|
|
|
}
|
2014-04-08 11:13:42 +00:00
|
|
|
|
2014-08-11 00:14:05 -07:00
|
|
|
if (add_deferred_actions(skb, NULL)) {
|
|
|
|
|
flow_key_set_recirc_id(skb, nla_get_u32(a));
|
|
|
|
|
} else {
|
|
|
|
|
kfree_skb(skb);
|
|
|
|
|
|
|
|
|
|
if (net_ratelimit())
|
|
|
|
|
pr_warn("%s: deferred action limit reached, drop recirc action\n",
|
|
|
|
|
ovs_dp_name(dp));
|
2014-08-25 15:18:19 -07:00
|
|
|
}
|
2014-04-08 11:13:42 +00:00
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2009-07-08 13:19:16 -07:00
|
|
|
/* Execute a list of actions against 'skb'. */
|
2010-12-23 09:35:15 -08:00
|
|
|
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
|
2014-08-11 00:14:05 -07:00
|
|
|
const struct nlattr *attr, int len)
|
2009-07-08 13:19:16 -07:00
|
|
|
{
|
|
|
|
|
/* Every output action needs a separate clone of 'skb', but the common
|
|
|
|
|
* case is just a single output action, so that doing a clone and
|
|
|
|
|
* then freeing the original skbuff is wasteful. So the following code
|
|
|
|
|
* is slightly obscure just to avoid that. */
|
|
|
|
|
int prev_port = -1;
|
2010-12-10 10:40:58 -08:00
|
|
|
const struct nlattr *a;
|
2011-06-06 19:17:25 -07:00
|
|
|
int rem;
|
2010-01-04 13:08:37 -08:00
|
|
|
|
2011-09-28 10:43:07 -07:00
|
|
|
for (a = attr, rem = len; rem > 0;
|
2011-04-29 10:49:06 -07:00
|
|
|
a = nla_next(a, &rem)) {
|
2011-06-06 19:17:25 -07:00
|
|
|
int err = 0;
|
|
|
|
|
|
2014-07-10 00:50:26 -07:00
|
|
|
if (unlikely(prev_port != -1)) {
|
|
|
|
|
struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
|
|
|
|
|
|
|
|
|
|
if (out_skb)
|
|
|
|
|
do_output(dp, out_skb, prev_port);
|
|
|
|
|
|
2009-07-08 13:19:16 -07:00
|
|
|
prev_port = -1;
|
|
|
|
|
}
|
|
|
|
|
|
2010-12-10 10:40:58 -08:00
|
|
|
switch (nla_type(a)) {
|
2011-08-18 10:35:40 -07:00
|
|
|
case OVS_ACTION_ATTR_OUTPUT:
|
2010-12-10 10:40:58 -08:00
|
|
|
prev_port = nla_get_u32(a);
|
2009-07-08 13:19:16 -07:00
|
|
|
break;
|
|
|
|
|
|
2011-08-18 10:35:40 -07:00
|
|
|
case OVS_ACTION_ATTR_USERSPACE:
|
2011-10-12 16:24:54 -07:00
|
|
|
output_userspace(dp, skb, a);
|
2009-07-08 13:19:16 -07:00
|
|
|
break;
|
2014-04-11 01:41:18 -07:00
|
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_HASH:
|
|
|
|
|
execute_hash(skb, a);
|
|
|
|
|
break;
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2014-06-24 20:56:57 +09:00
|
|
|
case OVS_ACTION_ATTR_PUSH_MPLS:
|
|
|
|
|
err = push_mpls(skb, nla_data(a));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_ACTION_ATTR_POP_MPLS:
|
|
|
|
|
err = pop_mpls(skb, nla_get_be16(a));
|
|
|
|
|
break;
|
|
|
|
|
|
2011-11-14 15:56:43 -08:00
|
|
|
case OVS_ACTION_ATTR_PUSH_VLAN:
|
|
|
|
|
err = push_vlan(skb, nla_data(a));
|
2011-10-21 14:38:54 -07:00
|
|
|
if (unlikely(err)) /* skb already freed. */
|
2011-09-09 18:13:26 -07:00
|
|
|
return err;
|
2009-07-08 13:19:16 -07:00
|
|
|
break;
|
|
|
|
|
|
2011-11-14 15:56:43 -08:00
|
|
|
case OVS_ACTION_ATTR_POP_VLAN:
|
2011-09-09 18:13:26 -07:00
|
|
|
err = pop_vlan(skb);
|
2009-07-08 13:19:16 -07:00
|
|
|
break;
|
|
|
|
|
|
2014-08-07 15:46:19 -07:00
|
|
|
case OVS_ACTION_ATTR_RECIRC:
|
|
|
|
|
err = execute_recirc(dp, skb, a, rem);
|
2014-08-25 15:18:19 -07:00
|
|
|
if (last_action(a, rem)) {
|
|
|
|
|
/* If this is the last action, the skb has
|
|
|
|
|
* been consumed or freed.
|
|
|
|
|
* Return immediately. */
|
|
|
|
|
return err;
|
|
|
|
|
}
|
2014-04-08 11:13:42 +00:00
|
|
|
break;
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
case OVS_ACTION_ATTR_SET:
|
2013-03-04 13:00:25 -08:00
|
|
|
err = execute_set_action(skb, nla_data(a));
|
2009-07-08 13:19:16 -07:00
|
|
|
break;
|
2010-06-17 15:04:12 -07:00
|
|
|
|
2011-09-28 10:43:07 -07:00
|
|
|
case OVS_ACTION_ATTR_SAMPLE:
|
2013-03-04 13:00:25 -08:00
|
|
|
err = sample(dp, skb, a);
|
2011-09-28 10:43:07 -07:00
|
|
|
break;
|
|
|
|
|
}
|
2011-10-21 15:19:33 -07:00
|
|
|
|
2011-06-06 19:17:25 -07:00
|
|
|
if (unlikely(err)) {
|
|
|
|
|
kfree_skb(skb);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
2009-07-08 13:19:16 -07:00
|
|
|
}
|
2011-06-09 15:43:18 -07:00
|
|
|
|
2014-05-15 09:05:03 +09:00
|
|
|
if (prev_port != -1)
|
2009-07-08 13:19:16 -07:00
|
|
|
do_output(dp, skb, prev_port);
|
2014-05-15 09:05:03 +09:00
|
|
|
else
|
2011-06-16 15:32:26 -07:00
|
|
|
consume_skb(skb);
|
2011-06-06 19:17:25 -07:00
|
|
|
|
2009-06-19 14:03:12 -07:00
|
|
|
return 0;
|
2009-07-08 13:19:16 -07:00
|
|
|
}
|
2010-12-23 09:35:15 -08:00
|
|
|
|
2014-08-11 00:14:05 -07:00
|
|
|
static void process_deferred_actions(struct datapath *dp)
|
|
|
|
|
{
|
|
|
|
|
struct action_fifo *fifo = this_cpu_ptr(action_fifos);
|
|
|
|
|
|
|
|
|
|
/* Do not touch the FIFO in case there is no deferred actions. */
|
|
|
|
|
if (action_fifo_is_empty(fifo))
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
/* Finishing executing all deferred actions. */
|
|
|
|
|
do {
|
|
|
|
|
struct deferred_action *da = action_fifo_get(fifo);
|
|
|
|
|
struct sk_buff *skb = da->skb;
|
|
|
|
|
const struct nlattr *actions = da->actions;
|
|
|
|
|
|
|
|
|
|
if (actions)
|
|
|
|
|
do_execute_actions(dp, skb, actions,
|
|
|
|
|
nla_len(actions));
|
|
|
|
|
else
|
|
|
|
|
ovs_dp_process_packet(skb);
|
|
|
|
|
} while (!action_fifo_is_empty(fifo));
|
|
|
|
|
|
|
|
|
|
/* Reset FIFO for the next packet. */
|
|
|
|
|
action_fifo_init(fifo);
|
|
|
|
|
}
|
|
|
|
|
|
2010-12-23 09:35:15 -08:00
|
|
|
/* Execute a list of actions against 'skb'. */
|
2014-08-11 00:14:05 -07:00
|
|
|
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
|
struct sw_flow_actions *acts)
|
|
|
|
|
{
|
|
|
|
|
int level = this_cpu_read(exec_actions_level);
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
if (unlikely(level >= EXEC_ACTIONS_LEVEL_LIMIT)) {
|
|
|
|
|
if (net_ratelimit())
|
|
|
|
|
pr_warn("%s: packet loop detected, dropping.\n",
|
|
|
|
|
ovs_dp_name(dp));
|
|
|
|
|
|
|
|
|
|
kfree_skb(skb);
|
|
|
|
|
return -ELOOP;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
this_cpu_inc(exec_actions_level);
|
|
|
|
|
|
|
|
|
|
err = do_execute_actions(dp, skb, acts->actions, acts->actions_len);
|
|
|
|
|
|
|
|
|
|
if (!level)
|
|
|
|
|
process_deferred_actions(dp);
|
|
|
|
|
|
|
|
|
|
this_cpu_dec(exec_actions_level);
|
|
|
|
|
|
|
|
|
|
/* This return status currently does not reflect the errors
|
|
|
|
|
* encounted during deferred actions execution. Probably needs to
|
|
|
|
|
* be fixed in the future. */
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int action_fifos_init(void)
|
|
|
|
|
{
|
|
|
|
|
action_fifos = alloc_percpu(struct action_fifo);
|
|
|
|
|
if (!action_fifos)
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void action_fifos_exit(void)
|
2014-08-15 01:53:30 -07:00
|
|
|
{
|
2014-08-11 00:14:05 -07:00
|
|
|
free_percpu(action_fifos);
|
2010-12-23 09:35:15 -08:00
|
|
|
}
|