/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
|
|
|
|
|
|
2011-10-06 21:52:39 -07:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <linux/skbuff.h>
|
|
|
|
|
#include <linux/in.h>
|
|
|
|
|
#include <linux/ip.h>
|
2011-10-05 10:50:58 -07:00
|
|
|
#include <linux/openvswitch.h>
|
2013-08-22 20:24:43 +12:00
|
|
|
#include <linux/sctp.h>
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <linux/tcp.h>
|
|
|
|
|
#include <linux/udp.h>
|
|
|
|
|
#include <linux/in6.h>
|
2010-08-24 16:00:27 -07:00
|
|
|
#include <linux/if_arp.h>
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <linux/if_vlan.h>
|
|
|
|
|
#include <net/ip.h>
|
2012-11-05 15:53:32 +02:00
|
|
|
#include <net/ipv6.h>
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <net/checksum.h>
|
2011-11-02 23:34:15 -07:00
|
|
|
#include <net/dsfield.h>
|
2013-08-22 20:24:43 +12:00
|
|
|
#include <net/sctp/checksum.h>
|
2010-04-12 15:53:39 -04:00
|
|
|
|
|
|
|
|
#include "datapath.h"
|
2014-06-24 20:56:57 +09:00
|
|
|
#include "gso.h"
|
|
|
|
|
#include "mpls.h"
|
2010-12-29 22:13:15 -08:00
|
|
|
#include "vlan.h"
|
2010-04-12 15:53:39 -04:00
|
|
|
#include "vport.h"
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2011-09-28 10:43:07 -07:00
|
|
|
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
|
2014-05-15 09:05:03 +09:00
|
|
|
const struct nlattr *attr, int len);
|
2010-12-23 09:35:15 -08:00
|
|
|
|
2011-06-06 19:17:25 -07:00
|
|
|
static int make_writable(struct sk_buff *skb, int write_len)
|
2009-07-08 13:19:16 -07:00
|
|
|
{
|
2011-06-06 19:17:25 -07:00
|
|
|
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
|
|
|
|
|
return 0;
|
2009-11-17 19:03:27 -08:00
|
|
|
|
2011-06-06 19:17:25 -07:00
|
|
|
return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
|
2009-07-08 13:19:16 -07:00
|
|
|
}
|
|
|
|
|
|
2014-06-24 20:56:57 +09:00
|
|
|
/* The end of the mac header.
 *
 * For non-MPLS skbs this will correspond to the network header.
 * For MPLS skbs it will be before the network_header as the MPLS
 * label stack lies between the end of the mac header and the network
 * header. That is, for MPLS skbs the end of the mac header
 * is the top of the MPLS label stack.
 */
static unsigned char *mac_header_end(const struct sk_buff *skb)
{
	/* mac_len is kept up to date by the VLAN/MPLS push/pop actions
	 * below, so this stays correct as headers are added or removed. */
	return skb_mac_header(skb) + skb->mac_len;
}
|
|
|
|
|
|
|
|
|
|
/* Push one MPLS label stack entry (LSE) onto the packet, directly after
 * the mac header, and switch the Ethernet/skb protocol to the MPLS
 * ethertype from the action.  Returns 0 or -ENOMEM.
 */
static int push_mpls(struct sk_buff *skb,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* Make room for MPLS_HLEN bytes of headroom (may reallocate). */
	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	/* Slide the existing mac header down so the new LSE slot opens up
	 * right after it (i.e. at mac_header_end()). */
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)mac_header_end(skb);
	*new_mpls_lse = mpls->mpls_lse;

	/* Fold the newly-inserted bytes into a CHECKSUM_COMPLETE sum. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;
	/* Remember the pre-MPLS protocol once, for later GSO handling;
	 * don't overwrite it if a label was already pushed. */
	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;
	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* Pop the outermost MPLS label stack entry and restore the given
 * ethertype in the Ethernet header.  Returns 0 or a negative errno.
 */
static int pop_mpls(struct sk_buff *skb, const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	/* The mac header plus the LSE about to be removed will be written. */
	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	/* Remove the departing LSE bytes from a CHECKSUM_COMPLETE sum
	 * before they are overwritten by the memmove below. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(mac_header_end(skb),
						  MPLS_HLEN, 0));

	/* Slide the mac header up over the LSE being popped. */
	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* mac_header_end() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(mac_header_end(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	/* Only update skb->protocol if it currently reflects MPLS; a
	 * VLAN-tagged skb keeps its 802.1Q protocol here. */
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;
	return 0;
}
|
|
|
|
|
|
|
|
|
|
static int set_mpls(struct sk_buff *skb, const __be32 *mpls_lse)
|
|
|
|
|
{
|
|
|
|
|
__be32 *stack = (__be32 *)mac_header_end(skb);
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
err = make_writable(skb, skb->mac_len + MPLS_HLEN);
|
|
|
|
|
if (unlikely(err))
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
if (skb->ip_summed == CHECKSUM_COMPLETE) {
|
|
|
|
|
__be32 diff[] = { ~(*stack), *mpls_lse };
|
|
|
|
|
skb->csum = ~csum_partial((char *)diff, sizeof(diff),
|
|
|
|
|
~skb->csum);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
*stack = *mpls_lse;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2012-08-31 15:35:49 +12:00
|
|
|
/* remove VLAN header from packet and update csum accordingly.
 *
 * Strips the in-packet 802.1Q tag, returning its TCI (network byte
 * order) through *current_tci.  Returns 0 or a negative errno.
 */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	/* Drop the 4 tag bytes from a CHECKSUM_COMPLETE sum; the tag sits
	 * right after the two MAC addresses. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	/* Slide the MAC addresses up over the tag, then pull it off. */
	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	/* Restore skb->protocol from the tag's encapsulated ethertype. */
	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;
	/* Update mac_len for subsequent MPLS actions */
	skb->mac_len -= VLAN_HLEN;

	return 0;
}
|
|
|
|
|
|
2011-09-09 18:13:26 -07:00
|
|
|
/* Pop the outermost VLAN tag: either clear the hardware-accelerated tag
 * or strip an in-packet tag.  If another 802.1Q tag follows, it is
 * promoted into the hw-accel slot.  Returns 0 or a negative errno.
 */
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		/* Outermost tag lives out-of-band: just clear it. */
		vlan_set_tci(skb, 0);
	} else {
		/* No hw-accel tag; pop an in-packet one if present. */
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
	return 0;
}
|
|
|
|
|
|
2011-11-14 15:56:43 -08:00
|
|
|
/* Push a VLAN tag into the hw-accel slot.  If a hw-accel tag is already
 * present it is first written into the packet itself, so the new tag
 * becomes the outermost one.  Returns 0 or -ENOMEM.
 */
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
			return -ENOMEM;

		/* Update mac_len for subsequent MPLS actions */
		skb->mac_len += VLAN_HLEN;

		/* Account for the 4 tag bytes just inserted after the two
		 * MAC addresses in a CHECKSUM_COMPLETE sum. */
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	}
	/* TCI from the action carries VLAN_TAG_PRESENT; mask it off before
	 * storing the real 16-bit TCI. */
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Overwrite both Ethernet MAC addresses from the flow key, keeping any
 * CHECKSUM_COMPLETE value consistent.  Returns 0 or a negative errno.
 */
static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;
	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	/* Subtract the old addresses from the checksum before rewriting... */
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	/* ...and add the new ones back afterwards. */
	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	return 0;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Rewrite one IPv4 address (*addr, a field inside the header at nh) to
 * new_addr, incrementally fixing the L4 pseudo-header checksum (TCP/UDP)
 * and the IP header checksum.
 */
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		/* Only touch the TCP checksum if a full header is present. */
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			/* A zero UDP checksum means "none"; leave it alone
			 * unless the stack will finish it (CHECKSUM_PARTIAL). */
			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				/* 0 result must be represented as all-ones. */
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	/* Addresses feed the flow hash; force a recompute. */
	skb_clear_hash(skb);
	*addr = new_addr;
}
|
|
|
|
|
|
2012-11-05 15:53:32 +02:00
|
|
|
/* Incrementally update the TCP/UDP checksum for an IPv6 address change
 * from addr[4] to new_addr[4].  Callers rewrite the address afterwards.
 */
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == IPPROTO_TCP) {
		/* Only touch the checksum if the full header is present. */
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			/* Zero UDP checksum means "none"; skip unless the
			 * stack will finish it (CHECKSUM_PARTIAL). */
			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				/* 0 must be encoded as all-ones on the wire. */
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
|
|
|
|
|
|
|
|
|
|
/* Replace a 128-bit IPv6 address in place, optionally fixing the L4
 * checksum first (it must see the old address to update incrementally).
 */
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	/* Address changes invalidate the cached flow hash. */
	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
|
|
|
|
|
|
|
|
|
|
/* Store the 8-bit IPv6 traffic class, which straddles two header fields:
 * the high nibble lives in "priority" and the low nibble in the top half
 * of flow_lbl[0] (whose bottom half belongs to the flow label).
 */
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
	u8 tc_low = tc & 0x0F;

	nh->priority = tc >> 4;
	nh->flow_lbl[0] = (u8)((tc_low << 4) | (nh->flow_lbl[0] & 0x0F));
}
|
|
|
|
|
|
|
|
|
|
/* Store the 20-bit IPv6 flow label, which is spread over three bytes:
 * the low nibble of flow_lbl[0] (the high nibble holds traffic class)
 * followed by flow_lbl[1] and flow_lbl[2].
 */
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
	nh->flow_lbl[2] = (u8)fl;
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[0] = (u8)((nh->flow_lbl[0] & 0xF0) | ((fl >> 16) & 0x0F));
}
|
|
|
|
|
|
2011-11-05 15:48:12 -07:00
|
|
|
/* Rewrite the IPv4 TTL, patching the header checksum incrementally.
 * The TTL occupies the high byte of its 16-bit checksum word, hence the
 * << 8 before folding the old/new values into the sum.
 */
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	__be16 old_word = htons(nh->ttl << 8);
	__be16 new_word = htons(new_ttl << 8);

	csum_replace2(&nh->check, old_word, new_word);
	nh->ttl = new_ttl;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Apply an OVS_KEY_ATTR_IPV4 set action: rewrite source/destination
 * address, TOS and TTL where they differ from the packet.  Each helper
 * keeps the relevant checksums consistent.  Returns 0 or negative errno.
 */
static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	/* Re-read the header pointer: make_writable() may move skb data. */
	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr)
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

	if (ipv4_key->ipv4_dst != nh->daddr)
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

	if (ipv4_key->ipv4_tos != nh->tos)
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

	if (ipv4_key->ipv4_ttl != nh->ttl)
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

	return 0;
}
|
|
|
|
|
|
2012-11-05 15:53:32 +02:00
|
|
|
/* Apply an OVS_KEY_ATTR_IPV6 set action: rewrite addresses, traffic
 * class, flow label and hop limit.  Returns 0 or a negative errno.
 */
static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
	struct ipv6hdr *nh;
	int err;
	__be32 *saddr;
	__be32 *daddr;

	err = make_writable(skb, skb_network_offset(skb) +
			    sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	/* Pointers taken only after make_writable() may move skb data. */
	nh = ipv6_hdr(skb);
	saddr = (__be32 *)&nh->saddr;
	daddr = (__be32 *)&nh->daddr;

	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
			      ipv6_key->ipv6_src, true);

	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
		unsigned int offset = 0;
		int flags = OVS_IP6T_FH_F_SKIP_RH;
		bool recalc_csum = true;

		/* With a routing header present the L4 checksum covers the
		 * final destination, not this one, so skip the recompute in
		 * that case. */
		if (ipv6_ext_hdr(nh->nexthdr))
			recalc_csum = ipv6_find_hdr(skb, &offset,
						    NEXTHDR_ROUTING, NULL,
						    &flags) != NEXTHDR_ROUTING;

		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
			      ipv6_key->ipv6_dst, recalc_csum);
	}

	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
	nh->hop_limit = ipv6_key->ipv6_hlimit;

	return 0;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Must follow make_writable() since that can move the skb data. */
/* Rewrite one transport-layer port, incrementally updating the given
 * checksum field and invalidating the cached flow hash.
 */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	/* Ports feed the flow hash; force a recompute. */
	skb_clear_hash(skb);
}
|
2011-06-06 19:17:25 -07:00
|
|
|
|
2012-03-06 13:09:13 -08:00
|
|
|
/* Rewrite a UDP port.  Unlike TCP, a UDP checksum of zero means "no
 * checksum", so the update path depends on whether one is in use.
 */
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		/* A computed 0 must be stored as all-ones on the wire. */
		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		/* No checksum to maintain (or the stack will finish it);
		 * just store the port and invalidate the flow hash. */
		*port = new_port;
		skb_clear_hash(skb);
	}
}
|
|
|
|
|
|
|
|
|
|
static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
|
2011-10-21 14:38:54 -07:00
|
|
|
{
|
|
|
|
|
struct udphdr *uh;
|
|
|
|
|
int err;
|
2011-06-06 19:17:25 -07:00
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
err = make_writable(skb, skb_transport_offset(skb) +
|
|
|
|
|
sizeof(struct udphdr));
|
2011-06-06 19:17:25 -07:00
|
|
|
if (unlikely(err))
|
|
|
|
|
return err;
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
uh = udp_hdr(skb);
|
|
|
|
|
if (udp_port_key->udp_src != uh->source)
|
2012-03-06 13:09:13 -08:00
|
|
|
set_udp_port(skb, &uh->source, udp_port_key->udp_src);
|
2011-10-21 14:38:54 -07:00
|
|
|
|
|
|
|
|
if (udp_port_key->udp_dst != uh->dest)
|
2012-03-06 13:09:13 -08:00
|
|
|
set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
|
2011-06-06 19:17:25 -07:00
|
|
|
|
|
|
|
|
return 0;
|
2009-11-11 14:59:49 -08:00
|
|
|
}
|
|
|
|
|
|
2012-03-06 13:09:13 -08:00
|
|
|
static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
|
2009-07-08 13:19:16 -07:00
|
|
|
{
|
2011-10-21 14:38:54 -07:00
|
|
|
struct tcphdr *th;
|
2011-06-06 19:17:25 -07:00
|
|
|
int err;
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2011-06-06 19:17:25 -07:00
|
|
|
err = make_writable(skb, skb_transport_offset(skb) +
|
|
|
|
|
sizeof(struct tcphdr));
|
|
|
|
|
if (unlikely(err))
|
|
|
|
|
return err;
|
2010-08-13 10:46:12 -07:00
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
th = tcp_hdr(skb);
|
|
|
|
|
if (tcp_port_key->tcp_src != th->source)
|
|
|
|
|
set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
if (tcp_port_key->tcp_dst != th->dest)
|
|
|
|
|
set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
|
2010-08-13 10:46:12 -07:00
|
|
|
|
2011-06-06 19:17:25 -07:00
|
|
|
return 0;
|
2009-07-08 13:19:16 -07:00
|
|
|
}
|
|
|
|
|
|
2013-08-22 20:24:43 +12:00
|
|
|
/* Apply an OVS_KEY_ATTR_SCTP set action.  The SCTP CRC32c must be fully
 * recomputed (no incremental update), and any pre-existing checksum
 * error in the packet is deliberately preserved across the rewrite.
 * Returns 0 or a negative errno.
 */
static int set_sctp(struct sk_buff *skb,
		    const struct ovs_key_sctp *sctp_port_key)
{
	struct sctphdr *sh;
	int err;
	unsigned int sctphoff = skb_transport_offset(skb);

	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	if (sctp_port_key->sctp_src != sh->source ||
	    sctp_port_key->sctp_dst != sh->dest) {
		__le32 old_correct_csum, new_csum, old_csum;

		old_csum = sh->checksum;
		old_correct_csum = sctp_compute_cksum(skb, sctphoff);

		sh->source = sctp_port_key->sctp_src;
		sh->dest = sctp_port_key->sctp_dst;

		new_csum = sctp_compute_cksum(skb, sctphoff);

		/* Carry any checksum errors through. */
		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

		/* Ports feed the flow hash; force a recompute. */
		skb_clear_hash(skb);
	}

	return 0;
}
|
|
|
|
|
|
2011-09-20 16:44:46 -07:00
|
|
|
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
|
2009-07-08 13:19:16 -07:00
|
|
|
{
|
2011-09-20 16:44:46 -07:00
|
|
|
struct vport *vport;
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2011-09-20 16:44:46 -07:00
|
|
|
if (unlikely(!skb))
|
|
|
|
|
return -ENOMEM;
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2012-02-16 17:12:36 -08:00
|
|
|
vport = ovs_vport_rcu(dp, out_port);
|
2011-09-20 16:44:46 -07:00
|
|
|
if (unlikely(!vport)) {
|
|
|
|
|
kfree_skb(skb);
|
|
|
|
|
return -ENODEV;
|
|
|
|
|
}
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2011-11-21 17:15:20 -08:00
|
|
|
ovs_vport_send(vport, skb);
|
2011-09-20 16:44:46 -07:00
|
|
|
return 0;
|
2009-07-08 13:19:16 -07:00
|
|
|
}
|
|
|
|
|
|
2011-10-12 16:24:54 -07:00
|
|
|
/* Send the packet up to userspace as an OVS_PACKET_CMD_ACTION upcall,
 * filling in the optional userdata cookie and destination Netlink port
 * from the OVS_ACTION_ATTR_USERSPACE nested attributes.
 */
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	/* The flow key must have been extracted before any action runs. */
	BUG_ON(!OVS_CB(skb)->pkt_key);

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = OVS_CB(skb)->pkt_key;
	upcall.userdata = NULL;
	upcall.portid = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			/* Opaque cookie echoed back to userspace. */
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			/* Netlink portid of the receiving socket. */
			upcall.portid = nla_get_u32(a);
			break;
		}
	}

	return ovs_dp_upcall(dp, skb, &upcall);
}
|
|
|
|
|
|
2014-05-15 09:05:03 +09:00
|
|
|
static bool last_action(const struct nlattr *a, int rem)
|
|
|
|
|
{
|
|
|
|
|
return a->nla_len == rem;
|
|
|
|
|
}
|
|
|
|
|
|
2011-09-28 10:43:07 -07:00
|
|
|
/* Execute an OVS_ACTION_ATTR_SAMPLE action: with the configured
 * probability, run the nested action list on (a clone of) the packet.
 * Never consumes the caller's skb reference.
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  const struct nlattr *attr)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	struct sk_buff *sample_skb;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			/* Probability is scaled to the full u32 range;
			 * a miss skips the whole sample action. */
			if (prandom_u32() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	/* NOTE(review): acts_list is dereferenced unconditionally here —
	 * presumably flow-setup validation guarantees the ACTIONS attribute
	 * is always present; confirm against the flow netlink validator. */
	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is either empty or only contains a single user-space
	 * action, the latter being a special case as it is the only known
	 * usage of the sample action.
	 * In these special cases don't clone the skb as there are no
	 * side-effects in the nested actions.
	 * Otherwise, clone in case the nested actions have side effects. */
	if (likely(rem == 0 ||
		   (nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		    last_action(a, rem)))) {
		sample_skb = skb;
		skb_get(skb);
	} else {
		sample_skb = skb_clone(skb, GFP_ATOMIC);
		if (!sample_skb)
			/* Skip the sample action when out of memory. */
			return 0;
	}

	/* Note that do_execute_actions() never consumes skb.
	 * In the case where skb has been cloned above it is the clone that
	 * is consumed.  Otherwise the skb_get(skb) call prevents
	 * consumption by do_execute_actions().  Thus, it is safe to simply
	 * return the error code and let the caller (also
	 * do_execute_actions()) free skb on error. */
	return do_execute_actions(dp, sample_skb, a, rem);
}
|
|
|
|
|
|
2014-04-11 01:41:18 -07:00
|
|
|
/* Execute an OVS_ACTION_ATTR_HASH action: compute the packet's L4 hash,
 * mix in the action's basis, and store the result in the flow key for
 * later use (e.g. by recirculation / select groups).
 */
static void execute_hash(struct sk_buff *skb, const struct nlattr *attr)
{
	struct sw_flow_key *key = OVS_CB(skb)->pkt_key;
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	/* Reserve 0 to mean "no hash computed". */
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Dispatch an OVS_ACTION_ATTR_SET action to the field-specific setter
 * based on the nested key attribute type.  Attribute types with no case
 * here fall through and return 0 (validation rejects them earlier).
 * Returns 0 or the setter's negative errno.
 */
static int execute_set_action(struct sk_buff *skb,
			      const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		skb->mark = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Points into the action list; used on output to a tunnel. */
		OVS_CB(skb)->tun_info = nla_data(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, nla_data(nested_attr));
		break;
	}

	return err;
}
|
|
|
|
|
|
2014-04-08 11:13:42 +00:00
|
|
|
/* Implements the OVS_ACTION_ATTR_RECIRC action: re-extracts the flow key
 * from 'skb' and feeds the packet back into datapath processing under the
 * recirculation id carried in attribute 'a'.
 *
 * Consumes 'skb' in all cases: it is either handed off to
 * ovs_dp_process_packet_with_key() or freed here when flow extraction
 * fails.  Returns 0 on success or the negative errno from extraction. */
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  const struct nlattr *a)
{
	struct sw_flow_key recirc_key;
	const struct vport *p = OVS_CB(skb)->input_vport;
	uint32_t hash = OVS_CB(skb)->pkt_key->ovs_flow_hash;
	int err;

	err = ovs_flow_extract(skb, p->port_no, &recirc_key);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/* Carry the previously computed packet hash across recirculation,
	 * and record the recirc id this pass executes under. */
	recirc_key.ovs_flow_hash = hash;
	recirc_key.recirc_id = nla_get_u32(a);

	/* The 'true' flag marks the packet as recirculated (cf. the
	 * 'recirc' parameter of ovs_execute_actions()). */
	ovs_dp_process_packet_with_key(skb, &recirc_key, true);

	return 0;
}
|
|
|
|
|
|
2009-07-08 13:19:16 -07:00
|
|
|
/* Execute a list of actions against 'skb'.
 *
 * On success 'skb' is consumed: either transmitted by the final output
 * action or freed with consume_skb().  On error it is freed here with
 * kfree_skb() (push_vlan frees it itself on failure).  Callers must not
 * touch 'skb' after this returns. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that. */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		/* Flush a deferred output with a clone so the original skb
		 * survives for the remaining actions. */
		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			/* Defer the output: when it turns out to be the last
			 * action, 'skb' itself is sent after the loop with no
			 * clone at all. */
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, a);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			struct sk_buff *recirc_skb;

			/* execute_recirc() consumes its skb, so when recirc
			 * is the last action we can hand over 'skb' itself. */
			if (last_action(a, rem))
				return execute_recirc(dp, skb, a);

			/* Recirc action is not the last action
			 * of the action list. */
			recirc_skb = skb_clone(skb, GFP_ATOMIC);

			/* Skip the recirc action when out of memory, but
			 * continue on with the rest of the action list. */
			if (recirc_skb)
				err = execute_recirc(dp, recirc_skb, a);

			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	/* Consume the skb: either send it via the deferred final output
	 * action, or free it when the list ended without one. */
	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}
|
2010-12-23 09:35:15 -08:00
|
|
|
|
2011-10-06 21:52:39 -07:00
|
|
|
/* We limit the number of times that we pass into execute_actions()
 * to avoid blowing out the stack in the event that we have a loop.
 *
 * Each loop adds some (estimated) cost to the kernel stack.
 * The loop terminates when the max cost is exceeded.
 */
#define RECIRC_STACK_COST 1
#define DEFAULT_STACK_COST 4
/* Allow up to 4 regular services, and up to 3 recirculations */
#define MAX_STACK_COST (DEFAULT_STACK_COST * 4 + RECIRC_STACK_COST * 3)

/* Per-CPU accumulator for nested ovs_execute_actions() invocations. */
struct loop_counter {
	u8 stack_cost;		/* loop stack cost. */
	bool looping;		/* Loop detected? */
};

static DEFINE_PER_CPU(struct loop_counter, loop_counters);
|
|
|
|
|
|
|
|
|
|
static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
|
|
|
|
|
{
|
|
|
|
|
if (net_ratelimit())
|
2014-04-30 16:13:27 -07:00
|
|
|
pr_warn("%s: flow loop detected, dropping\n",
|
|
|
|
|
ovs_dp_name(dp));
|
2011-10-06 21:52:39 -07:00
|
|
|
actions->actions_len = 0;
|
|
|
|
|
return -ELOOP;
|
|
|
|
|
}
|
|
|
|
|
|
2010-12-23 09:35:15 -08:00
|
|
|
/* Execute a list of actions against 'skb'.
 *
 * 'recirc' is true for a recirculated packet, which is charged a smaller
 * per-call stack cost than a top-level entry.  The per-CPU stack-cost
 * accounting bounds nested action execution: once the accumulated cost
 * exceeds MAX_STACK_COST the flow's action list is cleared and the packet
 * is dropped with -ELOOP.
 *
 * NOTE(review): presumably called under rcu_read_lock — sf_acts is
 * rcu_dereference()d here; confirm against callers. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, bool recirc)
{
	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
	const u8 stack_cost = recirc ? RECIRC_STACK_COST : DEFAULT_STACK_COST;
	struct loop_counter *loop;
	int error;

	/* Check whether we've looped too much. */
	loop = &__get_cpu_var(loop_counters);
	loop->stack_cost += stack_cost;
	if (unlikely(loop->stack_cost > MAX_STACK_COST))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		error = loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	/* Clear any stale tunnel metadata before executing the actions;
	 * OVS_KEY_ATTR_TUNNEL_INFO sets it as needed. */
	OVS_CB(skb)->tun_info = NULL;
	error = do_execute_actions(dp, skb, acts->actions, acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		error = loop_suppress(dp, acts);

out_loop:
	/* Decrement loop stack cost. */
	loop->stack_cost -= stack_cost;
	if (!loop->stack_cost)
		loop->looping = false;

	return error;
}
|