2009-07-08 13:19:16 -07:00
|
|
|
/*
|
|
|
|
|
* Distributed under the terms of the GNU GPL version 2.
|
2011-01-26 13:41:54 -08:00
|
|
|
* Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
|
2009-06-15 15:11:30 -07:00
|
|
|
*
|
|
|
|
|
* Significant portions of this file may be copied from parts of the Linux
|
|
|
|
|
* kernel, by Linus Torvalds and others.
|
2009-07-08 13:19:16 -07:00
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/* Functions for executing flow actions. */
|
|
|
|
|
|
2011-10-06 21:52:39 -07:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <linux/skbuff.h>
|
|
|
|
|
#include <linux/in.h>
|
|
|
|
|
#include <linux/ip.h>
|
2011-10-05 10:50:58 -07:00
|
|
|
#include <linux/openvswitch.h>
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <linux/tcp.h>
|
|
|
|
|
#include <linux/udp.h>
|
|
|
|
|
#include <linux/in6.h>
|
2010-08-24 16:00:27 -07:00
|
|
|
#include <linux/if_arp.h>
|
2009-07-08 13:19:16 -07:00
|
|
|
#include <linux/if_vlan.h>
|
|
|
|
|
#include <net/ip.h>
|
|
|
|
|
#include <net/checksum.h>
|
2011-11-02 23:34:15 -07:00
|
|
|
#include <net/dsfield.h>
|
2010-04-12 15:53:39 -04:00
|
|
|
|
2010-11-22 14:17:24 -08:00
|
|
|
#include "checksum.h"
|
2010-04-12 15:53:39 -04:00
|
|
|
#include "datapath.h"
|
2010-12-29 22:13:15 -08:00
|
|
|
#include "vlan.h"
|
2010-04-12 15:53:39 -04:00
|
|
|
#include "vport.h"
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2011-09-28 10:43:07 -07:00
|
|
|
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
|
const struct nlattr *attr, int len, bool keep_skb);
|
2010-12-23 09:35:15 -08:00
|
|
|
|
2011-06-06 19:17:25 -07:00
|
|
|
static int make_writable(struct sk_buff *skb, int write_len)
|
2009-07-08 13:19:16 -07:00
|
|
|
{
|
2011-06-06 19:17:25 -07:00
|
|
|
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
|
|
|
|
|
return 0;
|
2009-11-17 19:03:27 -08:00
|
|
|
|
2011-06-06 19:17:25 -07:00
|
|
|
return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
|
2009-07-08 13:19:16 -07:00
|
|
|
}
|
|
|
|
|
|
2011-09-09 18:13:26 -07:00
|
|
|
/* Remove VLAN header from packet and update csum accordingly.
 *
 * The outgoing TCI is returned through '*current_tci' (network byte order).
 * Returns 0 on success or a negative errno. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct ethhdr *eh;
	struct vlan_ethhdr *veth;
	int err;

	/* The first VLAN_ETH_HLEN bytes are rewritten below, so the data
	 * must be private to us. */
	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	/* Subtract the 4 tag bytes from a CHECKSUM_COMPLETE value before
	 * they are stripped from the packet. */
	if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));

	veth = (struct vlan_ethhdr *) skb->data;
	*current_tci = veth->h_vlan_TCI;

	/* Slide the two MAC addresses up over the 802.1Q tag. */
	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);

	eh = (struct ethhdr *)__skb_pull(skb, VLAN_HLEN);

	/* The encapsulated ethertype becomes the packet's protocol again. */
	skb->protocol = eh->h_proto;
	skb->mac_header += VLAN_HLEN;

	return 0;
}
|
|
|
|
|
|
2011-09-09 18:13:26 -07:00
|
|
|
/* Pop the outermost VLAN tag from 'skb', whether carried out-of-band in the
 * skb (hardware-accelerated form) or in-band in the packet data.  If another
 * 802.1Q tag then remains in the packet data, promote it to the accelerated
 * form.  Returns 0 on success or a negative errno. */
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		/* Outermost tag is out-of-band: just clear it. */
		vlan_set_tci(skb, 0);
	} else {
		/* No out-of-band tag; pop the in-band one, if any. */
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, ntohs(tci));
	return 0;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Install the 802.1Q tag from 'q_key' as the skb's out-of-band
 * (hardware-accelerated) VLAN tag.  If an accelerated tag is already
 * present, it is first pushed down into the packet data so stacked VLANs
 * are preserved.  Returns 0 on success or -ENOMEM (in which case the skb
 * has already been freed). */
static int push_vlan(struct sk_buff *skb, const struct ovs_key_8021q *q_key)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		/* __vlan_put_tag() frees the skb on failure. */
		if (!__vlan_put_tag(skb, current_tag))
			return -ENOMEM;

		/* Fold the 4 freshly-inserted tag bytes into a
		 * CHECKSUM_COMPLETE value. */
		if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));

	}
	__vlan_hwaccel_put_tag(skb, ntohs(q_key->q_tci));
	return 0;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
static int set_eth_addr(struct sk_buff *skb,
|
|
|
|
|
const struct ovs_key_ethernet *eth_key)
|
2010-08-13 10:46:12 -07:00
|
|
|
{
|
2011-10-21 14:38:54 -07:00
|
|
|
int err;
|
|
|
|
|
err = make_writable(skb, ETH_HLEN);
|
|
|
|
|
if (unlikely(err))
|
|
|
|
|
return err;
|
|
|
|
|
|
2011-11-11 16:09:20 -08:00
|
|
|
memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
|
|
|
|
|
memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
|
2011-10-21 14:38:54 -07:00
|
|
|
|
|
|
|
|
return 0;
|
2010-08-13 10:46:12 -07:00
|
|
|
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Replace the IPv4 address stored at '*addr' in header 'nh' with
 * 'new_addr', incrementally updating the TCP or UDP pseudo-header checksum
 * (only when the full transport header is present) and the IP header
 * checksum.  The cached rxhash is cleared since it depends on the
 * addresses. */
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		/* NOTE(review): a UDP checksum of 0 means "checksum
		 * disabled"; updating it here would turn it into a bogus
		 * nonzero value — confirm against set_udp_port handling. */
		if (likely(transport_len >= sizeof(struct udphdr)))
			inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_rxhash(skb);
	*addr = new_addr;
}
|
|
|
|
|
|
2011-11-05 15:48:12 -07:00
|
|
|
/* Rewrite the TTL field of IPv4 header 'nh', incrementally fixing up the
 * header checksum.  The TTL occupies the high byte of its 16-bit checksum
 * unit, hence the << 8. */
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	__be16 old_field = htons(nh->ttl << 8);
	__be16 new_field = htons(new_ttl << 8);

	csum_replace2(&nh->check, old_field, new_field);
	nh->ttl = new_ttl;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Apply the IPv4 fields of 'ipv4_key' (source/destination address, DSCP
 * bits, TTL) to 'skb', updating the affected checksums.  Returns 0 on
 * success or a negative errno. */
static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Each field is only touched when it actually changes, avoiding
	 * needless checksum work. */
	if (ipv4_key->ipv4_src != nh->saddr)
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

	if (ipv4_key->ipv4_dst != nh->daddr)
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

	if (ipv4_key->ipv4_tos != nh->tos)
		/* ipv4_change_dsfield() also fixes the header checksum. */
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

	if (ipv4_key->ipv4_ttl != nh->ttl)
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

	return 0;
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Must follow make_writable() since that can move the skb data. */
/* Store 'new_port' at '*port' and incrementally update the transport
 * checksum at '*check'.  The cached rxhash is cleared because it depends
 * on the ports. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_rxhash(skb);
}
|
2011-06-06 19:17:25 -07:00
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
static int set_udp_port(struct sk_buff *skb,
|
|
|
|
|
const struct ovs_key_udp *udp_port_key)
|
|
|
|
|
{
|
|
|
|
|
struct udphdr *uh;
|
|
|
|
|
int err;
|
2011-06-06 19:17:25 -07:00
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
err = make_writable(skb, skb_transport_offset(skb) +
|
|
|
|
|
sizeof(struct udphdr));
|
2011-06-06 19:17:25 -07:00
|
|
|
if (unlikely(err))
|
|
|
|
|
return err;
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
uh = udp_hdr(skb);
|
|
|
|
|
if (udp_port_key->udp_src != uh->source)
|
|
|
|
|
set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
|
|
|
|
|
|
|
|
|
|
if (udp_port_key->udp_dst != uh->dest)
|
|
|
|
|
set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
|
2011-06-06 19:17:25 -07:00
|
|
|
|
|
|
|
|
return 0;
|
2009-11-11 14:59:49 -08:00
|
|
|
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
/* Apply the source/destination TCP ports of 'tcp_port_key' to 'skb',
 * updating the TCP checksum (mandatory for TCP, so it can always be
 * incrementally adjusted).  Returns 0 on success or a negative errno. */
static int set_tcp_port(struct sk_buff *skb,
			const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	/* Header pointer fetched after make_writable(), which may move the
	 * skb data. */
	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source)
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

	if (tcp_port_key->tcp_dst != th->dest)
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

	return 0;
}
|
|
|
|
|
|
2011-09-20 16:44:46 -07:00
|
|
|
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
|
2009-07-08 13:19:16 -07:00
|
|
|
{
|
2011-09-20 16:44:46 -07:00
|
|
|
struct vport *vport;
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2011-09-20 16:44:46 -07:00
|
|
|
if (unlikely(!skb))
|
|
|
|
|
return -ENOMEM;
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2011-09-20 16:44:46 -07:00
|
|
|
vport = rcu_dereference(dp->ports[out_port]);
|
|
|
|
|
if (unlikely(!vport)) {
|
|
|
|
|
kfree_skb(skb);
|
|
|
|
|
return -ENODEV;
|
|
|
|
|
}
|
2009-07-08 13:19:16 -07:00
|
|
|
|
2011-09-20 16:44:46 -07:00
|
|
|
vport_send(vport, skb);
|
|
|
|
|
return 0;
|
2009-07-08 13:19:16 -07:00
|
|
|
}
|
|
|
|
|
|
2011-10-12 16:24:54 -07:00
|
|
|
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
|
const struct nlattr *attr)
|
2009-07-08 13:19:16 -07:00
|
|
|
{
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
struct dp_upcall_info upcall;
|
2011-10-12 16:24:54 -07:00
|
|
|
const struct nlattr *a;
|
|
|
|
|
int rem;
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
|
2011-08-18 10:35:40 -07:00
|
|
|
upcall.cmd = OVS_PACKET_CMD_ACTION;
|
2011-04-29 10:49:06 -07:00
|
|
|
upcall.key = &OVS_CB(skb)->flow->key;
|
2011-10-12 16:24:54 -07:00
|
|
|
upcall.userdata = NULL;
|
|
|
|
|
upcall.pid = 0;
|
|
|
|
|
|
|
|
|
|
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
|
|
|
|
|
a = nla_next(a, &rem)) {
|
|
|
|
|
switch (nla_type(a)) {
|
|
|
|
|
case OVS_USERSPACE_ATTR_USERDATA:
|
|
|
|
|
upcall.userdata = a;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_USERSPACE_ATTR_PID:
|
|
|
|
|
upcall.pid = nla_get_u32(a);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
datapath: Report kernel's flow key when passing packets up to userspace.
One of the goals for Open vSwitch is to decouple kernel and userspace
software, so that either one can be upgraded or rolled back independent of
the other. To do this in full generality, it must be possible to change
the kernel's idea of the flow key separately from the userspace version.
This commit takes one step in that direction by making the kernel report
its idea of the flow that a packet belongs to whenever it passes a packet
up to userspace. This means that userspace can intelligently figure out
what to do:
- If userspace's notion of the flow for the packet matches the kernel's,
then nothing special is necessary.
- If the kernel has a more specific notion for the flow than userspace,
for example if the kernel decoded IPv6 headers but userspace stopped
at the Ethernet type (because it does not understand IPv6), then again
nothing special is necessary: userspace can still set up the flow in
the usual way.
- If userspace has a more specific notion for the flow than the kernel,
for example if userspace decoded an IPv6 header but the kernel
stopped at the Ethernet type, then userspace can forward the packet
manually, without setting up a flow in the kernel. (This case is
bad from a performance point of view, but at least it is correct.)
This commit does not actually make userspace flexible enough to handle
changes in the kernel flow key structure, although userspace does now
have enough information to do that intelligently. This will have to wait
for later commits.
This commit is bigger than it would otherwise be because it is rolled
together with changing "struct odp_msg" to a sequence of Netlink
attributes. The alternative, to do each of those changes in a separate
patch, seemed like overkill because it meant that either we would have to
introduce and then kill off Netlink attributes for in_port and tun_id, if
Netlink conversion went first, or shove yet another variable-length header
into the stuff already after odp_msg, if adding the flow key to odp_msg
went first.
This commit will slow down performance of checksumming packets sent up to
userspace. I'm not entirely pleased with how I did it. I considered a
couple of alternatives, but none of them seemed that much better.
Suggestions welcome. Not changing anything wasn't an option,
unfortunately. At any rate some slowdown will become unavoidable when OVS
actually starts using Netlink instead of just Netlink framing.
(Actually, I thought of one option where we could avoid that: make
userspace do the checksum instead, by passing csum_start and csum_offset as
part of what goes to userspace. But that's not perfect either.)
Signed-off-by: Ben Pfaff <blp@nicira.com>
Acked-by: Jesse Gross <jesse@nicira.com>
2011-01-24 14:59:57 -08:00
|
|
|
return dp_upcall(dp, skb, &upcall);
|
2009-07-08 13:19:16 -07:00
|
|
|
}
|
|
|
|
|
|
2011-09-28 10:43:07 -07:00
|
|
|
static int sample(struct datapath *dp, struct sk_buff *skb,
|
|
|
|
|
const struct nlattr *attr)
|
|
|
|
|
{
|
|
|
|
|
const struct nlattr *acts_list = NULL;
|
|
|
|
|
const struct nlattr *a;
|
|
|
|
|
int rem;
|
|
|
|
|
|
|
|
|
|
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
|
|
|
|
|
a = nla_next(a, &rem)) {
|
|
|
|
|
switch (nla_type(a)) {
|
|
|
|
|
case OVS_SAMPLE_ATTR_PROBABILITY:
|
|
|
|
|
if (net_random() >= nla_get_u32(a))
|
|
|
|
|
return 0;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_SAMPLE_ATTR_ACTIONS:
|
|
|
|
|
acts_list = a;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return do_execute_actions(dp, skb, nla_data(acts_list),
|
|
|
|
|
nla_len(acts_list), true);
|
|
|
|
|
}
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
static int execute_set_action(struct sk_buff *skb,
|
|
|
|
|
const struct nlattr *nested_attr)
|
|
|
|
|
{
|
2011-10-21 15:19:33 -07:00
|
|
|
int err = 0;
|
2011-10-21 14:38:54 -07:00
|
|
|
|
|
|
|
|
switch (nla_type(nested_attr)) {
|
2011-11-01 10:13:16 -07:00
|
|
|
case OVS_KEY_ATTR_PRIORITY:
|
|
|
|
|
skb->priority = nla_get_u32(nested_attr);
|
|
|
|
|
break;
|
|
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
case OVS_KEY_ATTR_TUN_ID:
|
|
|
|
|
OVS_CB(skb)->tun_id = nla_get_be64(nested_attr);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_KEY_ATTR_ETHERNET:
|
|
|
|
|
err = set_eth_addr(skb, nla_data(nested_attr));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_KEY_ATTR_IPV4:
|
|
|
|
|
err = set_ipv4(skb, nla_data(nested_attr));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_KEY_ATTR_TCP:
|
|
|
|
|
err = set_tcp_port(skb, nla_data(nested_attr));
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case OVS_KEY_ATTR_UDP:
|
|
|
|
|
err = set_udp_port(skb, nla_data(nested_attr));
|
|
|
|
|
break;
|
|
|
|
|
}
|
2011-10-21 15:19:33 -07:00
|
|
|
|
2011-10-21 14:38:54 -07:00
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
2009-07-08 13:19:16 -07:00
|
|
|
/* Execute a list of actions against 'skb'.
 *
 * 'attr'/'len' delimit the flattened Netlink action list.  When 'keep_skb'
 * is true the caller retains ownership of 'skb'; otherwise this function
 * consumes it (via the final output or consume_skb()). */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      const struct nlattr *attr, int len, bool keep_skb)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that. */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		/* An output deferred from the previous iteration goes out on
		 * a clone, since 'skb' is still needed by later actions. */
		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			/* Deferred: sent at the top of the next iteration or
			 * after the loop. */
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, a);
			break;

		case OVS_ACTION_ATTR_PUSH:
			/* Only supported push action is on vlan tag. */
			err = push_vlan(skb, nla_data(nla_data(a)));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP:
			/* Only supported pop action is on vlan tag. */
			err = pop_vlan(skb);
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1) {
		/* The final output consumes 'skb' itself — unless the caller
		 * asked to keep it, in which case a clone is sent. */
		if (keep_skb)
			skb = skb_clone(skb, GFP_ATOMIC);

		do_output(dp, skb, prev_port);
	} else if (!keep_skb)
		consume_skb(skb);

	return 0;
}
|
2010-12-23 09:35:15 -08:00
|
|
|
|
2011-10-06 21:52:39 -07:00
|
|
|
/* We limit the number of times that we pass into execute_actions()
 * to avoid blowing out the stack in the event that we have a loop. */
#define MAX_LOOPS 5

/* Per-CPU recursion bookkeeping for execute_actions(). */
struct loop_counter {
	u8 count;		/* Count. */
	bool looping;		/* Loop detected? */
};

static DEFINE_PER_CPU(struct loop_counter, loop_counters);

/* Called when the recursion limit is exceeded: log (rate-limited), disable
 * the offending flow by truncating its action list, and report -ELOOP. */
static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
	if (net_ratelimit())
		pr_warn("%s: flow looped %d times, dropping\n",
			dp_name(dp), MAX_LOOPS);
	actions->actions_len = 0;
	return -ELOOP;
}
|
|
|
|
|
|
2010-12-23 09:35:15 -08:00
|
|
|
/* Execute a list of actions against 'skb'.
 *
 * Entry point for action execution on the flow attached to 'skb'.  Consumes
 * 'skb'.  Uses a per-CPU counter to bound recursion (actions can re-enter
 * via sample()/loops); on overflow the flow's actions are suppressed and
 * -ELOOP is returned. */
int execute_actions(struct datapath *dp, struct sk_buff *skb)
{
	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
	struct loop_counter *loop;
	int error;

	/* Check whether we've looped too much. */
	loop = &__get_cpu_var(loop_counters);
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		error = loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	OVS_CB(skb)->tun_id = 0;
	error = do_execute_actions(dp, skb, acts->actions,
				   acts->actions_len, false);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		error = loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;

	return error;
}
|