/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include <sys/types.h>
#include "flow.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "byte-order.h"
#include "coverage.h"
#include "csum.h"
#include "dynamic-string.h"
#include "hash.h"
#include "jhash.h"
#include "match.h"
#include "ofpbuf.h"
#include "openflow/openflow.h"
#include "packets.h"
#include "odp-util.h"
#include "random.h"
#include "unaligned.h"

COVERAGE_DEFINE(flow_extract);
COVERAGE_DEFINE(miniflow_malloc);

/* U64 indices for segmented flow classification. */
const uint8_t flow_segment_u64s[4] = {
    FLOW_SEGMENT_1_ENDS_AT / sizeof(uint64_t),
    FLOW_SEGMENT_2_ENDS_AT / sizeof(uint64_t),
    FLOW_SEGMENT_3_ENDS_AT / sizeof(uint64_t),
    FLOW_U64S
};

/* miniflow_extract() assumes the following to be true to optimize the
 * extraction process. */
BUILD_ASSERT_DECL(offsetof(struct flow, dl_type) + 2
                  == offsetof(struct flow, vlan_tci) &&
                  offsetof(struct flow, dl_type) / 4
                  == offsetof(struct flow, vlan_tci) / 4);

BUILD_ASSERT_DECL(offsetof(struct flow, nw_frag) + 3
                  == offsetof(struct flow, nw_proto) &&
                  offsetof(struct flow, nw_tos) + 2
                  == offsetof(struct flow, nw_proto) &&
                  offsetof(struct flow, nw_ttl) + 1
                  == offsetof(struct flow, nw_proto) &&
                  offsetof(struct flow, nw_frag) / 4
                  == offsetof(struct flow, nw_tos) / 4 &&
                  offsetof(struct flow, nw_ttl) / 4
                  == offsetof(struct flow, nw_tos) / 4 &&
                  offsetof(struct flow, nw_proto) / 4
                  == offsetof(struct flow, nw_tos) / 4);

/* TCP flags in the middle of a BE64, zeroes in the other half. */
BUILD_ASSERT_DECL(offsetof(struct flow, tcp_flags) % 8 == 4);

#if WORDS_BIGENDIAN
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl) \
                                 << 16)
#else
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl))
#endif

BUILD_ASSERT_DECL(offsetof(struct flow, tp_src) + 2
                  == offsetof(struct flow, tp_dst) &&
                  offsetof(struct flow, tp_src) / 4
                  == offsetof(struct flow, tp_dst) / 4);

/* Removes 'size' bytes from the head end of '*datap', of size '*sizep', which
 * must contain at least 'size' bytes of data.  Returns the first byte of data
 * removed. */
static inline const void *
data_pull(void **datap, size_t *sizep, size_t size)
{
    char *data = (char *)*datap;
    *datap = data + size;
    *sizep -= size;
    return data;
}

/* If '*datap' has at least 'size' bytes of data, removes that many bytes from
 * the head end of '*datap' and returns the first byte removed.  Otherwise,
 * returns a null pointer without modifying '*datap'. */
static inline const void *
data_try_pull(void **datap, size_t *sizep, size_t size)
{
    return OVS_LIKELY(*sizep >= size) ? data_pull(datap, sizep, size) : NULL;
}
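
/* Usage sketch for the pull helpers above (illustrative only; 'example_hdr'
 * is a hypothetical header type, not one defined in this file).  Every parser
 * below keeps a cursor and a remaining byte count and advances both in lock
 * step:
 *
 *     const struct example_hdr *h = data_try_pull(&data, &size, sizeof *h);
 *     if (!h) {
 *         return;
 *     }
 *
 * On success 'data' points just past the header and 'size' has been reduced
 * by sizeof *h; on failure neither is modified. */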

/* Context for pushing data to a miniflow. */
struct mf_ctx {
    uint64_t map;
    uint64_t *data;
    uint64_t * const end;
};
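
/* Note on 'map' (descriptive, based on the macros below): bit k of 'map'
 * corresponds to the k-th 64-bit word of struct flow.  A set bit records that
 * the word is present in the compressed miniflow and that its value was
 * written through 'data', which only ever advances.  For example, a push at
 * offsetof(struct flow, FIELD) == 24 sets bit 24 / 8 == 3. */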

/* miniflow_push_* macros allow filling in miniflow data values in order.
 * Assertions are needed only when the layout of the struct flow is modified.
 * 'ofs' is a compile-time constant, which allows most of the code to be
 * optimized away.  Some GCC versions gave warnings on ALWAYS_INLINE, so these
 * are defined as macros. */

#if (FLOW_WC_SEQ != 30)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
              "assertions enabled. Consider updating FLOW_WC_SEQ after "
              "testing")
#else
#define MINIFLOW_ASSERT(X)
#endif

#define miniflow_push_uint64_(MF, OFS, VALUE)                   \
{                                                               \
    MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0          \
                    && !(MF.map & (UINT64_MAX << (OFS) / 8)));  \
    *MF.data++ = VALUE;                                         \
    MF.map |= UINT64_C(1) << (OFS) / 8;                         \
}

#define miniflow_push_be64_(MF, OFS, VALUE)                     \
    miniflow_push_uint64_(MF, OFS, (OVS_FORCE uint64_t)(VALUE))

#define miniflow_push_uint32_(MF, OFS, VALUE)                           \
{                                                                       \
    MINIFLOW_ASSERT(MF.data < MF.end &&                                 \
        (((OFS) % 8 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 8)))      \
         || ((OFS) % 8 == 4 && MF.map & (UINT64_C(1) << (OFS) / 8)      \
             && !(MF.map & (UINT64_MAX << ((OFS) / 8 + 1))))));         \
                                                                        \
    if ((OFS) % 8 == 0) {                                               \
        *(uint32_t *)MF.data = VALUE;                                   \
        MF.map |= UINT64_C(1) << (OFS) / 8;                             \
    } else if ((OFS) % 8 == 4) {                                        \
        *((uint32_t *)MF.data + 1) = VALUE;                             \
        MF.data++;                                                      \
    }                                                                   \
}

#define miniflow_push_be32_(MF, OFS, VALUE)                     \
    miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))

#define miniflow_push_uint16_(MF, OFS, VALUE)                           \
{                                                                       \
    MINIFLOW_ASSERT(MF.data < MF.end &&                                 \
        (((OFS) % 8 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 8)))      \
         || ((OFS) % 2 == 0 && MF.map & (UINT64_C(1) << (OFS) / 8)      \
             && !(MF.map & (UINT64_MAX << ((OFS) / 8 + 1))))));         \
                                                                        \
    if ((OFS) % 8 == 0) {                                               \
        *(uint16_t *)MF.data = VALUE;                                   \
        MF.map |= UINT64_C(1) << (OFS) / 8;                             \
    } else if ((OFS) % 8 == 2) {                                        \
        *((uint16_t *)MF.data + 1) = VALUE;                             \
    } else if ((OFS) % 8 == 4) {                                        \
        *((uint16_t *)MF.data + 2) = VALUE;                             \
    } else if ((OFS) % 8 == 6) {                                        \
        *((uint16_t *)MF.data + 3) = VALUE;                             \
        MF.data++;                                                      \
    }                                                                   \
}

#define miniflow_pad_to_64_(MF, OFS)                                    \
{                                                                       \
    MINIFLOW_ASSERT((OFS) % 8 != 0);                                    \
    MINIFLOW_ASSERT(MF.map & (UINT64_C(1) << (OFS) / 8));               \
    MINIFLOW_ASSERT(!(MF.map & (UINT64_MAX << ((OFS) / 8 + 1))));       \
                                                                        \
    memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8);           \
    MF.data++;                                                          \
}

#define miniflow_push_be16_(MF, OFS, VALUE)                     \
    miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);

/* Data at 'valuep' may be unaligned. */
#define miniflow_push_words_(MF, OFS, VALUEP, N_WORDS)                  \
{                                                                       \
    int ofs64 = (OFS) / 8;                                              \
                                                                        \
    MINIFLOW_ASSERT(MF.data + (N_WORDS) <= MF.end && (OFS) % 8 == 0     \
                    && !(MF.map & (UINT64_MAX << ofs64)));              \
                                                                        \
    memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof *MF.data);             \
    MF.data += (N_WORDS);                                               \
    MF.map |= ((UINT64_MAX >> (64 - (N_WORDS))) << ofs64);              \
}

/* Push 32-bit words padded to 64-bits. */
#define miniflow_push_words_32_(MF, OFS, VALUEP, N_WORDS)               \
{                                                                       \
    int ofs64 = (OFS) / 8;                                              \
                                                                        \
    MINIFLOW_ASSERT(MF.data + DIV_ROUND_UP(N_WORDS, 2) <= MF.end        \
                    && (OFS) % 8 == 0                                   \
                    && !(MF.map & (UINT64_MAX << ofs64)));              \
                                                                        \
    memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof(uint32_t));            \
    MF.data += DIV_ROUND_UP(N_WORDS, 2);                                \
    MF.map |= ((UINT64_MAX >> (64 - DIV_ROUND_UP(N_WORDS, 2))) << ofs64); \
    if ((N_WORDS) & 1) {                                                \
        *((uint32_t *)MF.data - 1) = 0;                                 \
    }                                                                   \
}

/* Data at 'valuep' may be unaligned. */
/* MACs start 64-aligned, and must be followed by other data or padding. */
#define miniflow_push_macs_(MF, OFS, VALUEP)                    \
{                                                               \
    int ofs64 = (OFS) / 8;                                      \
                                                                \
    MINIFLOW_ASSERT(MF.data + 2 <= MF.end && (OFS) % 8 == 0     \
                    && !(MF.map & (UINT64_MAX << ofs64)));      \
                                                                \
    memcpy(MF.data, (VALUEP), 2 * ETH_ADDR_LEN);                \
    MF.data += 1;                   /* First word only. */      \
    MF.map |= UINT64_C(3) << ofs64; /* Both words. */           \
}

#define miniflow_push_uint32(MF, FIELD, VALUE)                      \
    miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_be32(MF, FIELD, VALUE)                        \
    miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_uint16(MF, FIELD, VALUE)                      \
    miniflow_push_uint16_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_be16(MF, FIELD, VALUE)                        \
    miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_pad_to_64(MF, FIELD)                       \
    miniflow_pad_to_64_(MF, offsetof(struct flow, FIELD))

#define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS)                 \
    miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)

#define miniflow_push_words_32(MF, FIELD, VALUEP, N_WORDS)              \
    miniflow_push_words_32_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)

#define miniflow_push_macs(MF, FIELD, VALUEP)                   \
    miniflow_push_macs_(MF, offsetof(struct flow, FIELD), VALUEP)
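
/* Illustrative sketch of how the wrappers above are meant to be used (a
 * minimal example, not code copied from this file): values must be pushed in
 * increasing struct flow offset order, and a partially filled 64-bit word
 * must be completed or padded before moving on:
 *
 *     struct mf_ctx mf = { 0, values, values + FLOW_U64S };
 *
 *     miniflow_push_be16(mf, dl_type, dl_type);
 *     miniflow_push_be16(mf, vlan_tci, vlan_tci);
 *     miniflow_push_uint32(mf, recirc_id, id);
 *     miniflow_pad_to_64(mf, conj_id);
 *
 * miniflow_extract() below follows exactly this pattern. */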

/* Pulls the MPLS headers at '*datap' and returns the count of them. */
static inline int
parse_mpls(void **datap, size_t *sizep)
{
    const struct mpls_hdr *mh;
    int count = 0;

    while ((mh = data_try_pull(datap, sizep, sizeof *mh))) {
        count++;
        if (mh->mpls_lse.lo & htons(1 << MPLS_BOS_SHIFT)) {
            break;
        }
    }
    return MIN(count, FLOW_MAX_MPLS_LABELS);
}
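
/* Behavior sketch for parse_mpls() (descriptive): the loop pulls one label
 * stack entry at a time and stops after the entry whose bottom-of-stack bit
 * is set.  For a packet carrying two labels, the first with BoS == 0 and the
 * second with BoS == 1, both entries are pulled and 2 is returned; a deeper
 * stack is capped at FLOW_MAX_MPLS_LABELS in the return value. */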

static inline ovs_be16
parse_vlan(void **datap, size_t *sizep)
{
    const struct eth_header *eth = *datap;

    struct qtag_prefix {
        ovs_be16 eth_type;      /* ETH_TYPE_VLAN */
        ovs_be16 tci;
    };

    data_pull(datap, sizep, ETH_ADDR_LEN * 2);

    if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
        if (OVS_LIKELY(*sizep
                       >= sizeof(struct qtag_prefix) + sizeof(ovs_be16))) {
            const struct qtag_prefix *qp = data_pull(datap, sizep, sizeof *qp);
            return qp->tci | htons(VLAN_CFI);
        }
    }

    return 0;
}
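
/* Worked example for parse_vlan() (illustrative): for a frame tagged with
 * VLAN ID 10 and priority 3 the 16-bit TCI field is 0x600a, and the value
 * returned is htons(0x600a | 0x1000) == htons(0x700a), i.e. the CFI bit is
 * forced on.  That way a return value of 0 unambiguously means "no 802.1Q
 * tag", even for a tag whose VID and PCP are both zero. */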

static inline ovs_be16
parse_ethertype(void **datap, size_t *sizep)
{
    const struct llc_snap_header *llc;
    ovs_be16 proto;

    proto = *(ovs_be16 *) data_pull(datap, sizep, sizeof proto);
    if (OVS_LIKELY(ntohs(proto) >= ETH_TYPE_MIN)) {
        return proto;
    }

    if (OVS_UNLIKELY(*sizep < sizeof *llc)) {
        return htons(FLOW_DL_TYPE_NONE);
    }

    llc = *datap;
    if (OVS_UNLIKELY(llc->llc.llc_dsap != LLC_DSAP_SNAP
                     || llc->llc.llc_ssap != LLC_SSAP_SNAP
                     || llc->llc.llc_cntl != LLC_CNTL_SNAP
                     || memcmp(llc->snap.snap_org, SNAP_ORG_ETHERNET,
                               sizeof llc->snap.snap_org))) {
        return htons(FLOW_DL_TYPE_NONE);
    }

    data_pull(datap, sizep, sizeof *llc);

    if (OVS_LIKELY(ntohs(llc->snap.snap_type) >= ETH_TYPE_MIN)) {
        return llc->snap.snap_type;
    }

    return htons(FLOW_DL_TYPE_NONE);
}
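
/* Behavior sketch for parse_ethertype() (descriptive): a two-byte value of at
 * least ETH_TYPE_MIN is an Ethernet II EtherType and is returned as-is, e.g.
 * 0x0800 for IPv4.  A smaller value is an 802.3 length, so the following
 * bytes are checked for an LLC/SNAP header with the all-zeros OUI; if one is
 * found its SNAP type is returned, otherwise FLOW_DL_TYPE_NONE marks the
 * frame as having no usable EtherType. */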

static inline bool
parse_icmpv6(void **datap, size_t *sizep, const struct icmp6_hdr *icmp,
             const struct in6_addr **nd_target,
             uint8_t arp_buf[2][ETH_ADDR_LEN])
{
    if (icmp->icmp6_code == 0 &&
        (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
         icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {

        *nd_target = data_try_pull(datap, sizep, sizeof **nd_target);
        if (OVS_UNLIKELY(!*nd_target)) {
            return false;
        }

        while (*sizep >= 8) {
            /* The minimum size of an option is 8 bytes, which also is
             * the size of Ethernet link-layer options. */
            const struct nd_opt_hdr *nd_opt = *datap;
            int opt_len = nd_opt->nd_opt_len * 8;

            if (!opt_len || opt_len > *sizep) {
                goto invalid;
            }

            /* Store the link layer address if the appropriate option is
             * provided.  It is considered an error if the same link
             * layer option is specified twice. */
            if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
                && opt_len == 8) {
                if (OVS_LIKELY(eth_addr_is_zero(arp_buf[0]))) {
                    memcpy(arp_buf[0], nd_opt + 1, ETH_ADDR_LEN);
                } else {
                    goto invalid;
                }
            } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
                       && opt_len == 8) {
                if (OVS_LIKELY(eth_addr_is_zero(arp_buf[1]))) {
                    memcpy(arp_buf[1], nd_opt + 1, ETH_ADDR_LEN);
                } else {
                    goto invalid;
                }
            }

            if (OVS_UNLIKELY(!data_try_pull(datap, sizep, opt_len))) {
                goto invalid;
            }
        }
    }

    return true;

invalid:
    return false;
}
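
/* Calling-convention note for parse_icmpv6() (descriptive): the caller is
 * expected to pass 'arp_buf' zeroed, since the eth_addr_is_zero() checks
 * above are what detect a duplicated source/target link-layer option.  On
 * success the source and target link-layer addresses, when present, are left
 * in arp_buf[0] and arp_buf[1] respectively. */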

/* Initializes 'flow' members from 'packet' and 'md'
 *
 * Initializes 'packet' header l2 pointer to the start of the Ethernet
 * header, and the layer offsets as follows:
 *
 *    - packet->l2_5_ofs to the start of the MPLS shim header, or UINT16_MAX
 *      when there is no MPLS shim header.
 *
 *    - packet->l3_ofs to just past the Ethernet header, or just past the
 *      vlan_header if one is present, to the first byte of the payload of the
 *      Ethernet frame.  UINT16_MAX if the frame is too short to contain an
 *      Ethernet header.
 *
 *    - packet->l4_ofs to just past the IPv4 header, if one is present and
 *      has at least the content used for the fields of interest for the flow,
 *      otherwise UINT16_MAX.
 */
void
flow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
             struct flow *flow)
{
    struct {
        struct miniflow mf;
        uint64_t buf[FLOW_U64S];
    } m;

    COVERAGE_INC(flow_extract);

    miniflow_initialize(&m.mf, m.buf);
    miniflow_extract(packet, md, &m.mf);
    miniflow_expand(&m.mf, flow);
}
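
/* Usage sketch for flow_extract() (illustrative; 'packet' and 'md' stand for
 * any ofpbuf holding an Ethernet frame and its packet metadata):
 *
 *     struct flow flow;
 *
 *     flow_extract(packet, md, &flow);
 *
 * The extraction itself happens in miniflow_extract() below; flow_extract()
 * merely expands the resulting compressed miniflow into the full struct
 * flow. */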

/* Caller is responsible for initializing 'dst' with enough storage for
 * FLOW_U64S * 8 bytes. */
void
miniflow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
                 struct miniflow *dst)
{
    void *data = ofpbuf_data(packet);
    size_t size = ofpbuf_size(packet);
    uint64_t *values = miniflow_values(dst);
    struct mf_ctx mf = { 0, values, values + FLOW_U64S };
    char *l2;
    ovs_be16 dl_type;
    uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;

    /* Metadata. */
    if (md) {
        if (md->tunnel.ip_dst) {
            miniflow_push_words(mf, tunnel, &md->tunnel,
                                sizeof md->tunnel / sizeof(uint64_t));
        }
        if (md->skb_priority || md->pkt_mark) {
            miniflow_push_uint32(mf, skb_priority, md->skb_priority);
            miniflow_push_uint32(mf, pkt_mark, md->pkt_mark);
        }
        miniflow_push_uint32(mf, dp_hash, md->dp_hash);
        miniflow_push_uint32(mf, in_port, odp_to_u32(md->in_port.odp_port));
        if (md->recirc_id) {
            miniflow_push_uint32(mf, recirc_id, md->recirc_id);
            miniflow_pad_to_64(mf, conj_id);
        }
    }
|
2009-07-08 13:19:16 -07:00
|
|
|
|
|
2014-04-18 08:26:56 -07:00
|
|
|
|
/* Initialize packet's layer pointer and offsets. */
|
|
|
|
|
l2 = data;
|
|
|
|
|
ofpbuf_set_frame(packet, data);
|
2009-07-08 13:19:16 -07:00
|
|
|
|
|
2014-04-18 08:26:56 -07:00
|
|
|
|
/* Must have full Ethernet header to proceed. */
|
|
|
|
|
if (OVS_UNLIKELY(size < sizeof(struct eth_header))) {
|
|
|
|
|
goto out;
|
|
|
|
|
} else {
|
|
|
|
|
ovs_be16 vlan_tci;
|
datapath: Fix handling of 802.1Q and SNAP headers.
The kernel and user datapaths have code that assumes that 802.1Q headers
are used only inside Ethernet II frames, not inside SNAP-encapsulated
frames. But the kernel and user flow_extract() implementations would
interpret 802.1Q headers inside SNAP headers as being valid VLANs. This
would cause packet corruption if any VLAN-related actions were to be taken,
so change the two flow_extract() implementations only to accept 802.1Q as
an Ethernet II frame type, not as a SNAP-encoded frame type.
802.1Q-2005 says that this is correct anyhow:
Where the ISS instance used to transmit and receive tagged frames is
provided by a media access control method that can support Ethernet
Type encoding directly (e.g., is an IEEE 802.3 or IEEE 802.11 MAC) or
is media access method independent (e.g., 6.6), the TPID is Ethernet
Type encoded, i.e., is two octets in length and comprises solely the
assigned Ethernet Type value.
Where the ISS instance is provided by a media access method that
cannot directly support Ethernet Type encoding (e.g., is an IEEE
802.5 or FDDI MAC), the TPID is encoded according to the rule for
a Subnetwork Access Protocol (Clause 10 of IEEE Std 802) that
encapsulates Ethernet frames over LLC, and comprises the SNAP
header (AA-AA-03) followed by the SNAP PID (00-00-00) followed by
the two octets of the assigned Ethernet Type value.
All of the media that OVS handles supports Ethernet Type fields, so to me
that means that we don't have to handle 802.1Q-inside-SNAP.
On the other hand, we *do* have to handle SNAP-inside-802.1Q, because this
is actually allowed by the standards. So this commit also adds that
support.
I verified that, with this change, both SNAP and Ethernet packets are
properly recognized both with and without 802.1Q encapsulation.
I was a bit surprised to find out that Linux does not accept
SNAP-encapsulated IP frames on Ethernet.
Here's a summary of how frames are handled before and after this commit:
Common cases
------------
Ethernet
+------------+
1. |dst|src|TYPE|
+------------+
Ethernet LLC SNAP
+------------+ +--------+ +-----------+
2. |dst|src| len| |aa|aa|03| |000000|TYPE|
+------------+ +--------+ +-----------+
Ethernet 802.1Q
+------------+ +---------+
3. |dst|src|8100| |VLAN|TYPE|
+------------+ +---------+
Ethernet 802.1Q LLC SNAP
+------------+ +---------+ +--------+ +-----------+
4. |dst|src|8100| |VLAN| LEN| |aa|aa|03| |000000|TYPE|
+------------+ +---------+ +--------+ +-----------+
Unusual cases
-------------
Ethernet LLC SNAP 802.1Q
+------------+ +--------+ +-----------+ +---------+
5. |dst|src| len| |aa|aa|03| |000000|8100| |VLAN|TYPE|
+------------+ +--------+ +-----------+ +---------+
Ethernet LLC
+------------+ +--------+
6. |dst|src| len| |xx|xx|xx|
+------------+ +--------+
Ethernet LLC SNAP
+------------+ +--------+ +-----------+
7. |dst|src| len| |aa|aa|03| |xxxxxx|xxxx|
+------------+ +--------+ +-----------+
Ethernet 802.1Q LLC
+------------+ +---------+ +--------+
8. |dst|src|8100| |VLAN| LEN| |xx|xx|xx|
+------------+ +---------+ +--------+
Ethernet 802.1Q LLC SNAP
+------------+ +---------+ +--------+ +-----------+
9. |dst|src|8100| |VLAN| LEN| |aa|aa|03| |xxxxxx|xxxx|
+------------+ +---------+ +--------+ +-----------+
Behavior
--------
--------------- --------------- -------------------------------------
Before After
this commit this commit
dl_type dl_vlan dl_type dl_vlan Notes
------- ------- ------- ------- -------------------------------------
1. TYPE ffff TYPE ffff no change
2. TYPE ffff TYPE ffff no change
3. TYPE VLAN TYPE VLAN no change
4. LEN VLAN TYPE VLAN proposal fixes behavior
5. TYPE VLAN 8100 ffff 802.1Q says this is invalid framing
6. 05ff ffff 05ff ffff no change
7. 05ff ffff 05ff ffff no change
8. LEN VLAN 05ff VLAN proposal fixes behavior
9. LEN VLAN 05ff VLAN proposal fixes behavior
Signed-off-by: Ben Pfaff <blp@nicira.com>
2010-08-10 11:35:46 -07:00
|
|
|
|
|
2014-04-18 08:26:56 -07:00
|
|
|
|
/* Link layer. */
|
|
|
|
|
BUILD_ASSERT(offsetof(struct flow, dl_dst) + 6
|
|
|
|
|
== offsetof(struct flow, dl_src));
|
2015-01-06 11:10:42 -08:00
|
|
|
|
miniflow_push_macs(mf, dl_dst, data);
|
2014-04-18 08:26:56 -07:00
|
|
|
|
/* dl_type, vlan_tci. */
|
|
|
|
|
vlan_tci = parse_vlan(&data, &size);
|
|
|
|
|
dl_type = parse_ethertype(&data, &size);
|
|
|
|
|
miniflow_push_be16(mf, dl_type, dl_type);
|
|
|
|
|
miniflow_push_be16(mf, vlan_tci, vlan_tci);
|
    }

    /* Parse mpls. */
    if (OVS_UNLIKELY(eth_type_mpls(dl_type))) {
        int count;
        const void *mpls = data;

        packet->l2_5_ofs = (char *)data - l2;
        count = parse_mpls(&data, &size);
        miniflow_push_words_32(mf, mpls_lse, mpls, count);
    }

    /* Network layer. */
    packet->l3_ofs = (char *)data - l2;

    nw_frag = 0;
    if (OVS_LIKELY(dl_type == htons(ETH_TYPE_IP))) {
        const struct ip_header *nh = data;
        int ip_len;
        uint16_t tot_len;

        if (OVS_UNLIKELY(size < IP_HEADER_LEN)) {
            goto out;
        }
        ip_len = IP_IHL(nh->ip_ihl_ver) * 4;

        if (OVS_UNLIKELY(ip_len < IP_HEADER_LEN)) {
            goto out;
        }
        if (OVS_UNLIKELY(size < ip_len)) {
            goto out;
        }
        tot_len = ntohs(nh->ip_tot_len);
        if (OVS_UNLIKELY(tot_len > size)) {
            goto out;
        }
        if (OVS_UNLIKELY(size - tot_len > UINT8_MAX)) {
            goto out;
        }
        ofpbuf_set_l2_pad_size(packet, size - tot_len);
        size = tot_len;   /* Never pull padding. */

        /* Push both source and destination address at once. */
        miniflow_push_words(mf, nw_src, &nh->ip_src, 1);

        miniflow_push_be32(mf, ipv6_label, 0); /* Padding for IPv4. */

        nw_tos = nh->ip_tos;
        nw_ttl = nh->ip_ttl;
        nw_proto = nh->ip_proto;
        if (OVS_UNLIKELY(IP_IS_FRAGMENT(nh->ip_frag_off))) {
            nw_frag = FLOW_NW_FRAG_ANY;
            if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
                nw_frag |= FLOW_NW_FRAG_LATER;
            }
        }
        data_pull(&data, &size, ip_len);
    } else if (dl_type == htons(ETH_TYPE_IPV6)) {
        const struct ovs_16aligned_ip6_hdr *nh;
        ovs_be32 tc_flow;
        uint16_t plen;

        if (OVS_UNLIKELY(size < sizeof *nh)) {
            goto out;
        }
        nh = data_pull(&data, &size, sizeof *nh);

        plen = ntohs(nh->ip6_plen);
        if (OVS_UNLIKELY(plen > size)) {
            goto out;
        }
        /* Jumbo Payload option not supported yet. */
        if (OVS_UNLIKELY(size - plen > UINT8_MAX)) {
            goto out;
        }
        ofpbuf_set_l2_pad_size(packet, size - plen);
        size = plen;   /* Never pull padding. */

        miniflow_push_words(mf, ipv6_src, &nh->ip6_src,
                            sizeof nh->ip6_src / 8);
        miniflow_push_words(mf, ipv6_dst, &nh->ip6_dst,
                            sizeof nh->ip6_dst / 8);

        tc_flow = get_16aligned_be32(&nh->ip6_flow);
        {
            ovs_be32 label = tc_flow & htonl(IPV6_LABEL_MASK);
            miniflow_push_be32(mf, ipv6_label, label);
        }

        nw_tos = ntohl(tc_flow) >> 20;
        nw_ttl = nh->ip6_hlim;
        nw_proto = nh->ip6_nxt;

        while (1) {
            if (OVS_LIKELY((nw_proto != IPPROTO_HOPOPTS)
                           && (nw_proto != IPPROTO_ROUTING)
                           && (nw_proto != IPPROTO_DSTOPTS)
                           && (nw_proto != IPPROTO_AH)
                           && (nw_proto != IPPROTO_FRAGMENT))) {
                /* It's either a terminal header (e.g., TCP, UDP) or one we
                 * don't understand.  In either case, we're done with the
                 * packet, so use it to fill in 'nw_proto'. */
                break;
            }

            /* We only verify that at least 8 bytes of the next header are
             * available, but many of these headers are longer.  Ensure that
             * accesses within the extension header are within those first 8
             * bytes.  All extension headers are required to be at least 8
             * bytes. */
            if (OVS_UNLIKELY(size < 8)) {
                goto out;
            }

            if ((nw_proto == IPPROTO_HOPOPTS)
                || (nw_proto == IPPROTO_ROUTING)
                || (nw_proto == IPPROTO_DSTOPTS)) {
                /* These headers, while different, have the fields we care
                 * about in the same location and with the same
                 * interpretation. */
                const struct ip6_ext *ext_hdr = data;
                nw_proto = ext_hdr->ip6e_nxt;
                if (OVS_UNLIKELY(!data_try_pull(&data, &size,
                                                (ext_hdr->ip6e_len + 1) * 8))) {
                    goto out;
                }
            } else if (nw_proto == IPPROTO_AH) {
                /* A standard AH definition isn't available, but the fields
                 * we care about are in the same location as the generic
                 * option header--only the header length is calculated
                 * differently. */
                const struct ip6_ext *ext_hdr = data;
                nw_proto = ext_hdr->ip6e_nxt;
                if (OVS_UNLIKELY(!data_try_pull(&data, &size,
                                                (ext_hdr->ip6e_len + 2) * 4))) {
                    goto out;
                }
            } else if (nw_proto == IPPROTO_FRAGMENT) {
                const struct ovs_16aligned_ip6_frag *frag_hdr = data;

                nw_proto = frag_hdr->ip6f_nxt;
                if (!data_try_pull(&data, &size, sizeof *frag_hdr)) {
                    goto out;
                }

                /* We only process the first fragment. */
                if (frag_hdr->ip6f_offlg != htons(0)) {
                    nw_frag = FLOW_NW_FRAG_ANY;
                    if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) != htons(0)) {
                        nw_frag |= FLOW_NW_FRAG_LATER;
                        nw_proto = IPPROTO_FRAGMENT;
                        break;
                    }
                }
            }
        }
    } else {
        if (dl_type == htons(ETH_TYPE_ARP) ||
            dl_type == htons(ETH_TYPE_RARP)) {
            uint8_t arp_buf[2][ETH_ADDR_LEN];
            const struct arp_eth_header *arp = (const struct arp_eth_header *)
                data_try_pull(&data, &size, ARP_ETH_HEADER_LEN);

            if (OVS_LIKELY(arp) && OVS_LIKELY(arp->ar_hrd == htons(1))
                && OVS_LIKELY(arp->ar_pro == htons(ETH_TYPE_IP))
                && OVS_LIKELY(arp->ar_hln == ETH_ADDR_LEN)
                && OVS_LIKELY(arp->ar_pln == 4)) {
                miniflow_push_be32(mf, nw_src,
                                   get_16aligned_be32(&arp->ar_spa));
                miniflow_push_be32(mf, nw_dst,
                                   get_16aligned_be32(&arp->ar_tpa));

                /* We only match on the lower 8 bits of the opcode. */
                if (OVS_LIKELY(ntohs(arp->ar_op) <= 0xff)) {
                    miniflow_push_be32(mf, ipv6_label, 0); /* Pad with ARP. */
                    miniflow_push_be32(mf, nw_frag, htonl(ntohs(arp->ar_op)));
                }

                /* Must be adjacent. */
                BUILD_ASSERT(offsetof(struct flow, arp_sha) + 6
                             == offsetof(struct flow, arp_tha));

                memcpy(arp_buf[0], arp->ar_sha, ETH_ADDR_LEN);
                memcpy(arp_buf[1], arp->ar_tha, ETH_ADDR_LEN);
                miniflow_push_macs(mf, arp_sha, arp_buf);
                miniflow_pad_to_64(mf, tcp_flags);
            }
        }
        goto out;
    }

    packet->l4_ofs = (char *)data - l2;
    miniflow_push_be32(mf, nw_frag,
                       BYTES_TO_BE32(nw_frag, nw_tos, nw_ttl, nw_proto));

    if (OVS_LIKELY(!(nw_frag & FLOW_NW_FRAG_LATER))) {
        if (OVS_LIKELY(nw_proto == IPPROTO_TCP)) {
            if (OVS_LIKELY(size >= TCP_HEADER_LEN)) {
                const struct tcp_header *tcp = data;

                miniflow_push_be32(mf, arp_tha[2], 0);
                miniflow_push_be32(mf, tcp_flags,
                                   TCP_FLAGS_BE32(tcp->tcp_ctl));
                miniflow_push_be16(mf, tp_src, tcp->tcp_src);
                miniflow_push_be16(mf, tp_dst, tcp->tcp_dst);
                miniflow_pad_to_64(mf, igmp_group_ip4);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_UDP)) {
            if (OVS_LIKELY(size >= UDP_HEADER_LEN)) {
                const struct udp_header *udp = data;

                miniflow_push_be16(mf, tp_src, udp->udp_src);
                miniflow_push_be16(mf, tp_dst, udp->udp_dst);
                miniflow_pad_to_64(mf, igmp_group_ip4);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_SCTP)) {
            if (OVS_LIKELY(size >= SCTP_HEADER_LEN)) {
                const struct sctp_header *sctp = data;

                miniflow_push_be16(mf, tp_src, sctp->sctp_src);
                miniflow_push_be16(mf, tp_dst, sctp->sctp_dst);
                miniflow_pad_to_64(mf, igmp_group_ip4);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMP)) {
            if (OVS_LIKELY(size >= ICMP_HEADER_LEN)) {
                const struct icmp_header *icmp = data;

                miniflow_push_be16(mf, tp_src, htons(icmp->icmp_type));
                miniflow_push_be16(mf, tp_dst, htons(icmp->icmp_code));
                miniflow_pad_to_64(mf, igmp_group_ip4);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_IGMP)) {
            if (OVS_LIKELY(size >= IGMP_HEADER_LEN)) {
                const struct igmp_header *igmp = data;

                miniflow_push_be16(mf, tp_src, htons(igmp->igmp_type));
                miniflow_push_be16(mf, tp_dst, htons(igmp->igmp_code));
                miniflow_push_be32(mf, igmp_group_ip4,
                                   get_16aligned_be32(&igmp->group));
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMPV6)) {
            if (OVS_LIKELY(size >= sizeof(struct icmp6_hdr))) {
                const struct in6_addr *nd_target = NULL;
                uint8_t arp_buf[2][ETH_ADDR_LEN];
                const struct icmp6_hdr *icmp = data_pull(&data, &size,
                                                         sizeof *icmp);
                memset(arp_buf, 0, sizeof arp_buf);
                if (OVS_LIKELY(parse_icmpv6(&data, &size, icmp, &nd_target,
                                            arp_buf))) {
                    if (nd_target) {
                        miniflow_push_words(mf, nd_target, nd_target,
                                            sizeof *nd_target / 8);
                    }
                    miniflow_push_macs(mf, arp_sha, arp_buf);
                    miniflow_pad_to_64(mf, tcp_flags);
                    miniflow_push_be16(mf, tp_src, htons(icmp->icmp6_type));
                    miniflow_push_be16(mf, tp_dst, htons(icmp->icmp6_code));
                    miniflow_pad_to_64(mf, igmp_group_ip4);
                }
            }
        }
    }
 out:
    dst->map = mf.map;
}
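
/* Illustrative sketch, not part of the original file: one way a datapath-style
 * caller could drive the extraction code above.  It assumes the
 * miniflow_extract() prototype from flow.h at this revision (packet, packet
 * metadata, destination miniflow) and a caller-supplied buffer for the inline
 * miniflow values; the helper name and layout are hypothetical, so the sketch
 * is kept under #if 0 rather than compiled. */
#if 0
static void
example_extract_key(struct ofpbuf *packet, const struct pkt_metadata *md)
{
    struct {
        struct miniflow mf;
        uint64_t buf[FLOW_U64S];   /* Room for every possible inline value. */
    } key;

    miniflow_extract(packet, md, &key.mf);
    /* 'key.mf' can now be used for lookup, or expanded back into a full
     * struct flow with miniflow_expand(). */
}
#endif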

/* For every bit of a field that is wildcarded in 'wildcards', sets the
 * corresponding bit in 'flow' to zero. */
void
flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
{
    uint64_t *flow_u64 = (uint64_t *) flow;
    const uint64_t *wc_u64 = (const uint64_t *) &wildcards->masks;
    size_t i;

    for (i = 0; i < FLOW_U64S; i++) {
        flow_u64[i] &= wc_u64[i];
    }
}

void
flow_unwildcard_tp_ports(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->nw_proto != IPPROTO_ICMP) {
        memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
    } else {
        wc->masks.tp_src = htons(0xff);
        wc->masks.tp_dst = htons(0xff);
    }
}

/* Initializes 'fmd' with the metadata found in 'flow'. */
void
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
{
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 30);

    fmd->dp_hash = flow->dp_hash;
    fmd->recirc_id = flow->recirc_id;
    fmd->tun_id = flow->tunnel.tun_id;
    fmd->tun_src = flow->tunnel.ip_src;
    fmd->tun_dst = flow->tunnel.ip_dst;
    fmd->metadata = flow->metadata;
    memcpy(fmd->regs, flow->regs, sizeof fmd->regs);
    fmd->pkt_mark = flow->pkt_mark;
    fmd->in_port = flow->in_port.ofp_port;
}

char *
flow_to_string(const struct flow *flow)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    flow_format(&ds, flow);
    return ds_cstr(&ds);
}

const char *
flow_tun_flag_to_string(uint32_t flags)
{
    switch (flags) {
    case FLOW_TNL_F_DONT_FRAGMENT:
        return "df";
    case FLOW_TNL_F_CSUM:
        return "csum";
    case FLOW_TNL_F_KEY:
        return "key";
    case FLOW_TNL_F_OAM:
        return "oam";
    default:
        return NULL;
    }
}

void
format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
             uint32_t flags, char del)
{
    uint32_t bad = 0;

    if (!flags) {
        return;
    }
    while (flags) {
        uint32_t bit = rightmost_1bit(flags);
        const char *s;

        s = bit_to_string(bit);
        if (s) {
            ds_put_format(ds, "%s%c", s, del);
        } else {
            bad |= bit;
        }

        flags &= ~bit;
    }

    if (bad) {
        ds_put_format(ds, "0x%"PRIx32"%c", bad, del);
    }
    ds_chomp(ds, del);
}
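
/* Illustrative sketch, not part of the original file: how format_flags() is
 * typically combined with a bit_to_string callback such as
 * flow_tun_flag_to_string() above.  The wrapper name is hypothetical, so the
 * sketch stays under #if 0. */
#if 0
static void
example_format_tnl_flags(struct ds *ds, uint16_t tnl_flags)
{
    ds_put_cstr(ds, "flags(");
    format_flags(ds, flow_tun_flag_to_string, tnl_flags, ',');
    ds_put_char(ds, ')');
    /* Yields e.g. "flags(df,key)" for
     * FLOW_TNL_F_DONT_FRAGMENT | FLOW_TNL_F_KEY. */
}
#endif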

void
format_flags_masked(struct ds *ds, const char *name,
                    const char *(*bit_to_string)(uint32_t), uint32_t flags,
                    uint32_t mask)
{
    if (name) {
        ds_put_format(ds, "%s=", name);
    }
    while (mask) {
        uint32_t bit = rightmost_1bit(mask);
        const char *s = bit_to_string(bit);

        ds_put_format(ds, "%s%s", (flags & bit) ? "+" : "-",
                      s ? s : "[Unknown]");
        mask &= ~bit;
    }
}
|
|
|
|
|
|
2009-07-08 13:19:16 -07:00
|
|
|
|
void
|
2010-09-03 11:30:02 -07:00
|
|
|
|
flow_format(struct ds *ds, const struct flow *flow)
|
2009-07-08 13:19:16 -07:00
|
|
|
|
{
|
2012-10-22 14:00:35 -07:00
|
|
|
|
struct match match;
|
2014-10-01 15:35:45 -07:00
|
|
|
|
struct flow_wildcards *wc = &match.wc;
|
2012-09-13 20:11:08 -07:00
|
|
|
|
|
2012-10-22 14:00:35 -07:00
|
|
|
|
match_wc_init(&match, flow);
|
2014-10-01 15:35:45 -07:00
|
|
|
|
|
|
|
|
|
/* As this function is most often used for formatting a packet in a
|
|
|
|
|
* packet-in message, skip formatting the packet context fields that are
|
|
|
|
|
* all-zeroes (Openflow spec encourages leaving out all-zeroes context
|
|
|
|
|
* fields from the packet-in messages). We make an exception with the
|
|
|
|
|
* 'in_port' field, which we always format, as packets usually have an
|
|
|
|
|
* in_port, and 0 is a port just like any other port. */
|
|
|
|
|
if (!flow->skb_priority) {
|
|
|
|
|
WC_UNMASK_FIELD(wc, skb_priority);
|
|
|
|
|
}
|
|
|
|
|
if (!flow->pkt_mark) {
|
|
|
|
|
WC_UNMASK_FIELD(wc, pkt_mark);
|
|
|
|
|
}
|
|
|
|
|
if (!flow->recirc_id) {
|
|
|
|
|
WC_UNMASK_FIELD(wc, recirc_id);
|
|
|
|
|
}
|
|
|
|
|
for (int i = 0; i < FLOW_N_REGS; i++) {
|
|
|
|
|
if (!flow->regs[i]) {
|
|
|
|
|
WC_UNMASK_FIELD(wc, regs[i]);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (!flow->metadata) {
|
|
|
|
|
WC_UNMASK_FIELD(wc, metadata);
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-26 15:40:48 -08:00
|
|
|
|
match_format(&match, ds, OFP_DEFAULT_PRIORITY);
|
2009-07-08 13:19:16 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
2010-09-03 11:30:02 -07:00
|
|
|
|
flow_print(FILE *stream, const struct flow *flow)
|
2009-07-08 13:19:16 -07:00
|
|
|
|
{
|
|
|
|
|
char *s = flow_to_string(flow);
|
|
|
|
|
fputs(s, stream);
|
|
|
|
|
free(s);
|
|
|
|
|
}
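
/* Illustrative sketch, not part of the original file: flow_to_string()
 * allocates, so callers are responsible for freeing the result; flow_print()
 * above wraps exactly this pattern for a stdio stream.  The helper name is
 * hypothetical, so the sketch stays under #if 0. */
#if 0
static void
example_dump_flow(const struct flow *flow)
{
    char *s = flow_to_string(flow);   /* Caller owns the returned string. */
    fprintf(stderr, "flow: %s\n", s);
    free(s);
}
#endif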
|
2010-10-20 16:33:10 -07:00
|
|
|
|
|
|
|
|
|
/* flow_wildcards functions. */
|
|
|
|
|
|
2010-11-10 14:39:54 -08:00
|
|
|
|
/* Initializes 'wc' as a set of wildcards that matches every packet. */
|
2010-10-20 16:33:10 -07:00
|
|
|
|
void
|
2010-11-10 14:39:54 -08:00
|
|
|
|
flow_wildcards_init_catchall(struct flow_wildcards *wc)
|
2010-10-20 16:33:10 -07:00
|
|
|
|
{
|
2012-08-07 13:43:18 -07:00
|
|
|
|
memset(&wc->masks, 0, sizeof wc->masks);
|
2010-10-20 16:33:10 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-10-01 15:35:45 -07:00
|
|
|
|
/* Converts a flow into flow wildcards. It sets the wildcard masks based on
|
|
|
|
|
* the packet headers extracted to 'flow'. It will not set the mask for fields
|
|
|
|
|
* that do not make sense for the packet type. OpenFlow-only metadata is
|
|
|
|
|
* wildcarded, but other metadata is unconditionally exact-matched. */
|
|
|
|
|
void flow_wildcards_init_for_packet(struct flow_wildcards *wc,
|
|
|
|
|
const struct flow *flow)
|
|
|
|
|
{
|
|
|
|
|
memset(&wc->masks, 0x0, sizeof wc->masks);
|
|
|
|
|
|
2014-10-17 09:37:11 -07:00
|
|
|
|
/* Update this function whenever struct flow changes. */
|
2015-01-11 13:25:24 -08:00
|
|
|
|
BUILD_ASSERT_DECL(FLOW_WC_SEQ == 30);
|
2014-10-17 09:37:11 -07:00
|
|
|
|
|
2014-10-01 15:35:45 -07:00
|
|
|
|
if (flow->tunnel.ip_dst) {
|
|
|
|
|
if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
|
|
|
|
|
WC_MASK_FIELD(wc, tunnel.tun_id);
|
|
|
|
|
}
|
|
|
|
|
WC_MASK_FIELD(wc, tunnel.ip_src);
|
|
|
|
|
WC_MASK_FIELD(wc, tunnel.ip_dst);
|
|
|
|
|
WC_MASK_FIELD(wc, tunnel.flags);
|
|
|
|
|
WC_MASK_FIELD(wc, tunnel.ip_tos);
|
|
|
|
|
WC_MASK_FIELD(wc, tunnel.ip_ttl);
|
|
|
|
|
WC_MASK_FIELD(wc, tunnel.tp_src);
|
|
|
|
|
WC_MASK_FIELD(wc, tunnel.tp_dst);
|
|
|
|
|
} else if (flow->tunnel.tun_id) {
|
|
|
|
|
WC_MASK_FIELD(wc, tunnel.tun_id);
|
|
|
|
|
}
|
|
|
|
|
|
2015-01-11 13:25:24 -08:00
|
|
|
|
/* metadata, regs, and conj_id wildcarded. */
|
2014-10-01 15:35:45 -07:00
|
|
|
|
|
|
|
|
|
WC_MASK_FIELD(wc, skb_priority);
|
|
|
|
|
WC_MASK_FIELD(wc, pkt_mark);
|
|
|
|
|
WC_MASK_FIELD(wc, recirc_id);
|
|
|
|
|
WC_MASK_FIELD(wc, dp_hash);
|
|
|
|
|
WC_MASK_FIELD(wc, in_port);
|
|
|
|
|
|
2014-11-03 14:24:01 -08:00
|
|
|
|
/* actset_output wildcarded. */
|
|
|
|
|
|
2014-10-01 15:35:45 -07:00
|
|
|
|
WC_MASK_FIELD(wc, dl_dst);
|
|
|
|
|
WC_MASK_FIELD(wc, dl_src);
|
|
|
|
|
WC_MASK_FIELD(wc, dl_type);
|
|
|
|
|
WC_MASK_FIELD(wc, vlan_tci);
|
|
|
|
|
|
|
|
|
|
if (flow->dl_type == htons(ETH_TYPE_IP)) {
|
|
|
|
|
WC_MASK_FIELD(wc, nw_src);
|
|
|
|
|
WC_MASK_FIELD(wc, nw_dst);
|
|
|
|
|
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
|
|
|
|
|
WC_MASK_FIELD(wc, ipv6_src);
|
|
|
|
|
WC_MASK_FIELD(wc, ipv6_dst);
|
|
|
|
|
WC_MASK_FIELD(wc, ipv6_label);
|
|
|
|
|
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
|
|
|
|
|
flow->dl_type == htons(ETH_TYPE_RARP)) {
|
|
|
|
|
WC_MASK_FIELD(wc, nw_src);
|
|
|
|
|
WC_MASK_FIELD(wc, nw_dst);
|
|
|
|
|
WC_MASK_FIELD(wc, nw_proto);
|
|
|
|
|
WC_MASK_FIELD(wc, arp_sha);
|
|
|
|
|
WC_MASK_FIELD(wc, arp_tha);
|
|
|
|
|
return;
|
|
|
|
|
} else if (eth_type_mpls(flow->dl_type)) {
|
|
|
|
|
for (int i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
|
|
|
|
|
WC_MASK_FIELD(wc, mpls_lse[i]);
|
|
|
|
|
if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
} else {
|
|
|
|
|
return; /* Unknown ethertype. */
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* IPv4 or IPv6. */
|
|
|
|
|
WC_MASK_FIELD(wc, nw_frag);
|
|
|
|
|
WC_MASK_FIELD(wc, nw_tos);
|
|
|
|
|
WC_MASK_FIELD(wc, nw_ttl);
|
|
|
|
|
WC_MASK_FIELD(wc, nw_proto);
|
|
|
|
|
|
|
|
|
|
/* No transport layer header in later fragments. */
|
|
|
|
|
if (!(flow->nw_frag & FLOW_NW_FRAG_LATER) &&
|
|
|
|
|
(flow->nw_proto == IPPROTO_ICMP ||
|
|
|
|
|
flow->nw_proto == IPPROTO_ICMPV6 ||
|
|
|
|
|
flow->nw_proto == IPPROTO_TCP ||
|
|
|
|
|
flow->nw_proto == IPPROTO_UDP ||
|
|
|
|
|
flow->nw_proto == IPPROTO_SCTP ||
|
|
|
|
|
flow->nw_proto == IPPROTO_IGMP)) {
|
|
|
|
|
WC_MASK_FIELD(wc, tp_src);
|
|
|
|
|
WC_MASK_FIELD(wc, tp_dst);
|
|
|
|
|
|
|
|
|
|
if (flow->nw_proto == IPPROTO_TCP) {
|
|
|
|
|
WC_MASK_FIELD(wc, tcp_flags);
|
|
|
|
|
} else if (flow->nw_proto == IPPROTO_ICMPV6) {
|
|
|
|
|
WC_MASK_FIELD(wc, arp_sha);
|
|
|
|
|
WC_MASK_FIELD(wc, arp_tha);
|
|
|
|
|
WC_MASK_FIELD(wc, nd_target);
|
|
|
|
|
} else if (flow->nw_proto == IPPROTO_IGMP) {
|
|
|
|
|
WC_MASK_FIELD(wc, igmp_group_ip4);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
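
/* Illustrative sketch, not part of the original file: the per-packet
 * wildcards computed above pair naturally with flow_zero_wildcards() earlier
 * in this file, clearing fields that cannot matter for a packet of this type.
 * The helper name is hypothetical, so the sketch stays under #if 0. */
#if 0
static void
example_normalize_flow(struct flow *flow)
{
    struct flow_wildcards wc;

    flow_wildcards_init_for_packet(&wc, flow);
    flow_zero_wildcards(flow, &wc);   /* Zeroes the don't-care fields. */
}
#endif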
|
|
|
|
|
|
2014-10-17 09:37:11 -07:00
|
|
|
|
/* Return a map of possible fields for a packet of the same type as 'flow'.
|
|
|
|
|
* Including extra bits in the returned mask is not wrong, it is just less
|
|
|
|
|
* optimal.
|
|
|
|
|
*
|
|
|
|
|
* This is a less precise version of flow_wildcards_init_for_packet() above. */
|
|
|
|
|
uint64_t
|
|
|
|
|
flow_wc_map(const struct flow *flow)
|
|
|
|
|
{
|
|
|
|
|
/* Update this function whenever struct flow changes. */
|
2015-01-11 13:25:24 -08:00
|
|
|
|
BUILD_ASSERT_DECL(FLOW_WC_SEQ == 30);
|
2014-10-17 09:37:11 -07:00
|
|
|
|
|
|
|
|
|
uint64_t map = (flow->tunnel.ip_dst) ? MINIFLOW_MAP(tunnel) : 0;
|
|
|
|
|
|
|
|
|
|
/* Metadata fields that can appear on packet input. */
|
|
|
|
|
map |= MINIFLOW_MAP(skb_priority) | MINIFLOW_MAP(pkt_mark)
|
|
|
|
|
| MINIFLOW_MAP(recirc_id) | MINIFLOW_MAP(dp_hash)
|
|
|
|
|
| MINIFLOW_MAP(in_port)
|
|
|
|
|
| MINIFLOW_MAP(dl_dst) | MINIFLOW_MAP(dl_src)
|
|
|
|
|
| MINIFLOW_MAP(dl_type) | MINIFLOW_MAP(vlan_tci);
|
|
|
|
|
|
|
|
|
|
/* Ethertype-dependent fields. */
|
|
|
|
|
if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
|
|
|
|
|
map |= MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
|
|
|
|
|
| MINIFLOW_MAP(nw_proto) | MINIFLOW_MAP(nw_frag)
|
|
|
|
|
| MINIFLOW_MAP(nw_tos) | MINIFLOW_MAP(nw_ttl);
|
|
|
|
|
if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_IGMP)) {
|
|
|
|
|
map |= MINIFLOW_MAP(igmp_group_ip4);
|
|
|
|
|
} else {
|
|
|
|
|
map |= MINIFLOW_MAP(tcp_flags)
|
|
|
|
|
| MINIFLOW_MAP(tp_src) | MINIFLOW_MAP(tp_dst);
|
|
|
|
|
}
|
|
|
|
|
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
|
|
|
|
|
map |= MINIFLOW_MAP(ipv6_src) | MINIFLOW_MAP(ipv6_dst)
|
|
|
|
|
| MINIFLOW_MAP(ipv6_label)
|
|
|
|
|
| MINIFLOW_MAP(nw_proto) | MINIFLOW_MAP(nw_frag)
|
|
|
|
|
| MINIFLOW_MAP(nw_tos) | MINIFLOW_MAP(nw_ttl);
|
|
|
|
|
if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_ICMPV6)) {
|
|
|
|
|
map |= MINIFLOW_MAP(nd_target)
|
|
|
|
|
| MINIFLOW_MAP(arp_sha) | MINIFLOW_MAP(arp_tha);
|
|
|
|
|
} else {
|
|
|
|
|
map |= MINIFLOW_MAP(tcp_flags)
|
|
|
|
|
| MINIFLOW_MAP(tp_src) | MINIFLOW_MAP(tp_dst);
|
|
|
|
|
}
|
|
|
|
|
} else if (eth_type_mpls(flow->dl_type)) {
|
|
|
|
|
map |= MINIFLOW_MAP(mpls_lse);
|
|
|
|
|
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
|
|
|
|
|
flow->dl_type == htons(ETH_TYPE_RARP)) {
|
|
|
|
|
map |= MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
|
|
|
|
|
| MINIFLOW_MAP(nw_proto)
|
|
|
|
|
| MINIFLOW_MAP(arp_sha) | MINIFLOW_MAP(arp_tha);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return map;
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-10 23:32:51 -08:00
|
|
|
|
/* Clear the metadata and register wildcard masks. They are not packet
|
|
|
|
|
* header fields. */
|
|
|
|
|
void
|
|
|
|
|
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
|
|
|
|
|
{
|
2014-10-17 09:37:11 -07:00
|
|
|
|
/* Update this function whenever struct flow changes. */
|
2015-01-11 13:25:24 -08:00
|
|
|
|
BUILD_ASSERT_DECL(FLOW_WC_SEQ == 30);
|
2014-10-17 09:37:11 -07:00
|
|
|
|
|
2013-12-10 23:32:51 -08:00
|
|
|
|
memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
|
|
|
|
|
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
|
2014-11-03 14:24:01 -08:00
|
|
|
|
wc->masks.actset_output = 0;
|
2015-01-11 13:25:24 -08:00
|
|
|
|
wc->masks.conj_id = 0;
|
2013-12-10 23:32:51 -08:00
|
|
|
|
}
|
|
|
|
|
|
2011-09-12 16:38:52 -07:00
|
|
|
|
/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
|
|
|
|
|
* fields. */
|
|
|
|
|
bool
|
|
|
|
|
flow_wildcards_is_catchall(const struct flow_wildcards *wc)
|
|
|
|
|
{
|
2015-01-06 11:10:42 -08:00
|
|
|
|
const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
|
2012-08-07 13:43:18 -07:00
|
|
|
|
size_t i;
|
2011-09-12 16:38:52 -07:00
|
|
|
|
|
2015-01-06 11:10:42 -08:00
|
|
|
|
for (i = 0; i < FLOW_U64S; i++) {
|
|
|
|
|
if (wc_u64[i]) {
|
2011-09-12 16:38:52 -07:00
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-10 22:48:58 -07:00
|
|
|
|
/* Sets 'dst' as the bitwise AND of wildcards in 'src1' and 'src2'.
|
|
|
|
|
* That is, a bit or a field is wildcarded in 'dst' if it is wildcarded
|
|
|
|
|
* in 'src1' or 'src2' or both. */
|
2010-11-03 11:00:58 -07:00
|
|
|
|
void
|
2013-06-10 22:48:58 -07:00
|
|
|
|
flow_wildcards_and(struct flow_wildcards *dst,
|
|
|
|
|
const struct flow_wildcards *src1,
|
|
|
|
|
const struct flow_wildcards *src2)
|
2010-11-03 11:00:58 -07:00
|
|
|
|
{
|
2015-01-06 11:10:42 -08:00
|
|
|
|
uint64_t *dst_u64 = (uint64_t *) &dst->masks;
|
|
|
|
|
const uint64_t *src1_u64 = (const uint64_t *) &src1->masks;
|
|
|
|
|
const uint64_t *src2_u64 = (const uint64_t *) &src2->masks;
|
2012-08-07 13:43:18 -07:00
|
|
|
|
size_t i;
|
2012-01-27 15:38:53 -08:00
|
|
|
|
|
2015-01-06 11:10:42 -08:00
|
|
|
|
for (i = 0; i < FLOW_U64S; i++) {
|
|
|
|
|
dst_u64[i] = src1_u64[i] & src2_u64[i];
|
2012-08-07 13:38:38 -07:00
|
|
|
|
}
|
2010-11-03 11:00:58 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-06-10 22:48:58 -07:00
|
|
|
|
/* Sets 'dst' as the bitwise OR of wildcards in 'src1' and 'src2'. That
|
|
|
|
|
* is, a bit or a field is wildcarded in 'dst' if it is neither
|
|
|
|
|
* wildcarded in 'src1' nor 'src2'. */
|
|
|
|
|
void
|
|
|
|
|
flow_wildcards_or(struct flow_wildcards *dst,
|
|
|
|
|
const struct flow_wildcards *src1,
|
|
|
|
|
const struct flow_wildcards *src2)
|
|
|
|
|
{
|
2015-01-06 11:10:42 -08:00
|
|
|
|
uint64_t *dst_u64 = (uint64_t *) &dst->masks;
|
|
|
|
|
const uint64_t *src1_u64 = (const uint64_t *) &src1->masks;
|
|
|
|
|
const uint64_t *src2_u64 = (const uint64_t *) &src2->masks;
|
2013-06-10 22:48:58 -07:00
|
|
|
|
size_t i;
|
|
|
|
|
|
2015-01-06 11:10:42 -08:00
|
|
|
|
for (i = 0; i < FLOW_U64S; i++) {
|
|
|
|
|
dst_u64[i] = src1_u64[i] | src2_u64[i];
|
2013-06-10 22:48:58 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2010-11-03 11:00:58 -07:00
|
|
|
|
/* Returns a hash of the wildcards in 'wc'. */
|
|
|
|
|
uint32_t
|
2011-05-26 16:23:21 -07:00
|
|
|
|
flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
|
2010-11-03 11:00:58 -07:00
|
|
|
|
{
|
2012-12-13 16:38:22 -08:00
|
|
|
|
return flow_hash(&wc->masks, basis);
|
2010-11-03 11:00:58 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Returns true if 'a' and 'b' represent the same wildcards, false if they are
|
|
|
|
|
* different. */
|
|
|
|
|
bool
|
|
|
|
|
flow_wildcards_equal(const struct flow_wildcards *a,
|
|
|
|
|
const struct flow_wildcards *b)
|
|
|
|
|
{
|
2012-08-07 13:43:18 -07:00
|
|
|
|
return flow_equal(&a->masks, &b->masks);
|
2010-11-03 11:00:58 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Returns true if at least one bit or field is wildcarded in 'a' but not in
|
|
|
|
|
* 'b', false otherwise. */
|
|
|
|
|
bool
|
|
|
|
|
flow_wildcards_has_extra(const struct flow_wildcards *a,
|
|
|
|
|
const struct flow_wildcards *b)
|
|
|
|
|
{
|
2015-01-06 11:10:42 -08:00
|
|
|
|
const uint64_t *a_u64 = (const uint64_t *) &a->masks;
|
|
|
|
|
const uint64_t *b_u64 = (const uint64_t *) &b->masks;
|
2012-08-07 13:43:18 -07:00
|
|
|
|
size_t i;
|
2012-01-27 15:38:53 -08:00
|
|
|
|
|
2015-01-06 11:10:42 -08:00
|
|
|
|
for (i = 0; i < FLOW_U64S; i++) {
|
|
|
|
|
if ((a_u64[i] & b_u64[i]) != b_u64[i]) {
|
2010-11-11 10:41:33 -08:00
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
2012-08-07 13:43:18 -07:00
|
|
|
|
return false;
|
|
|
|
|
}
|
2010-11-11 10:41:33 -08:00
|
|
|
|
|
2012-08-07 13:43:18 -07:00
|
|
|
|
/* Returns true if 'a' and 'b' are equal, except that 0-bits (wildcarded bits)
|
|
|
|
|
* in 'wc' do not need to be equal in 'a' and 'b'. */
|
|
|
|
|
bool
|
|
|
|
|
flow_equal_except(const struct flow *a, const struct flow *b,
|
|
|
|
|
const struct flow_wildcards *wc)
|
|
|
|
|
{
|
2015-01-06 11:10:42 -08:00
|
|
|
|
const uint64_t *a_u64 = (const uint64_t *) a;
|
|
|
|
|
const uint64_t *b_u64 = (const uint64_t *) b;
|
|
|
|
|
const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
|
2012-08-07 13:43:18 -07:00
|
|
|
|
size_t i;
|
2010-12-29 19:03:46 -08:00
|
|
|
|
|
2015-01-06 11:10:42 -08:00
|
|
|
|
for (i = 0; i < FLOW_U64S; i++) {
|
|
|
|
|
if ((a_u64[i] ^ b_u64[i]) & wc_u64[i]) {
|
2012-08-07 13:43:18 -07:00
|
|
|
|
return false;
|
|
|
|
|
}
|
2012-04-25 15:48:40 -07:00
|
|
|
|
}
|
2012-08-07 13:43:18 -07:00
|
|
|
|
return true;
|
2010-11-03 11:00:58 -07:00
|
|
|
|
}
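
/* Illustrative sketch, not part of the original file: flow_equal_except()
 * gives the same answer as masking both flows with 'wc' and comparing them,
 * just without the copies.  The helper name is hypothetical, so the sketch
 * stays under #if 0. */
#if 0
static bool
example_equal_under_mask(const struct flow *a, const struct flow *b,
                         const struct flow_wildcards *wc)
{
    struct flow a_masked = *a;
    struct flow b_masked = *b;

    flow_zero_wildcards(&a_masked, wc);
    flow_zero_wildcards(&b_masked, wc);
    return flow_equal(&a_masked, &b_masked);   /* == flow_equal_except(). */
}
#endif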
|
|
|
|
|
|
2010-11-11 10:41:33 -08:00
|
|
|
|
/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
|
|
|
|
|
* (A 0-bit indicates a wildcard bit.) */
|
|
|
|
|
void
|
|
|
|
|
flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
|
|
|
|
|
{
|
2012-08-07 13:38:38 -07:00
|
|
|
|
wc->masks.regs[idx] = mask;
|
2010-11-11 10:41:33 -08:00
|
|
|
|
}
|
2011-02-01 18:50:25 -08:00
|
|
|
|
|
2014-07-28 09:50:37 -07:00
|
|
|
|
/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
|
|
|
|
|
* (A 0-bit indicates a wildcard bit.) */
|
|
|
|
|
void
|
|
|
|
|
flow_wildcards_set_xreg_mask(struct flow_wildcards *wc, int idx, uint64_t mask)
|
|
|
|
|
{
|
|
|
|
|
flow_set_xreg(&wc->masks, idx, mask);
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-29 15:50:38 -07:00
|
|
|
|
/* Calculates the 5-tuple hash from the given miniflow.
|
|
|
|
|
* This returns the same value as flow_hash_5tuple for the corresponding
|
|
|
|
|
* flow. */
|
2014-04-18 08:26:57 -07:00
|
|
|
|
uint32_t
|
|
|
|
|
miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis)
|
|
|
|
|
{
|
2014-04-29 15:50:38 -07:00
|
|
|
|
uint32_t hash = basis;
|
2014-04-18 08:26:57 -07:00
|
|
|
|
|
2014-04-29 15:50:38 -07:00
|
|
|
|
if (flow) {
|
|
|
|
|
ovs_be16 dl_type = MINIFLOW_GET_BE16(flow, dl_type);
|
|
|
|
|
|
2014-07-04 07:57:18 -07:00
|
|
|
|
hash = hash_add(hash, MINIFLOW_GET_U8(flow, nw_proto));
|
2014-04-29 15:50:38 -07:00
|
|
|
|
|
|
|
|
|
/* Separate loops for better optimization. */
|
|
|
|
|
if (dl_type == htons(ETH_TYPE_IPV6)) {
|
2015-01-06 11:10:42 -08:00
|
|
|
|
uint64_t map = MINIFLOW_MAP(ipv6_src) | MINIFLOW_MAP(ipv6_dst);
|
|
|
|
|
uint64_t value;
|
2014-04-18 08:26:57 -07:00
|
|
|
|
|
2014-04-29 15:50:38 -07:00
|
|
|
|
MINIFLOW_FOR_EACH_IN_MAP(value, flow, map) {
|
2015-01-06 11:10:42 -08:00
|
|
|
|
hash = hash_add64(hash, value);
|
2014-04-29 15:50:38 -07:00
|
|
|
|
}
|
|
|
|
|
} else {
|
2015-01-06 11:10:42 -08:00
|
|
|
|
hash = hash_add(hash, MINIFLOW_GET_U32(flow, nw_src));
|
|
|
|
|
hash = hash_add(hash, MINIFLOW_GET_U32(flow, nw_dst));
|
2014-04-29 15:50:38 -07:00
|
|
|
|
}
|
2015-01-06 11:10:42 -08:00
|
|
|
|
/* Add both ports at once. */
|
|
|
|
|
hash = hash_add(hash, MINIFLOW_GET_U32(flow, tp_src));
|
2014-07-04 07:57:18 -07:00
|
|
|
|
hash = hash_finish(hash, 42); /* Arbitrary number. */
|
2014-04-29 15:50:38 -07:00
|
|
|
|
}
|
|
|
|
|
return hash;
|
2014-04-18 08:26:57 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
BUILD_ASSERT_DECL(offsetof(struct flow, tp_src) + 2
|
|
|
|
|
== offsetof(struct flow, tp_dst) &&
|
|
|
|
|
offsetof(struct flow, tp_src) / 4
|
|
|
|
|
== offsetof(struct flow, tp_dst) / 4);
|
2014-04-29 15:50:38 -07:00
|
|
|
|
BUILD_ASSERT_DECL(offsetof(struct flow, ipv6_src) + 16
|
|
|
|
|
== offsetof(struct flow, ipv6_dst));
|
2014-04-18 08:26:57 -07:00
|
|
|
|
|
2014-02-26 10:07:38 -08:00
|
|
|
|
/* Calculates the 5-tuple hash from the given flow. */
|
|
|
|
|
uint32_t
|
|
|
|
|
flow_hash_5tuple(const struct flow *flow, uint32_t basis)
|
|
|
|
|
{
|
2014-04-29 15:50:38 -07:00
|
|
|
|
uint32_t hash = basis;
|
2014-02-26 10:07:38 -08:00
|
|
|
|
|
2014-04-29 15:50:38 -07:00
|
|
|
|
if (flow) {
|
2014-07-04 07:57:18 -07:00
|
|
|
|
hash = hash_add(hash, flow->nw_proto);
|
2014-04-29 15:50:38 -07:00
|
|
|
|
|
|
|
|
|
if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
|
2015-01-06 11:10:42 -08:00
|
|
|
|
const uint64_t *flow_u64 = (const uint64_t *)flow;
|
|
|
|
|
int ofs = offsetof(struct flow, ipv6_src) / 8;
|
|
|
|
|
int end = ofs + 2 * sizeof flow->ipv6_src / 8;
|
2014-02-26 10:07:38 -08:00
|
|
|
|
|
2015-01-06 11:10:42 -08:00
|
|
|
|
for (;ofs < end; ofs++) {
|
|
|
|
|
hash = hash_add64(hash, flow_u64[ofs]);
|
2014-04-29 15:50:38 -07:00
|
|
|
|
}
|
|
|
|
|
} else {
|
2014-07-04 07:57:18 -07:00
|
|
|
|
hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_src);
|
|
|
|
|
hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
|
2014-04-29 15:50:38 -07:00
|
|
|
|
}
|
2015-01-06 11:10:42 -08:00
|
|
|
|
/* Add both ports at once. */
|
|
|
|
|
hash = hash_add(hash,
|
|
|
|
|
((const uint32_t *)flow)[offsetof(struct flow, tp_src)
|
|
|
|
|
/ sizeof(uint32_t)]);
|
2014-07-04 07:57:18 -07:00
|
|
|
|
hash = hash_finish(hash, 42); /* Arbitrary number. */
|
2014-04-29 15:50:38 -07:00
|
|
|
|
}
|
|
|
|
|
return hash;
|
2014-02-26 10:07:38 -08:00
|
|
|
|
}
|
|
|
|
|
|
2011-02-01 18:50:25 -08:00
|
|
|
|
/* Hashes 'flow' based on its L2 through L4 protocol information. */
|
|
|
|
|
uint32_t
|
|
|
|
|
flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
|
|
|
|
|
{
|
|
|
|
|
struct {
|
2010-12-29 19:03:46 -08:00
|
|
|
|
union {
|
|
|
|
|
ovs_be32 ipv4_addr;
|
|
|
|
|
struct in6_addr ipv6_addr;
|
|
|
|
|
};
|
2011-02-01 18:50:25 -08:00
|
|
|
|
ovs_be16 eth_type;
|
|
|
|
|
ovs_be16 vlan_tci;
|
2012-02-02 21:57:54 -08:00
|
|
|
|
ovs_be16 tp_port;
|
2011-02-01 18:50:25 -08:00
|
|
|
|
uint8_t eth_addr[ETH_ADDR_LEN];
|
|
|
|
|
uint8_t ip_proto;
|
|
|
|
|
} fields;
|
|
|
|
|
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
memset(&fields, 0, sizeof fields);
|
|
|
|
|
for (i = 0; i < ETH_ADDR_LEN; i++) {
|
|
|
|
|
fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i];
|
|
|
|
|
}
|
|
|
|
|
fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
|
|
|
|
|
fields.eth_type = flow->dl_type;
|
2011-05-23 11:39:17 -07:00
|
|
|
|
|
|
|
|
|
/* UDP source and destination port are not taken into account because they
|
|
|
|
|
* will not necessarily be symmetric in a bidirectional flow. */
|
2011-02-01 18:50:25 -08:00
|
|
|
|
if (fields.eth_type == htons(ETH_TYPE_IP)) {
|
2010-12-29 19:03:46 -08:00
|
|
|
|
fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
|
|
|
|
|
fields.ip_proto = flow->nw_proto;
|
2013-08-22 20:24:44 +12:00
|
|
|
|
if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
|
2012-02-02 21:57:54 -08:00
|
|
|
|
fields.tp_port = flow->tp_src ^ flow->tp_dst;
|
2010-12-29 19:03:46 -08:00
|
|
|
|
}
|
|
|
|
|
} else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
|
|
|
|
|
const uint8_t *a = &flow->ipv6_src.s6_addr[0];
|
|
|
|
|
const uint8_t *b = &flow->ipv6_dst.s6_addr[0];
|
|
|
|
|
uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0];
|
|
|
|
|
|
|
|
|
|
for (i=0; i<16; i++) {
|
|
|
|
|
ipv6_addr[i] = a[i] ^ b[i];
|
|
|
|
|
}
|
2011-02-01 18:50:25 -08:00
|
|
|
|
fields.ip_proto = flow->nw_proto;
|
2013-08-22 20:24:44 +12:00
|
|
|
|
if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
|
2012-02-02 21:57:54 -08:00
|
|
|
|
fields.tp_port = flow->tp_src ^ flow->tp_dst;
|
2011-02-01 18:50:25 -08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return jhash_bytes(&fields, sizeof fields, basis);
|
2011-02-01 18:50:25 -08:00
|
|
|
|
}
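
/* Illustrative sketch, not part of the original file: because addresses and
 * ports are XORed together before hashing, both directions of a connection
 * hash to the same value, which is what makes this hash suitable for
 * symmetric load distribution.  The helper name is hypothetical, so the
 * sketch stays under #if 0. */
#if 0
static uint32_t
example_hash_either_direction(const struct flow *fwd, uint32_t basis)
{
    struct flow rev = *fwd;

    memcpy(rev.dl_src, fwd->dl_dst, ETH_ADDR_LEN);
    memcpy(rev.dl_dst, fwd->dl_src, ETH_ADDR_LEN);
    rev.nw_src = fwd->nw_dst;
    rev.nw_dst = fwd->nw_src;
    rev.tp_src = fwd->tp_dst;
    rev.tp_dst = fwd->tp_src;

    ovs_assert(flow_hash_symmetric_l4(&rev, basis)
               == flow_hash_symmetric_l4(fwd, basis));
    return flow_hash_symmetric_l4(fwd, basis);
}
#endif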
|
2011-07-13 16:20:24 -07:00
|
|
|
|
|
2013-10-17 14:28:20 -07:00
|
|
|
|
/* Initialize a flow with random fields that matter for nx_hash_fields. */
|
|
|
|
|
void
|
|
|
|
|
flow_random_hash_fields(struct flow *flow)
|
|
|
|
|
{
|
|
|
|
|
uint16_t rnd = random_uint16();
|
|
|
|
|
|
|
|
|
|
/* Initialize to all zeros. */
|
|
|
|
|
memset(flow, 0, sizeof *flow);
|
|
|
|
|
|
|
|
|
|
eth_addr_random(flow->dl_src);
|
|
|
|
|
eth_addr_random(flow->dl_dst);
|
|
|
|
|
|
|
|
|
|
flow->vlan_tci = (OVS_FORCE ovs_be16) (random_uint16() & VLAN_VID_MASK);
|
|
|
|
|
|
|
|
|
|
/* Make most of the random flows IPv4, some IPv6, and rest random. */
|
|
|
|
|
flow->dl_type = rnd < 0x8000 ? htons(ETH_TYPE_IP) :
|
|
|
|
|
rnd < 0xc000 ? htons(ETH_TYPE_IPV6) : (OVS_FORCE ovs_be16)rnd;
|
|
|
|
|
|
|
|
|
|
if (dl_type_is_ip_any(flow->dl_type)) {
|
|
|
|
|
if (flow->dl_type == htons(ETH_TYPE_IP)) {
|
|
|
|
|
flow->nw_src = (OVS_FORCE ovs_be32)random_uint32();
|
|
|
|
|
flow->nw_dst = (OVS_FORCE ovs_be32)random_uint32();
|
|
|
|
|
} else {
|
|
|
|
|
random_bytes(&flow->ipv6_src, sizeof flow->ipv6_src);
|
|
|
|
|
random_bytes(&flow->ipv6_dst, sizeof flow->ipv6_dst);
|
|
|
|
|
}
|
|
|
|
|
/* Make most of IP flows TCP, some UDP or SCTP, and rest random. */
|
|
|
|
|
rnd = random_uint16();
|
|
|
|
|
flow->nw_proto = rnd < 0x8000 ? IPPROTO_TCP :
|
|
|
|
|
rnd < 0xc000 ? IPPROTO_UDP :
|
|
|
|
|
rnd < 0xd000 ? IPPROTO_SCTP : (uint8_t)rnd;
|
|
|
|
|
if (flow->nw_proto == IPPROTO_TCP ||
|
|
|
|
|
flow->nw_proto == IPPROTO_UDP ||
|
|
|
|
|
flow->nw_proto == IPPROTO_SCTP) {
|
|
|
|
|
flow->tp_src = (OVS_FORCE ovs_be16)random_uint16();
|
|
|
|
|
flow->tp_dst = (OVS_FORCE ovs_be16)random_uint16();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-05-14 18:24:43 -07:00
|
|
|
|
/* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
|
|
|
|
|
void
|
2013-06-26 16:37:16 -07:00
|
|
|
|
flow_mask_hash_fields(const struct flow *flow, struct flow_wildcards *wc,
|
|
|
|
|
enum nx_hash_fields fields)
|
2013-05-14 18:24:43 -07:00
|
|
|
|
{
|
|
|
|
|
switch (fields) {
|
|
|
|
|
case NX_HASH_FIELDS_ETH_SRC:
|
|
|
|
|
memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case NX_HASH_FIELDS_SYMMETRIC_L4:
|
|
|
|
|
memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
|
|
|
|
|
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
|
2013-06-26 16:37:16 -07:00
|
|
|
|
if (flow->dl_type == htons(ETH_TYPE_IP)) {
|
|
|
|
|
memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
|
|
|
|
|
memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
|
2013-06-28 11:31:48 -07:00
|
|
|
|
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
|
2013-06-26 16:37:16 -07:00
|
|
|
|
memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
|
|
|
|
|
memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
|
|
|
|
|
}
|
|
|
|
|
if (is_ip_any(flow)) {
|
|
|
|
|
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
|
2013-12-06 18:53:12 -08:00
|
|
|
|
flow_unwildcard_tp_ports(flow, wc);
|
2013-06-26 16:37:16 -07:00
|
|
|
|
}
|
2013-06-18 23:55:47 -07:00
|
|
|
|
wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
|
2013-05-14 18:24:43 -07:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
2013-12-17 10:32:12 -08:00
|
|
|
|
OVS_NOT_REACHED();
|
2013-05-14 18:24:43 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-07-13 16:20:24 -07:00
|
|
|
|
/* Hashes the portions of 'flow' designated by 'fields'. */
|
|
|
|
|
uint32_t
|
|
|
|
|
flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,
|
|
|
|
|
uint16_t basis)
|
|
|
|
|
{
|
|
|
|
|
switch (fields) {
|
|
|
|
|
|
|
|
|
|
case NX_HASH_FIELDS_ETH_SRC:
|
|
|
|
|
return jhash_bytes(flow->dl_src, sizeof flow->dl_src, basis);
|
2011-07-13 16:20:24 -07:00
|
|
|
|
|
|
|
|
|
case NX_HASH_FIELDS_SYMMETRIC_L4:
|
|
|
|
|
return flow_hash_symmetric_l4(flow, basis);
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-17 10:32:12 -08:00
|
|
|
|
OVS_NOT_REACHED();
|
2011-07-13 16:20:24 -07:00
|
|
|
|
}
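
/* Illustrative sketch, not part of the original file: a multipath-style
 * caller can use flow_hash_fields() above to spread connections across a set
 * of equal-cost links.  The helper name is hypothetical, so the sketch stays
 * under #if 0. */
#if 0
static int
example_pick_link(const struct flow *flow, int n_links)
{
    uint32_t hash = flow_hash_fields(flow, NX_HASH_FIELDS_SYMMETRIC_L4, 0);

    return hash % n_links;
}
#endif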
|
|
|
|
|
|
|
|
|
|
/* Returns a string representation of 'fields'. */
|
|
|
|
|
const char *
|
|
|
|
|
flow_hash_fields_to_str(enum nx_hash_fields fields)
|
|
|
|
|
{
|
|
|
|
|
switch (fields) {
|
|
|
|
|
case NX_HASH_FIELDS_ETH_SRC: return "eth_src";
|
|
|
|
|
case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4";
|
|
|
|
|
default: return "<unknown>";
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Returns true if the value of 'fields' is supported. Otherwise false. */
|
|
|
|
|
bool
|
|
|
|
|
flow_hash_fields_valid(enum nx_hash_fields fields)
|
|
|
|
|
{
|
|
|
|
|
return fields == NX_HASH_FIELDS_ETH_SRC
|
|
|
|
|
|| fields == NX_HASH_FIELDS_SYMMETRIC_L4;
|
|
|
|
|
}
|
2011-09-08 14:32:13 -07:00
|
|
|
|
|
2013-06-10 22:48:58 -07:00
|
|
|
|
/* Returns a hash value for the bits of 'flow' that are active based on
|
|
|
|
|
* 'wc', given 'basis'. */
|
|
|
|
|
uint32_t
|
|
|
|
|
flow_hash_in_wildcards(const struct flow *flow,
|
|
|
|
|
const struct flow_wildcards *wc, uint32_t basis)
|
|
|
|
|
{
|
2015-01-06 11:10:42 -08:00
|
|
|
|
const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
|
|
|
|
|
const uint64_t *flow_u64 = (const uint64_t *) flow;
|
2013-06-10 22:48:58 -07:00
|
|
|
|
uint32_t hash;
|
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
|
|
hash = basis;
|
2015-01-06 11:10:42 -08:00
|
|
|
|
for (i = 0; i < FLOW_U64S; i++) {
|
|
|
|
|
hash = hash_add64(hash, flow_u64[i] & wc_u64[i]);
|
2013-06-10 22:48:58 -07:00
|
|
|
|
}
|
2015-01-06 11:10:42 -08:00
|
|
|
|
return hash_finish(hash, 8 * FLOW_U64S);
|
2013-06-10 22:48:58 -07:00
|
|
|
|
}
|
|
|
|
|
|
2011-11-21 14:14:02 -08:00
|
|
|
|
/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
|
|
|
|
|
* OpenFlow 1.0 "dl_vlan" value:
|
|
|
|
|
*
|
|
|
|
|
* - If it is in the range 0...4095, 'flow->vlan_tci' is set to match
|
|
|
|
|
* that VLAN. Any existing PCP match is unchanged (it becomes 0 if
|
|
|
|
|
* 'flow' previously matched packets without a VLAN header).
|
|
|
|
|
*
|
|
|
|
|
* - If it is OFP_VLAN_NONE, 'flow->vlan_tci' is set to match a packet
|
|
|
|
|
* without a VLAN tag.
|
|
|
|
|
*
|
|
|
|
|
* - Other values of 'vid' should not be used. */
|
|
|
|
|
void
|
2012-07-22 22:42:55 -07:00
|
|
|
|
flow_set_dl_vlan(struct flow *flow, ovs_be16 vid)
|
2011-11-21 14:14:02 -08:00
|
|
|
|
{
|
2012-07-05 17:41:10 +09:00
|
|
|
|
if (vid == htons(OFP10_VLAN_NONE)) {
|
2011-11-21 14:14:02 -08:00
|
|
|
|
flow->vlan_tci = htons(0);
|
|
|
|
|
} else {
|
|
|
|
|
vid &= htons(VLAN_VID_MASK);
|
|
|
|
|
flow->vlan_tci &= ~htons(VLAN_VID_MASK);
|
|
|
|
|
flow->vlan_tci |= htons(VLAN_CFI) | vid;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2012-07-22 23:20:22 -07:00
|
|
|
|
/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
|
|
|
|
|
* OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
|
|
|
|
|
* plus CFI). */
|
|
|
|
|
void
|
|
|
|
|
flow_set_vlan_vid(struct flow *flow, ovs_be16 vid)
|
|
|
|
|
{
|
|
|
|
|
ovs_be16 mask = htons(VLAN_VID_MASK | VLAN_CFI);
|
|
|
|
|
flow->vlan_tci &= ~mask;
|
|
|
|
|
flow->vlan_tci |= vid & mask;
|
|
|
|
|
}
|
|
|
|
|
|
2011-11-21 14:14:02 -08:00
|
|
|
|
/* Sets the VLAN PCP that 'flow' matches to 'pcp', which should be in the
|
|
|
|
|
* range 0...7.
|
|
|
|
|
*
|
|
|
|
|
* This function has no effect on the VLAN ID that 'flow' matches.
|
|
|
|
|
*
|
|
|
|
|
* After calling this function, 'flow' will not match packets without a VLAN
|
|
|
|
|
* header. */
|
|
|
|
|
void
|
|
|
|
|
flow_set_vlan_pcp(struct flow *flow, uint8_t pcp)
|
|
|
|
|
{
|
|
|
|
|
pcp &= 0x07;
|
|
|
|
|
flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
|
|
|
|
|
flow->vlan_tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
|
|
|
|
|
}
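
/* Illustrative sketch, not part of the original file: tagging a flow with
 * VLAN 100 at priority 5 using the two setters above; flow_set_dl_vlan()
 * takes the VID in network byte order and also sets the CFI bit.  The helper
 * name is hypothetical, so the sketch stays under #if 0. */
#if 0
static void
example_tag_flow(struct flow *flow)
{
    flow_set_dl_vlan(flow, htons(100));
    flow_set_vlan_pcp(flow, 5);
}
#endif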
|
|
|
|
|
|
2014-02-04 10:32:35 -08:00
|
|
|
|
/* Returns the number of MPLS LSEs present in 'flow'
|
|
|
|
|
*
|
|
|
|
|
* Returns 0 if the 'dl_type' of 'flow' is not an MPLS ethernet type.
|
|
|
|
|
* Otherwise traverses 'flow''s MPLS label stack stopping at the
|
|
|
|
|
* first entry that has the BoS bit set. If no such entry exists then
|
|
|
|
|
* the maximum number of LSEs that can be stored in 'flow' is returned.
|
|
|
|
|
*/
|
|
|
|
|
int
|
|
|
|
|
flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
|
|
|
|
|
{
|
2014-09-30 13:34:43 -07:00
|
|
|
|
/* dl_type is always masked. */
|
2014-02-04 10:32:35 -08:00
|
|
|
|
if (eth_type_mpls(flow->dl_type)) {
|
|
|
|
|
int i;
|
2014-11-25 07:39:20 -08:00
|
|
|
|
int cnt;
|
2014-02-04 10:32:35 -08:00
|
|
|
|
|
2014-11-25 07:39:20 -08:00
|
|
|
|
cnt = 0;
|
|
|
|
|
for (i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
|
2014-02-04 10:32:35 -08:00
|
|
|
|
if (wc) {
|
|
|
|
|
wc->masks.mpls_lse[i] |= htonl(MPLS_BOS_MASK);
|
|
|
|
|
}
|
|
|
|
|
if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
|
|
|
|
|
return i + 1;
|
|
|
|
|
}
|
2014-11-25 07:39:20 -08:00
|
|
|
|
if (flow->mpls_lse[i]) {
|
|
|
|
|
cnt++;
|
|
|
|
|
}
|
2014-02-04 10:32:35 -08:00
|
|
|
|
}
|
2014-11-25 07:39:20 -08:00
|
|
|
|
return cnt;
|
2014-02-04 10:32:35 -08:00
|
|
|
|
} else {
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
}
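
/* Illustrative sketch, not part of the original file: pushing one label onto
 * a non-MPLS flow with flow_push_mpls() (defined later in this file) bumps
 * the label count reported above by exactly one.  The helper name is
 * hypothetical, so the sketch stays under #if 0. */
#if 0
static void
example_push_one_label(struct flow *flow, struct flow_wildcards *wc)
{
    int n = flow_count_mpls_labels(flow, wc);   /* 0 for a non-MPLS flow. */

    flow_push_mpls(flow, n, htons(ETH_TYPE_MPLS), wc);
    ovs_assert(flow_count_mpls_labels(flow, wc) == n + 1);
}
#endif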
|
|
|
|
|
|
|
|
|
|
/* Returns the number consecutive of MPLS LSEs, starting at the
|
|
|
|
|
* innermost LSE, that are common in 'a' and 'b'.
|
|
|
|
|
*
|
|
|
|
|
* 'an' must be flow_count_mpls_labels(a).
|
|
|
|
|
* 'bn' must be flow_count_mpls_labels(b).
|
|
|
|
|
*/
|
|
|
|
|
int
|
|
|
|
|
flow_count_common_mpls_labels(const struct flow *a, int an,
|
|
|
|
|
const struct flow *b, int bn,
|
|
|
|
|
struct flow_wildcards *wc)
|
|
|
|
|
{
|
|
|
|
|
int min_n = MIN(an, bn);
|
|
|
|
|
if (min_n == 0) {
|
|
|
|
|
return 0;
|
|
|
|
|
} else {
|
|
|
|
|
int common_n = 0;
|
|
|
|
|
int a_last = an - 1;
|
|
|
|
|
int b_last = bn - 1;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < min_n; i++) {
|
|
|
|
|
if (wc) {
|
|
|
|
|
wc->masks.mpls_lse[a_last - i] = OVS_BE32_MAX;
|
|
|
|
|
wc->masks.mpls_lse[b_last - i] = OVS_BE32_MAX;
|
|
|
|
|
}
|
|
|
|
|
if (a->mpls_lse[a_last - i] != b->mpls_lse[b_last - i]) {
|
|
|
|
|
break;
|
|
|
|
|
} else {
|
|
|
|
|
common_n++;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return common_n;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Adds a new outermost MPLS label to 'flow' and changes 'flow''s Ethernet type
|
|
|
|
|
* to 'mpls_eth_type', which must be an MPLS Ethertype.
|
|
|
|
|
*
|
|
|
|
|
* If the new label is the first MPLS label in 'flow', it is generated as:
|
|
|
|
|
*
|
|
|
|
|
* - label: 2, if 'flow' is IPv6, otherwise 0.
|
|
|
|
|
*
|
|
|
|
|
* - TTL: IPv4 or IPv6 TTL, if present and nonzero, otherwise 64.
|
|
|
|
|
*
|
|
|
|
|
* - TC: IPv4 or IPv6 TOS, if present, otherwise 0.
|
|
|
|
|
*
|
|
|
|
|
* - BoS: 1.
|
|
|
|
|
*
|
2014-09-30 13:34:43 -07:00
|
|
|
|
* If the new label is the second or later MPLS label in 'flow', it is
|
2014-02-04 10:32:35 -08:00
|
|
|
|
* generated as:
|
|
|
|
|
*
|
2014-02-07 16:39:53 -08:00
|
|
|
|
* - label: Copied from outer label.
|
2014-02-04 10:32:35 -08:00
|
|
|
|
*
|
|
|
|
|
* - TTL: Copied from outer label.
|
|
|
|
|
*
|
|
|
|
|
* - TC: Copied from outer label.
|
|
|
|
|
*
|
|
|
|
|
* - BoS: 0.
|
|
|
|
|
*
|
|
|
|
|
* 'n' must be flow_count_mpls_labels(flow). 'n' must be less than
|
|
|
|
|
* FLOW_MAX_MPLS_LABELS (because otherwise flow->mpls_lse[] would overflow).
|
|
|
|
|
*/
|
|
|
|
|
void
|
|
|
|
|
flow_push_mpls(struct flow *flow, int n, ovs_be16 mpls_eth_type,
|
|
|
|
|
struct flow_wildcards *wc)
|
|
|
|
|
{
|
|
|
|
|
ovs_assert(eth_type_mpls(mpls_eth_type));
|
|
|
|
|
ovs_assert(n < FLOW_MAX_MPLS_LABELS);
|
|
|
|
|
|
|
|
|
|
if (n) {
|
|
|
|
|
int i;
|
|
|
|
|
|
2014-09-30 13:34:43 -07:00
|
|
|
|
if (wc) {
|
|
|
|
|
memset(&wc->masks.mpls_lse, 0xff, sizeof *wc->masks.mpls_lse * n);
|
|
|
|
|
}
|
2014-02-04 10:32:35 -08:00
|
|
|
|
for (i = n; i >= 1; i--) {
|
|
|
|
|
flow->mpls_lse[i] = flow->mpls_lse[i - 1];
|
|
|
|
|
}
|
2014-09-30 13:34:43 -07:00
|
|
|
|
flow->mpls_lse[0] = (flow->mpls_lse[1] & htonl(~MPLS_BOS_MASK));
|
2014-02-04 10:32:35 -08:00
|
|
|
|
} else {
|
|
|
|
|
int label = 0; /* IPv4 Explicit Null. */
|
|
|
|
|
int tc = 0;
|
|
|
|
|
int ttl = 64;
|
|
|
|
|
|
|
|
|
|
if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
|
|
|
|
|
label = 2;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (is_ip_any(flow)) {
|
|
|
|
|
tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
|
2014-09-30 13:34:43 -07:00
|
|
|
|
if (wc) {
|
|
|
|
|
wc->masks.nw_tos |= IP_DSCP_MASK;
|
|
|
|
|
wc->masks.nw_ttl = 0xff;
|
|
|
|
|
}
|
2014-02-04 10:32:35 -08:00
|
|
|
|
|
|
|
|
|
if (flow->nw_ttl) {
|
|
|
|
|
ttl = flow->nw_ttl;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
|
|
|
|
|
|
2015-01-06 11:10:42 -08:00
|
|
|
|
/* Clear all L3 and L4 fields and dp_hash. */
|
2015-01-11 13:25:24 -08:00
|
|
|
|
BUILD_ASSERT(FLOW_WC_SEQ == 30);
|
2014-02-04 10:32:35 -08:00
|
|
|
|
memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
|
|
|
|
|
sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
|
2015-01-06 11:10:42 -08:00
|
|
|
|
flow->dp_hash = 0;
|
2014-02-04 10:32:35 -08:00
|
|
|
|
}
|
|
|
|
|
flow->dl_type = mpls_eth_type;
|
|
|
|
|
}
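
/* Editor's illustrative sketch (not part of the original source): pushing a
 * fresh outermost label onto a flow and then overriding its label value.
 * The helper name and the label value 1000 are arbitrary;
 * flow_set_mpls_label() is declared in flow.h and defined later in this
 * file. */
static void
flow_example_push_label(struct flow *flow, struct flow_wildcards *wc)
{
    int n = flow_count_mpls_labels(flow, wc);

    if (n < FLOW_MAX_MPLS_LABELS) {
        flow_push_mpls(flow, n, htons(ETH_TYPE_MPLS), wc);
        flow_set_mpls_label(flow, 0, htonl(1000));
    }
}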
|
|
|
|
|
|
|
|
|
|
/* Tries to remove the outermost MPLS label from 'flow'. Returns true if
|
|
|
|
|
* successful, false otherwise. On success, sets 'flow''s Ethernet type to
|
|
|
|
|
* 'eth_type'.
|
|
|
|
|
*
|
|
|
|
|
* 'n' must be flow_count_mpls_labels(flow). */
|
|
|
|
|
bool
|
|
|
|
|
flow_pop_mpls(struct flow *flow, int n, ovs_be16 eth_type,
|
|
|
|
|
struct flow_wildcards *wc)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
if (n == 0) {
|
|
|
|
|
/* Nothing to pop. */
|
|
|
|
|
return false;
|
2014-09-30 13:34:43 -07:00
|
|
|
|
} else if (n == FLOW_MAX_MPLS_LABELS) {
|
|
|
|
|
if (wc) {
|
|
|
|
|
wc->masks.mpls_lse[n - 1] |= htonl(MPLS_BOS_MASK);
|
|
|
|
|
}
|
|
|
|
|
if (!(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
|
|
|
|
|
/* Can't pop because we don't know what to fill in mpls_lse[n - 1]. */
|
|
|
|
|
return false;
|
|
|
|
|
}
|
2014-02-04 10:32:35 -08:00
|
|
|
|
}
|
|
|
|
|
|
2014-09-30 13:34:43 -07:00
|
|
|
|
if (wc) {
|
|
|
|
|
memset(&wc->masks.mpls_lse[1], 0xff,
|
|
|
|
|
sizeof *wc->masks.mpls_lse * (n - 1));
|
|
|
|
|
}
|
2014-02-04 10:32:35 -08:00
|
|
|
|
for (i = 1; i < n; i++) {
|
|
|
|
|
flow->mpls_lse[i - 1] = flow->mpls_lse[i];
|
|
|
|
|
}
|
|
|
|
|
flow->mpls_lse[n - 1] = 0;
|
|
|
|
|
flow->dl_type = eth_type;
|
|
|
|
|
return true;
|
|
|
|
|
}
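
/* Editor's illustrative sketch (not part of the original source): popping the
 * outermost label and restoring an IPv4 Ethertype, the usual pattern for a
 * pop_mpls action during translation.  The helper name is hypothetical. */
static bool
flow_example_pop_to_ipv4(struct flow *flow, struct flow_wildcards *wc)
{
    return flow_pop_mpls(flow, flow_count_mpls_labels(flow, wc),
                         htons(ETH_TYPE_IP), wc);
}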
|
|
|
|
|
|
2013-01-25 16:22:07 +09:00
|
|
|
|
/* Sets the MPLS Label that 'flow' matches to 'label', which is interpreted
|
|
|
|
|
* as an OpenFlow 1.1 "mpls_label" value. */
|
|
|
|
|
void
|
2014-02-04 10:32:35 -08:00
|
|
|
|
flow_set_mpls_label(struct flow *flow, int idx, ovs_be32 label)
|
2013-01-25 16:22:07 +09:00
|
|
|
|
{
|
2014-02-04 10:32:35 -08:00
|
|
|
|
set_mpls_lse_label(&flow->mpls_lse[idx], label);
|
2013-01-25 16:22:07 +09:00
|
|
|
|
}
|
|
|
|
|
|
2013-03-06 16:08:23 +09:00
|
|
|
|
/* Sets the MPLS TTL that 'flow' matches to 'ttl', which should be in the
|
|
|
|
|
* range 0...255. */
|
|
|
|
|
void
|
2014-02-04 10:32:35 -08:00
|
|
|
|
flow_set_mpls_ttl(struct flow *flow, int idx, uint8_t ttl)
|
2013-03-06 16:08:23 +09:00
|
|
|
|
{
|
2014-02-04 10:32:35 -08:00
|
|
|
|
set_mpls_lse_ttl(&flow->mpls_lse[idx], ttl);
|
2013-03-06 16:08:23 +09:00
|
|
|
|
}
|
|
|
|
|
|
2013-01-25 16:22:07 +09:00
|
|
|
|
/* Sets the MPLS TC that 'flow' matches to 'tc', which should be in the
|
|
|
|
|
* range 0...7. */
|
|
|
|
|
void
|
2014-02-04 10:32:35 -08:00
|
|
|
|
flow_set_mpls_tc(struct flow *flow, int idx, uint8_t tc)
|
2013-01-25 16:22:07 +09:00
|
|
|
|
{
|
2014-02-04 10:32:35 -08:00
|
|
|
|
set_mpls_lse_tc(&flow->mpls_lse[idx], tc);
|
2013-01-25 16:22:07 +09:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Sets the MPLS BOS bit that 'flow' matches to 'bos', which should be 0 or 1. */
|
|
|
|
|
void
|
2014-02-04 10:32:35 -08:00
|
|
|
|
flow_set_mpls_bos(struct flow *flow, int idx, uint8_t bos)
|
2013-01-25 16:22:07 +09:00
|
|
|
|
{
|
2014-02-04 10:32:35 -08:00
|
|
|
|
set_mpls_lse_bos(&flow->mpls_lse[idx], bos);
|
2013-01-25 16:22:07 +09:00
|
|
|
|
}
|
|
|
|
|
|
2014-02-04 10:32:35 -08:00
|
|
|
|
/* Sets the entire MPLS LSE. */
|
|
|
|
|
void
|
|
|
|
|
flow_set_mpls_lse(struct flow *flow, int idx, ovs_be32 lse)
|
|
|
|
|
{
|
|
|
|
|
flow->mpls_lse[idx] = lse;
|
|
|
|
|
}
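
/* Editor's illustrative sketch (not part of the original source): the setters
 * above adjust one LSE field at a time; an equivalent way to write a complete
 * outermost LSE in one go is flow_set_mpls_lse() with a value built by
 * set_mpls_lse_values(), as flow_push_mpls() does.  The helper name and the
 * field values are arbitrary. */
static void
flow_example_write_outer_lse(struct flow *flow)
{
    /* Label 1000, TC 0, TTL 64, BoS set. */
    flow_set_mpls_lse(flow, 0, set_mpls_lse_values(64, 0, 1, htonl(1000)));
}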
|
2013-12-06 12:43:20 -08:00
|
|
|
|
|
lib/ofpbuf: Compact
This patch shrinks the struct ofpbuf from 104 to 48 bytes on 64-bit
systems, or from 52 to 36 bytes on 32-bit systems (counting in the
'l7' removal from an earlier patch). This may help contribute to
cache efficiency, and will speed up initializing, copying and
manipulating ofpbufs. This is potentially important for the DPDK
datapath, but the rest of the code base may also see a little benefit.
Changes are:
- Remove 'l7' pointer (previous patch).
- Use offsets instead of layer pointers for l2_5, l3, and l4 using
'l2' as basis. Usually 'data' is the same as 'l2', but this is not
always the case (e.g., when parsing or constructing a packet), so it
can not be easily used as the offset basis. Also, packet parsing is
faster if we do not need to maintain the offsets each time we pull
data from the ofpbuf.
- Use uint32_t for 'allocated' and 'size', as 2^32 is enough even for
largest possible messages/packets.
- Use packed enum for 'source'.
- Rearrange to avoid unnecessary padding.
- Remove 'private_p', which was used only in two cases, both of which
had the invariant ('l2' == 'data'), so we can temporarily use 'l2'
as a private pointer.
Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Signed-off-by: Ben Pfaff <blp@nicira.com>
2014-03-24 09:17:01 -07:00
|
|
|
|
static size_t
|
2013-12-06 12:43:20 -08:00
|
|
|
|
flow_compose_l4(struct ofpbuf *b, const struct flow *flow)
|
|
|
|
|
{
|
2014-03-24 09:17:01 -07:00
|
|
|
|
size_t l4_len = 0;
|
|
|
|
|
|
2013-12-06 12:43:20 -08:00
|
|
|
|
if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
|
|
|
|
|
|| !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
|
|
|
|
|
if (flow->nw_proto == IPPROTO_TCP) {
|
|
|
|
|
struct tcp_header *tcp;
|
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
l4_len = sizeof *tcp;
|
|
|
|
|
tcp = ofpbuf_put_zeros(b, l4_len);
|
2013-12-06 12:43:20 -08:00
|
|
|
|
tcp->tcp_src = flow->tp_src;
|
|
|
|
|
tcp->tcp_dst = flow->tp_dst;
|
|
|
|
|
tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
|
|
|
|
|
} else if (flow->nw_proto == IPPROTO_UDP) {
|
|
|
|
|
struct udp_header *udp;
|
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
l4_len = sizeof *udp;
|
|
|
|
|
udp = ofpbuf_put_zeros(b, l4_len);
|
2013-12-06 12:43:20 -08:00
|
|
|
|
udp->udp_src = flow->tp_src;
|
|
|
|
|
udp->udp_dst = flow->tp_dst;
|
|
|
|
|
} else if (flow->nw_proto == IPPROTO_SCTP) {
|
|
|
|
|
struct sctp_header *sctp;
|
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
l4_len = sizeof *sctp;
|
|
|
|
|
sctp = ofpbuf_put_zeros(b, l4_len);
|
2013-12-06 12:43:20 -08:00
|
|
|
|
sctp->sctp_src = flow->tp_src;
|
|
|
|
|
sctp->sctp_dst = flow->tp_dst;
|
|
|
|
|
} else if (flow->nw_proto == IPPROTO_ICMP) {
|
|
|
|
|
struct icmp_header *icmp;
|
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
l4_len = sizeof *icmp;
|
|
|
|
|
icmp = ofpbuf_put_zeros(b, l4_len);
|
2013-12-06 12:43:20 -08:00
|
|
|
|
icmp->icmp_type = ntohs(flow->tp_src);
|
|
|
|
|
icmp->icmp_code = ntohs(flow->tp_dst);
|
|
|
|
|
icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
|
2014-06-18 22:14:30 -03:00
|
|
|
|
} else if (flow->nw_proto == IPPROTO_IGMP) {
|
|
|
|
|
struct igmp_header *igmp;
|
|
|
|
|
|
|
|
|
|
l4_len = sizeof *igmp;
|
|
|
|
|
igmp = ofpbuf_put_zeros(b, l4_len);
|
|
|
|
|
igmp->igmp_type = ntohs(flow->tp_src);
|
|
|
|
|
igmp->igmp_code = ntohs(flow->tp_dst);
|
|
|
|
|
put_16aligned_be32(&igmp->group, flow->igmp_group_ip4);
|
|
|
|
|
igmp->igmp_csum = csum(igmp, IGMP_HEADER_LEN);
|
2013-12-06 12:43:20 -08:00
|
|
|
|
} else if (flow->nw_proto == IPPROTO_ICMPV6) {
|
|
|
|
|
struct icmp6_hdr *icmp;
|
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
l4_len = sizeof *icmp;
|
|
|
|
|
icmp = ofpbuf_put_zeros(b, l4_len);
|
2013-12-06 12:43:20 -08:00
|
|
|
|
icmp->icmp6_type = ntohs(flow->tp_src);
|
|
|
|
|
icmp->icmp6_code = ntohs(flow->tp_dst);
|
|
|
|
|
|
|
|
|
|
if (icmp->icmp6_code == 0 &&
|
|
|
|
|
(icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
|
|
|
|
|
icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
|
|
|
|
|
struct in6_addr *nd_target;
|
|
|
|
|
struct nd_opt_hdr *nd_opt;
|
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
l4_len += sizeof *nd_target;
|
2013-12-06 12:43:20 -08:00
|
|
|
|
nd_target = ofpbuf_put_zeros(b, sizeof *nd_target);
|
|
|
|
|
*nd_target = flow->nd_target;
|
|
|
|
|
|
|
|
|
|
if (!eth_addr_is_zero(flow->arp_sha)) {
|
2014-03-24 09:17:01 -07:00
|
|
|
|
l4_len += 8;
|
2013-12-06 12:43:20 -08:00
|
|
|
|
nd_opt = ofpbuf_put_zeros(b, 8);
|
|
|
|
|
nd_opt->nd_opt_len = 1;
|
|
|
|
|
nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
|
|
|
|
|
memcpy(nd_opt + 1, flow->arp_sha, ETH_ADDR_LEN);
|
|
|
|
|
}
|
|
|
|
|
if (!eth_addr_is_zero(flow->arp_tha)) {
|
2014-03-24 09:17:01 -07:00
|
|
|
|
l4_len += 8;
|
2013-12-06 12:43:20 -08:00
|
|
|
|
nd_opt = ofpbuf_put_zeros(b, 8);
|
|
|
|
|
nd_opt->nd_opt_len = 1;
|
|
|
|
|
nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
|
|
|
|
|
memcpy(nd_opt + 1, flow->arp_tha, ETH_ADDR_LEN);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
icmp->icmp6_cksum = (OVS_FORCE uint16_t)
|
|
|
|
|
csum(icmp, (char *)ofpbuf_tail(b) - (char *)icmp);
|
|
|
|
|
}
|
|
|
|
|
}
|
2014-03-24 09:17:01 -07:00
|
|
|
|
return l4_len;
|
2013-12-06 12:43:20 -08:00
|
|
|
|
}
|
|
|
|
|
|
2011-09-08 14:32:13 -07:00
|
|
|
|
/* Puts into 'b' a packet that flow_extract() would parse as having the given
|
|
|
|
|
* 'flow'.
|
|
|
|
|
*
|
|
|
|
|
* (This is useful only for testing, obviously, and the packet isn't really
|
2012-08-02 16:11:58 -07:00
|
|
|
|
* valid. It hasn't got some checksums filled in, for one, and lots of fields
|
2011-09-08 14:32:13 -07:00
|
|
|
|
* are just zeroed.) */
|
|
|
|
|
void
|
|
|
|
|
flow_compose(struct ofpbuf *b, const struct flow *flow)
|
|
|
|
|
{
|
2014-03-24 09:17:01 -07:00
|
|
|
|
size_t l4_len;
|
|
|
|
|
|
2013-12-06 12:43:20 -08:00
|
|
|
|
/* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
|
2011-09-08 14:32:13 -07:00
|
|
|
|
eth_compose(b, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
|
|
|
|
|
if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
|
2014-04-02 15:44:21 -07:00
|
|
|
|
struct eth_header *eth = ofpbuf_l2(b);
|
2014-03-30 01:31:50 -07:00
|
|
|
|
eth->eth_type = htons(ofpbuf_size(b));
|
2011-09-08 14:32:13 -07:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (flow->vlan_tci & htons(VLAN_CFI)) {
|
2014-01-15 17:17:01 +09:00
|
|
|
|
eth_push_vlan(b, htons(ETH_TYPE_VLAN), flow->vlan_tci);
|
2011-09-08 14:32:13 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-03-15 15:27:11 +01:00
|
|
|
|
if (flow->dl_type == htons(ETH_TYPE_IP)) {
|
2011-09-08 14:32:13 -07:00
|
|
|
|
struct ip_header *ip;
|
|
|
|
|
|
2013-12-06 12:43:20 -08:00
|
|
|
|
ip = ofpbuf_put_zeros(b, sizeof *ip);
|
2011-09-08 14:32:13 -07:00
|
|
|
|
ip->ip_ihl_ver = IP_IHL_VER(5, 4);
|
2011-11-09 17:10:27 -08:00
|
|
|
|
ip->ip_tos = flow->nw_tos;
|
2012-10-27 15:05:55 +09:00
|
|
|
|
ip->ip_ttl = flow->nw_ttl;
|
2011-09-08 14:32:13 -07:00
|
|
|
|
ip->ip_proto = flow->nw_proto;
|
packets: Do not assume that IPv4, TCP, or ARP headers are 32-bit aligned.
Ethernet headers are 14 bytes long, so when the beginning of such a header
is 32-bit aligned, the following data is misaligned. The usual trick to
fix that is to start the Ethernet header on an odd-numbered 16-bit
boundary. That trick works OK for Open vSwitch, but there are two
problems:
- OVS doesn't use that trick everywhere. Maybe it should, but it's
difficult to make sure that it does consistently because the CPUs
most commonly used with OVS don't care about misalignment, so we
only find problems when porting.
- Some protocols (GRE, VXLAN) don't use that trick, so in such a case
one can properly align the inner or outer L3/L4/L7 but not both. (OVS
userspace doesn't directly deal with such protocols yet, so this is
just future-proofing.)
- OpenFlow uses the alignment trick in a few places but not all of them.
This commit starts the adoption of what I hope will be a more robust way
to avoid misalignment problems and the resulting bus errors on RISC
architectures. Instead of trying to ensure that 32-bit quantities are
always aligned, we always read them as if they were misaligned. To ensure
that they are read this way, we change their types from 32-bit types to
pairs of 16-bit types. (I don't know of any protocols that offset the
next header by an odd number of bytes, so a 16-bit alignment assumption
seems OK.)
The same would be necessary for 64-bit types in protocol headers, but we
don't yet have any protocol definitions with 64-bit types.
IPv6 protocol headers need the same treatment, but for those we rely on
structs provided by system headers, so I'll leave them for an upcoming
patch.
Signed-off-by: Ben Pfaff <blp@nicira.com>
2013-08-15 10:47:39 -07:00
|
|
|
|
put_16aligned_be32(&ip->ip_src, flow->nw_src);
|
|
|
|
|
put_16aligned_be32(&ip->ip_dst, flow->nw_dst);
|
2011-09-08 14:32:13 -07:00
|
|
|
|
|
2011-11-09 17:10:27 -08:00
|
|
|
|
if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
|
Implement new fragment handling policy.
Until now, OVS has handled IP fragments more awkwardly than necessary. It
has not been possible to match on L4 headers, even in fragments with offset
0 where they are actually present. This means that there was no way to
implement ACLs that treat, say, different TCP ports differently, on
fragmented traffic; instead, all decisions for fragment forwarding had to
be made on the basis of L2 and L3 headers alone.
This commit improves the situation significantly. It is still not possible
to match on L4 headers in fragments with nonzero offset, because that
information is simply not present in such fragments, but this commit adds
the ability to match on L4 headers for fragments with zero offset. This
means that it becomes possible to implement ACLs that drop such "first
fragments" on the basis of L4 headers. In practice, that effectively
blocks even fragmented traffic on an L4 basis, because the receiving IP
stack cannot reassemble a full packet when the first fragment is missing.
This commit works by adding a new "fragment type" to the kernel flow match
and making it available through OpenFlow as a new NXM field named
NXM_NX_IP_FRAG. Because OpenFlow 1.0 explicitly says that the L4 fields
are always 0 for IP fragments, it adds a new OpenFlow fragment handling
mode that fills in the L4 fields for "first fragments". It also enhances
ovs-ofctl to allow users to configure this new fragment handling mode and
to parse the new field.
Signed-off-by: Ben Pfaff <blp@nicira.com>
Bug #7557.
2011-10-19 21:33:44 -07:00
|
|
|
|
ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
|
2011-11-09 17:10:27 -08:00
|
|
|
|
if (flow->nw_frag & FLOW_NW_FRAG_LATER) {
|
2011-10-19 21:33:44 -07:00
|
|
|
|
ip->ip_frag_off |= htons(100);
|
|
|
|
|
}
|
|
|
|
|
}
|
2011-12-19 14:45:23 -08:00
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
ofpbuf_set_l4(b, ofpbuf_tail(b));
|
2013-12-06 12:43:20 -08:00
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
l4_len = flow_compose_l4(b, flow);
|
2013-12-06 12:43:20 -08:00
|
|
|
|
|
2014-06-04 15:42:13 -07:00
|
|
|
|
ip = ofpbuf_l3(b);
|
2014-03-24 09:17:01 -07:00
|
|
|
|
ip->ip_tot_len = htons(b->l4_ofs - b->l3_ofs + l4_len);
|
2012-08-02 16:11:58 -07:00
|
|
|
|
ip->ip_csum = csum(ip, sizeof *ip);
|
2013-03-15 15:27:11 +01:00
|
|
|
|
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
|
2013-12-06 12:43:20 -08:00
|
|
|
|
struct ovs_16aligned_ip6_hdr *nh;
|
|
|
|
|
|
|
|
|
|
nh = ofpbuf_put_zeros(b, sizeof *nh);
|
|
|
|
|
put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
|
|
|
|
|
htonl(flow->nw_tos << 20) | flow->ipv6_label);
|
|
|
|
|
nh->ip6_hlim = flow->nw_ttl;
|
|
|
|
|
nh->ip6_nxt = flow->nw_proto;
|
|
|
|
|
|
|
|
|
|
memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
|
|
|
|
|
memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));
|
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
ofpbuf_set_l4(b, ofpbuf_tail(b));
|
2013-12-06 12:43:20 -08:00
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
l4_len = flow_compose_l4(b, flow);
|
2013-12-06 12:43:20 -08:00
|
|
|
|
|
2014-06-04 15:42:13 -07:00
|
|
|
|
nh = ofpbuf_l3(b);
|
2014-03-24 09:17:01 -07:00
|
|
|
|
nh->ip6_plen = htons(l4_len);
|
2013-03-15 15:27:11 +01:00
|
|
|
|
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
|
|
|
|
|
flow->dl_type == htons(ETH_TYPE_RARP)) {
|
2011-09-08 14:32:13 -07:00
|
|
|
|
struct arp_eth_header *arp;
|
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
arp = ofpbuf_put_zeros(b, sizeof *arp);
|
|
|
|
|
ofpbuf_set_l3(b, arp);
|
2011-09-08 14:32:13 -07:00
|
|
|
|
arp->ar_hrd = htons(1);
|
|
|
|
|
arp->ar_pro = htons(ETH_TYPE_IP);
|
|
|
|
|
arp->ar_hln = ETH_ADDR_LEN;
|
|
|
|
|
arp->ar_pln = 4;
|
|
|
|
|
arp->ar_op = htons(flow->nw_proto);
|
|
|
|
|
|
|
|
|
|
if (flow->nw_proto == ARP_OP_REQUEST ||
|
|
|
|
|
flow->nw_proto == ARP_OP_REPLY) {
|
2013-08-15 10:47:39 -07:00
|
|
|
|
put_16aligned_be32(&arp->ar_spa, flow->nw_src);
|
|
|
|
|
put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
|
2011-09-08 14:32:13 -07:00
|
|
|
|
memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN);
|
|
|
|
|
memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN);
|
|
|
|
|
}
|
|
|
|
|
}
|
2013-01-25 16:22:07 +09:00
|
|
|
|
|
|
|
|
|
if (eth_type_mpls(flow->dl_type)) {
|
2014-02-04 10:32:35 -08:00
|
|
|
|
int n;
|
|
|
|
|
|
2014-03-24 09:17:01 -07:00
|
|
|
|
b->l2_5_ofs = b->l3_ofs;
|
2014-02-04 10:32:35 -08:00
|
|
|
|
for (n = 1; n < FLOW_MAX_MPLS_LABELS; n++) {
|
|
|
|
|
if (flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK)) {
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
while (n > 0) {
|
|
|
|
|
push_mpls(b, flow->dl_type, flow->mpls_lse[--n]);
|
|
|
|
|
}
|
2013-01-25 16:22:07 +09:00
|
|
|
|
}
|
2011-09-08 14:32:13 -07:00
|
|
|
|
}
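
/* Editor's illustrative sketch (not part of the original source): composing a
 * throwaway test packet from a flow, as the unit tests do.  The helper name
 * and the 128 bytes of initial ofpbuf space are arbitrary choices for this
 * sketch. */
static void
flow_example_compose_packet(const struct flow *flow)
{
    struct ofpbuf packet;

    ofpbuf_init(&packet, 128);
    flow_compose(&packet, flow);

    /* ... hand the composed packet to the code under test ... */

    ofpbuf_uninit(&packet);
}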
|
2012-09-04 12:43:53 -07:00
|
|
|
|
|
|
|
|
|
/* Compressed flow. */
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
miniflow_n_values(const struct miniflow *flow)
|
|
|
|
|
{
|
2013-11-19 10:59:10 -08:00
|
|
|
|
return count_1bits(flow->map);
|
2012-09-04 12:43:53 -07:00
|
|
|
|
}
|
|
|
|
|
|
2015-01-06 11:10:42 -08:00
|
|
|
|
static uint64_t *
|
2012-09-04 12:43:53 -07:00
|
|
|
|
miniflow_alloc_values(struct miniflow *flow, int n)
|
|
|
|
|
{
|
2014-04-29 15:50:39 -07:00
|
|
|
|
int size = MINIFLOW_VALUES_SIZE(n);
|
|
|
|
|
|
|
|
|
|
if (size <= sizeof flow->inline_values) {
|
2014-04-29 15:50:39 -07:00
|
|
|
|
flow->values_inline = true;
|
2012-09-04 12:43:53 -07:00
|
|
|
|
return flow->inline_values;
|
|
|
|
|
} else {
|
|
|
|
|
COVERAGE_INC(miniflow_malloc);
|
2014-04-29 15:50:39 -07:00
|
|
|
|
flow->values_inline = false;
|
2014-04-29 15:50:39 -07:00
|
|
|
|
flow->offline_values = xmalloc(size);
|
2014-04-29 15:50:39 -07:00
|
|
|
|
return flow->offline_values;
|
2012-09-04 12:43:53 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-02-06 16:13:19 -08:00
|
|
|
|
/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
|
|
|
|
|
* the caller. The caller must have already initialized 'dst->map' properly
|
2015-01-06 11:10:42 -08:00
|
|
|
|
* to indicate the significant uint64_t elements of 'src'. 'n' must be the
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* number of 1-bits in 'dst->map'.
|
|
|
|
|
*
|
|
|
|
|
* Normally the significant elements are the ones that are non-zero. However,
|
|
|
|
|
* when a miniflow is initialized from a (mini)mask, the values can be zeroes,
|
|
|
|
|
* so that the flow and mask always have the same maps.
|
2013-02-06 16:13:19 -08:00
|
|
|
|
*
|
2014-04-29 15:50:39 -07:00
|
|
|
|
* This function initializes values (either inline if possible or with
|
2015-01-06 11:10:42 -08:00
|
|
|
|
* malloc() otherwise) and copies the uint64_t elements of 'src' indicated by
|
2013-12-11 11:07:01 -08:00
|
|
|
|
* 'dst->map' into it. */
|
2013-02-06 16:13:19 -08:00
|
|
|
|
static void
|
|
|
|
|
miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
|
|
|
|
|
{
|
2015-01-06 11:10:42 -08:00
|
|
|
|
const uint64_t *src_u64 = (const uint64_t *) src;
|
|
|
|
|
uint64_t *dst_u64 = miniflow_alloc_values(dst, n);
|
2014-11-26 15:30:33 -08:00
|
|
|
|
int idx;
|
2013-02-06 16:13:19 -08:00
|
|
|
|
|
2014-11-26 15:30:33 -08:00
|
|
|
|
MAP_FOR_EACH_INDEX(idx, dst->map) {
|
2015-01-06 11:10:42 -08:00
|
|
|
|
*dst_u64++ = src_u64[idx];
|
2013-02-06 16:13:19 -08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2012-09-04 12:43:53 -07:00
|
|
|
|
/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
|
2014-04-29 15:50:39 -07:00
|
|
|
|
* with miniflow_destroy().
|
|
|
|
|
* Always allocates offline storage. */
|
2012-09-04 12:43:53 -07:00
|
|
|
|
void
|
|
|
|
|
miniflow_init(struct miniflow *dst, const struct flow *src)
|
|
|
|
|
{
|
2015-01-06 11:10:42 -08:00
|
|
|
|
const uint64_t *src_u64 = (const uint64_t *) src;
|
2012-09-04 12:43:53 -07:00
|
|
|
|
unsigned int i;
|
|
|
|
|
int n;
|
|
|
|
|
|
|
|
|
|
/* Initialize dst->map, counting the number of nonzero elements. */
|
|
|
|
|
n = 0;
|
2013-11-18 09:28:44 -08:00
|
|
|
|
dst->map = 0;
|
|
|
|
|
|
2015-01-06 11:10:42 -08:00
|
|
|
|
for (i = 0; i < FLOW_U64S; i++) {
|
|
|
|
|
if (src_u64[i]) {
|
2013-11-18 09:28:44 -08:00
|
|
|
|
dst->map |= UINT64_C(1) << i;
|
2012-09-04 12:43:53 -07:00
|
|
|
|
n++;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-02-06 16:13:19 -08:00
|
|
|
|
miniflow_init__(dst, src, n);
|
|
|
|
|
}
|
2012-09-04 12:43:53 -07:00
|
|
|
|
|
2013-02-06 16:13:19 -08:00
|
|
|
|
/* Initializes 'dst' as a copy of 'src', using 'mask->map' as 'dst''s map. The
|
|
|
|
|
* caller must eventually free 'dst' with miniflow_destroy(). */
|
|
|
|
|
void
|
|
|
|
|
miniflow_init_with_minimask(struct miniflow *dst, const struct flow *src,
|
|
|
|
|
const struct minimask *mask)
|
|
|
|
|
{
|
2013-11-18 09:28:44 -08:00
|
|
|
|
dst->map = mask->masks.map;
|
2013-02-06 16:13:19 -08:00
|
|
|
|
miniflow_init__(dst, src, miniflow_n_values(dst));
|
2012-09-04 12:43:53 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
|
|
|
|
|
* with miniflow_destroy(). */
|
|
|
|
|
void
|
|
|
|
|
miniflow_clone(struct miniflow *dst, const struct miniflow *src)
|
|
|
|
|
{
|
2014-04-29 15:50:39 -07:00
|
|
|
|
int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
|
2015-01-06 11:10:42 -08:00
|
|
|
|
uint64_t *values;
|
2014-04-29 15:50:39 -07:00
|
|
|
|
|
2013-11-18 09:28:44 -08:00
|
|
|
|
dst->map = src->map;
|
2014-04-29 15:50:39 -07:00
|
|
|
|
if (size <= sizeof dst->inline_values) {
|
|
|
|
|
dst->values_inline = true;
|
|
|
|
|
values = dst->inline_values;
|
|
|
|
|
} else {
|
|
|
|
|
dst->values_inline = false;
|
|
|
|
|
COVERAGE_INC(miniflow_malloc);
|
|
|
|
|
dst->offline_values = xmalloc(size);
|
|
|
|
|
values = dst->offline_values;
|
|
|
|
|
}
|
|
|
|
|
memcpy(values, miniflow_get_values(src), size);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Initializes 'dst' as a copy of 'src'. The caller must have allocated
|
|
|
|
|
* 'dst' to have inline space for all data in 'src'. */
|
|
|
|
|
void
|
|
|
|
|
miniflow_clone_inline(struct miniflow *dst, const struct miniflow *src,
|
|
|
|
|
size_t n_values)
|
|
|
|
|
{
|
|
|
|
|
dst->values_inline = true;
|
|
|
|
|
dst->map = src->map;
|
|
|
|
|
memcpy(dst->inline_values, miniflow_get_values(src),
|
|
|
|
|
MINIFLOW_VALUES_SIZE(n_values));
|
2012-09-04 12:43:53 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-08-27 12:25:48 -07:00
|
|
|
|
/* Initializes 'dst' with the data in 'src', destroying 'src'.
|
2014-04-29 15:50:39 -07:00
|
|
|
|
* The caller must eventually free 'dst' with miniflow_destroy().
|
|
|
|
|
* 'dst' must be a regularly sized miniflow, but 'src' can have
|
2014-08-26 15:11:39 -07:00
|
|
|
|
* storage for more than the default MINI_N_INLINE inline
|
|
|
|
|
* values. */
|
2013-08-27 12:25:48 -07:00
|
|
|
|
void
|
|
|
|
|
miniflow_move(struct miniflow *dst, struct miniflow *src)
|
|
|
|
|
{
|
2014-04-29 15:50:39 -07:00
|
|
|
|
int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
|
|
|
|
|
|
|
|
|
|
dst->map = src->map;
|
|
|
|
|
if (size <= sizeof dst->inline_values) {
|
|
|
|
|
dst->values_inline = true;
|
|
|
|
|
memcpy(dst->inline_values, miniflow_get_values(src), size);
|
|
|
|
|
miniflow_destroy(src);
|
2014-08-26 15:48:48 -07:00
|
|
|
|
} else if (src->values_inline) {
|
|
|
|
|
dst->values_inline = false;
|
|
|
|
|
COVERAGE_INC(miniflow_malloc);
|
|
|
|
|
dst->offline_values = xmalloc(size);
|
|
|
|
|
memcpy(dst->offline_values, src->inline_values, size);
|
2014-04-29 15:50:39 -07:00
|
|
|
|
} else {
|
|
|
|
|
dst->values_inline = false;
|
|
|
|
|
dst->offline_values = src->offline_values;
|
|
|
|
|
}
|
2013-08-27 12:25:48 -07:00
|
|
|
|
}
|
|
|
|
|
|
2012-09-04 12:43:53 -07:00
|
|
|
|
/* Frees any memory owned by 'flow'. Does not free the storage in which 'flow'
|
|
|
|
|
* itself resides; the caller is responsible for that. */
|
|
|
|
|
void
|
|
|
|
|
miniflow_destroy(struct miniflow *flow)
|
|
|
|
|
{
|
2014-04-29 15:50:39 -07:00
|
|
|
|
if (!flow->values_inline) {
|
|
|
|
|
free(flow->offline_values);
|
2012-09-04 12:43:53 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Initializes 'dst' as a copy of 'src'. */
|
|
|
|
|
void
|
|
|
|
|
miniflow_expand(const struct miniflow *src, struct flow *dst)
|
|
|
|
|
{
|
2013-05-09 19:14:20 -07:00
|
|
|
|
memset(dst, 0, sizeof *dst);
|
|
|
|
|
flow_union_with_miniflow(dst, src);
|
2012-09-04 12:43:53 -07:00
|
|
|
|
}
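
/* Editor's illustrative sketch (not part of the original source): a miniflow
 * round trip.  Compressing a flow and expanding it again reproduces the
 * original, which is how this code is typically exercised in tests.  The
 * helper name is hypothetical. */
static bool
miniflow_example_round_trip(const struct flow *flow)
{
    struct miniflow mf;
    struct flow expanded;
    bool ok;

    miniflow_init(&mf, flow);
    miniflow_expand(&mf, &expanded);
    ok = flow_equal(flow, &expanded);
    miniflow_destroy(&mf);

    return ok;
}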
|
|
|
|
|
|
2014-09-29 15:21:37 +08:00
|
|
|
|
/* Returns true if 'a' and 'b' are equal miniflows, false otherwise. */
|
2012-09-04 12:43:53 -07:00
|
|
|
|
bool
|
|
|
|
|
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
|
|
|
|
|
{
|
2015-01-06 11:10:42 -08:00
|
|
|
|
const uint64_t *ap = miniflow_get_values(a);
|
|
|
|
|
const uint64_t *bp = miniflow_get_values(b);
|
2012-09-04 12:43:53 -07:00
|
|
|
|
|
2014-11-26 15:30:33 -08:00
|
|
|
|
if (OVS_LIKELY(a->map == b->map)) {
|
2014-04-29 15:50:39 -07:00
|
|
|
|
int count = miniflow_n_values(a);
|
|
|
|
|
|
2014-05-27 15:20:08 -07:00
|
|
|
|
return !memcmp(ap, bp, count * sizeof *ap);
|
2013-11-18 09:28:44 -08:00
|
|
|
|
} else {
|
2014-04-29 15:50:39 -07:00
|
|
|
|
uint64_t map;
|
|
|
|
|
|
2014-11-26 15:30:33 -08:00
|
|
|
|
for (map = a->map | b->map; map; map = zero_rightmost_1bit(map)) {
|
2013-11-18 09:28:44 -08:00
|
|
|
|
uint64_t bit = rightmost_1bit(map);
|
2013-02-06 16:13:19 -08:00
|
|
|
|
|
2014-11-26 15:30:33 -08:00
|
|
|
|
if ((a->map & bit ? *ap++ : 0) != (b->map & bit ? *bp++ : 0)) {
|
2013-11-18 09:28:44 -08:00
|
|
|
|
return false;
|
2013-02-06 16:13:19 -08:00
|
|
|
|
}
|
2012-09-04 12:43:53 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-02-06 16:13:19 -08:00
|
|
|
|
return true;
|
2012-09-04 12:43:53 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-11-13 11:54:31 -08:00
|
|
|
|
/* Returns false if 'a' and 'b' differ at the places where there are 1-bits
 * in 'mask', true otherwise. */
bool
miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
                           const struct minimask *mask)
{
    const uint64_t *p = miniflow_get_values(&mask->masks);
    int idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        if ((miniflow_get(a, idx) ^ miniflow_get(b, idx)) & *p++) {
            return false;
        }
    }

    return true;
}

/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
 * in 'mask', false if they differ. */
bool
miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
                                const struct minimask *mask)
{
    const uint64_t *b_u64 = (const uint64_t *) b;
    const uint64_t *p = miniflow_get_values(&mask->masks);
    int idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        if ((miniflow_get(a, idx) ^ b_u64[idx]) & *p++) {
            return false;
        }
    }

    return true;
}

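/* Usage sketch (illustrative only), assuming 'wc' is a struct flow_wildcards
 * naming the fields of interest, 'mf' is a miniflow built with
 * miniflow_init(), and 'pkt_flow' is the flow extracted from a packet: the
 * comparison ignores every bit that 'wc' wildcards.
 *
 *     struct minimask mask;
 *     bool match;
 *
 *     minimask_init(&mask, &wc);
 *     match = miniflow_equal_flow_in_minimask(&mf, &pkt_flow, &mask);
 *     minimask_destroy(&mask);
 */
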
/* Initializes 'mask' as a copy of 'wc'.  The caller must eventually free
 * 'mask' with minimask_destroy(). */
void
minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
{
    miniflow_init(&mask->masks, &wc->masks);
}

/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free
 * 'dst' with minimask_destroy(). */
void
minimask_clone(struct minimask *dst, const struct minimask *src)
{
    miniflow_clone(&dst->masks, &src->masks);
}

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 * The caller must eventually free 'dst' with minimask_destroy(). */
void
minimask_move(struct minimask *dst, struct minimask *src)
{
    miniflow_move(&dst->masks, &src->masks);
}

/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
 *
 * The caller must provide room for FLOW_U64S "uint64_t"s in 'storage', for
 * use by 'dst_'.  The caller must *not* free 'dst_' with minimask_destroy(). */
void
minimask_combine(struct minimask *dst_,
                 const struct minimask *a_, const struct minimask *b_,
                 uint64_t storage[FLOW_U64S])
{
    struct miniflow *dst = &dst_->masks;
    uint64_t *dst_values = storage;
    const struct miniflow *a = &a_->masks;
    const struct miniflow *b = &b_->masks;
    int idx;

    dst->values_inline = false;
    dst->offline_values = storage;

    dst->map = 0;
    MAP_FOR_EACH_INDEX(idx, a->map & b->map) {
        /* Both 'a' and 'b' have non-zero data at 'idx'. */
        uint64_t mask = miniflow_get__(a, idx) & miniflow_get__(b, idx);

        if (mask) {
            dst->map |= UINT64_C(1) << idx;
            *dst_values++ = mask;
        }
    }
}

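/* Usage sketch (illustrative only), assuming 'm1' and 'm2' are initialized
 * minimasks: 'combined' borrows the caller-provided 'storage' array, so it
 * must not be passed to minimask_destroy() and is only valid while 'storage'
 * remains in scope.
 *
 *     uint64_t storage[FLOW_U64S];
 *     struct minimask combined;
 *
 *     minimask_combine(&combined, &m1, &m2, storage);
 *     ... use 'combined' ...
 */
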
/* Frees any memory owned by 'mask'.  Does not free the storage in which
 * 'mask' itself resides; the caller is responsible for that. */
void
minimask_destroy(struct minimask *mask)
{
    miniflow_destroy(&mask->masks);
}

/* Initializes 'wc' as a copy of 'mask'. */
void
minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
{
    miniflow_expand(&mask->masks, &wc->masks);
}

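/* Usage sketch (illustrative only), assuming 'wc' is an initialized struct
 * flow_wildcards: a minimask round-trips through compression, so expanding it
 * again reproduces the original wildcards.
 *
 *     struct minimask mask;
 *     struct flow_wildcards expanded;
 *
 *     minimask_init(&mask, &wc);
 *     minimask_expand(&mask, &expanded);
 *     minimask_destroy(&mask);
 *
 * after which 'expanded.masks' holds the same masks as 'wc.masks'.
 */
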
/* Returns true if 'a' and 'b' are the same flow mask, false otherwise.
 * Minimasks may not have zero data values, so for the minimasks to be the
 * same, they need to have the same map and the same data values. */
bool
minimask_equal(const struct minimask *a, const struct minimask *b)
{
    return a->masks.map == b->masks.map &&
        !memcmp(miniflow_get_values(&a->masks),
                miniflow_get_values(&b->masks),
                count_1bits(a->masks.map) * sizeof *a->masks.inline_values);
}

/* Returns true if at least one bit matched by 'b' is wildcarded by 'a',
 * false otherwise. */
bool
minimask_has_extra(const struct minimask *a, const struct minimask *b)
{
    const uint64_t *ap = miniflow_get_values(&a->masks);
    const uint64_t *bp = miniflow_get_values(&b->masks);
    int idx;

    MAP_FOR_EACH_INDEX(idx, b->masks.map) {
        uint64_t b_u64 = *bp++;

        /* 'b_u64' is non-zero; check whether the data in 'a' is either zero
         * or misses some of the bits in 'b_u64'. */
        if (!(a->masks.map & (UINT64_C(1) << idx))
            || ((miniflow_values_get__(ap, a->masks.map, idx) & b_u64)
                != b_u64)) {
            return true; /* 'a' wildcards some bits 'b' doesn't. */
        }
    }

    return false;
}
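
/* Usage sketch (illustrative only), assuming 'wide' and 'narrow' are
 * initialized minimasks: minimask_has_extra(&wide, &narrow) reports whether
 * 'wide' wildcards at least one bit that 'narrow' still matches on.
 *
 *     if (!minimask_has_extra(&wide, &narrow)) {
 *         ... every bit matched by 'narrow' is also matched by 'wide' ...
 *     }
 */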