
netdev-dpdk-offload: Add vxlan pattern matching function.

For VXLAN offload, matches should be done on the outer header for the tunnel
properties as well as on the inner packet fields. Add a function for parsing
VXLAN tunnel matches.

Signed-off-by: Eli Britstein <elibr@nvidia.com>
Reviewed-by: Gaetan Rivet <gaetanr@nvidia.com>
Acked-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Tested-by: Emma Finn <emma.finn@intel.com>
Tested-by: Marko Kovacevic <marko.kovacevic@intel.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
Eli Britstein
2021-06-23 15:52:52 +00:00
committed by Ilya Maximets
parent 507d20e77b
commit e098c2f966
2 changed files with 156 additions and 1 deletion
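For readers not steeped in rte_flow, the commit message boils down to an ordering rule: the hardware pattern for a tunnelled flow lists the outer (tunnel) headers and the VXLAN item first, and only then the inner packet items. The following is a minimal sketch of such a pattern list against the public DPDK rte_flow API; it illustrates the ordering only and is not part of this patch. The helper name example_vxlan_pattern is invented, and masks are left NULL, which falls back to each item's default mask just as the dump code below assumes.

/* Illustration only: outer L3/L4 and VXLAN items precede the inner match. */
#include <rte_flow.h>

static void
example_vxlan_pattern(struct rte_flow_item items[5],
                      const struct rte_flow_item_ipv4 *outer_ipv4,
                      const struct rte_flow_item_udp *outer_udp,
                      const struct rte_flow_item_vxlan *vxlan,
                      const struct rte_flow_item_eth *inner_eth)
{
    int i = 0;

    /* Outer header: tunnel properties. */
    items[i++] = (struct rte_flow_item) {
        .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = outer_ipv4,
    };
    items[i++] = (struct rte_flow_item) {
        .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = outer_udp,
    };
    items[i++] = (struct rte_flow_item) {
        .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = vxlan,
    };
    /* Inner packet matches follow the tunnel items. */
    items[i++] = (struct rte_flow_item) {
        .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = inner_eth,
    };
    items[i++] = (struct rte_flow_item) {
        .type = RTE_FLOW_ITEM_TYPE_END,
    };
}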

NEWS

@@ -20,6 +20,8 @@ Post-v2.15.0
     * New debug appctl command 'dpdk/get-malloc-stats'.
     * Add hardware offload support for tunnel pop action (experimental).
       Available only if DPDK experimental APIs are enabled during the build.
     * Add hardware offload support for VXLAN flows (experimental).
       Available only if DPDK experimental APIs are enabled during the build.
   - ovsdb-tool:
     * New option '--election-timer' to the 'create-cluster' command to set the
       leader election timer during cluster creation.

lib/netdev-offload-dpdk.c

@@ -372,6 +372,20 @@ dump_flow_pattern(struct ds *s,
                              ipv6_mask->hdr.hop_limits);
        }
        ds_put_cstr(s, "/ ");
    } else if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
        const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
        const struct rte_flow_item_vxlan *vxlan_mask = item->mask;

        ds_put_cstr(s, "vxlan ");
        if (vxlan_spec) {
            if (!vxlan_mask) {
                vxlan_mask = &rte_flow_item_vxlan_mask;
            }
            DUMP_PATTERN_ITEM(vxlan_mask->vni, "vni", "%"PRIu32,
                              ntohl(*(ovs_be32 *) vxlan_spec->vni) >> 8,
                              ntohl(*(ovs_be32 *) vxlan_mask->vni) >> 8);
        }
        ds_put_cstr(s, "/ ");
    } else {
        ds_put_format(s, "unknown rte flow pattern (%d)\n", item->type);
    }
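A note on the VNI handling above: struct rte_flow_item_vxlan keeps the 24-bit VNI as a 3-byte network-order array, so the dump code loads 32 bits starting at vni[0] (the fourth byte read is the struct's adjacent reserved byte) and shifts right by 8 to recover the numeric value. Below is a stand-alone sketch of the same decode using only libc and a 3-byte copy rather than a cast; the helper name vni_to_host_u32 is made up for illustration.

#include <arpa/inet.h>   /* ntohl() */
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Decode a 24-bit VXLAN VNI stored as 3 network-order bytes.  Copying the
 * bytes avoids reading past the array the way the cast above does. */
static uint32_t
vni_to_host_u32(const uint8_t vni[3])
{
    uint32_t be32 = 0;

    /* The 3 VNI bytes become the most significant bytes of a 32-bit
     * network-order word; the low byte stays zero. */
    memcpy(&be32, vni, 3);
    /* Convert to host order and drop the zero padding byte. */
    return ntohl(be32) >> 8;
}

int
main(void)
{
    const uint8_t vni[3] = { 0x00, 0x00, 0x2a };   /* VNI 42 on the wire. */

    printf("vni=%"PRIu32"\n", vni_to_host_u32(vni));
    return 0;
}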
@@ -865,15 +879,154 @@ out:
    return ret;
}

static int
parse_tnl_ip_match(struct flow_patterns *patterns,
                   struct match *match,
                   uint8_t proto)
{
    struct flow *consumed_masks;

    consumed_masks = &match->wc.masks;

    /* IP v4 */
    if (match->wc.masks.tunnel.ip_src || match->wc.masks.tunnel.ip_dst) {
        struct rte_flow_item_ipv4 *spec, *mask;

        spec = xzalloc(sizeof *spec);
        mask = xzalloc(sizeof *mask);

        spec->hdr.type_of_service = match->flow.tunnel.ip_tos;
        spec->hdr.time_to_live = match->flow.tunnel.ip_ttl;
        spec->hdr.next_proto_id = proto;
        spec->hdr.src_addr = match->flow.tunnel.ip_src;
        spec->hdr.dst_addr = match->flow.tunnel.ip_dst;

        mask->hdr.type_of_service = match->wc.masks.tunnel.ip_tos;
        mask->hdr.time_to_live = match->wc.masks.tunnel.ip_ttl;
        mask->hdr.next_proto_id = UINT8_MAX;
        mask->hdr.src_addr = match->wc.masks.tunnel.ip_src;
        mask->hdr.dst_addr = match->wc.masks.tunnel.ip_dst;

        consumed_masks->tunnel.ip_tos = 0;
        consumed_masks->tunnel.ip_ttl = 0;
        consumed_masks->tunnel.ip_src = 0;
        consumed_masks->tunnel.ip_dst = 0;

        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV4, spec, mask);
    } else if (!is_all_zeros(&match->wc.masks.tunnel.ipv6_src,
                             sizeof(struct in6_addr)) ||
               !is_all_zeros(&match->wc.masks.tunnel.ipv6_dst,
                             sizeof(struct in6_addr))) {
        /* IP v6 */
        struct rte_flow_item_ipv6 *spec, *mask;

        spec = xzalloc(sizeof *spec);
        mask = xzalloc(sizeof *mask);

        spec->hdr.proto = proto;
        spec->hdr.hop_limits = match->flow.tunnel.ip_ttl;
        spec->hdr.vtc_flow = htonl((uint32_t) match->flow.tunnel.ip_tos <<
                                   RTE_IPV6_HDR_TC_SHIFT);
        memcpy(spec->hdr.src_addr, &match->flow.tunnel.ipv6_src,
               sizeof spec->hdr.src_addr);
        memcpy(spec->hdr.dst_addr, &match->flow.tunnel.ipv6_dst,
               sizeof spec->hdr.dst_addr);

        mask->hdr.proto = UINT8_MAX;
        mask->hdr.hop_limits = match->wc.masks.tunnel.ip_ttl;
        mask->hdr.vtc_flow = htonl((uint32_t) match->wc.masks.tunnel.ip_tos <<
                                   RTE_IPV6_HDR_TC_SHIFT);
        memcpy(mask->hdr.src_addr, &match->wc.masks.tunnel.ipv6_src,
               sizeof mask->hdr.src_addr);
        memcpy(mask->hdr.dst_addr, &match->wc.masks.tunnel.ipv6_dst,
               sizeof mask->hdr.dst_addr);

        consumed_masks->tunnel.ip_tos = 0;
        consumed_masks->tunnel.ip_ttl = 0;
        memset(&consumed_masks->tunnel.ipv6_src, 0,
               sizeof consumed_masks->tunnel.ipv6_src);
        memset(&consumed_masks->tunnel.ipv6_dst, 0,
               sizeof consumed_masks->tunnel.ipv6_dst);

        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV6, spec, mask);
    } else {
        VLOG_ERR_RL(&rl, "Tunnel L3 protocol is neither IPv4 nor IPv6");
        return -1;
    }

    return 0;
}

static void
parse_tnl_udp_match(struct flow_patterns *patterns,
                    struct match *match)
{
    struct flow *consumed_masks;
    struct rte_flow_item_udp *spec, *mask;

    consumed_masks = &match->wc.masks;

    spec = xzalloc(sizeof *spec);
    mask = xzalloc(sizeof *mask);

    spec->hdr.src_port = match->flow.tunnel.tp_src;
    spec->hdr.dst_port = match->flow.tunnel.tp_dst;

    mask->hdr.src_port = match->wc.masks.tunnel.tp_src;
    mask->hdr.dst_port = match->wc.masks.tunnel.tp_dst;

    consumed_masks->tunnel.tp_src = 0;
    consumed_masks->tunnel.tp_dst = 0;

    add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_UDP, spec, mask);
}

static int
parse_vxlan_match(struct flow_patterns *patterns,
                  struct match *match)
{
    struct rte_flow_item_vxlan *vx_spec, *vx_mask;
    struct flow *consumed_masks;
    int ret;

    ret = parse_tnl_ip_match(patterns, match, IPPROTO_UDP);
    if (ret) {
        return -1;
    }
    parse_tnl_udp_match(patterns, match);

    consumed_masks = &match->wc.masks;

    /* VXLAN */
    vx_spec = xzalloc(sizeof *vx_spec);
    vx_mask = xzalloc(sizeof *vx_mask);

    put_unaligned_be32((ovs_be32 *) vx_spec->vni,
                       htonl(ntohll(match->flow.tunnel.tun_id) << 8));
    put_unaligned_be32((ovs_be32 *) vx_mask->vni,
                       htonl(ntohll(match->wc.masks.tunnel.tun_id) << 8));

    consumed_masks->tunnel.tun_id = 0;
    consumed_masks->tunnel.flags = 0;

    add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_VXLAN, vx_spec, vx_mask);

    return 0;
}

static int OVS_UNUSED
parse_flow_tnl_match(struct netdev *tnldev,
                     struct flow_patterns *patterns,
                     odp_port_t orig_in_port,
-                    struct match *match OVS_UNUSED)
+                    struct match *match)
{
    int ret;

    ret = add_vport_match(patterns, orig_in_port, tnldev);
    if (ret) {
        return ret;
    }

    if (!strcmp(netdev_get_type(tnldev), "vxlan")) {
        ret = parse_vxlan_match(patterns, match);
    }

    return ret;
}
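Encoding goes the other way in parse_vxlan_match(): ntohll() brings tun_id to host order, the value is shifted left by 8 and converted back to a 32-bit big-endian word, and put_unaligned_be32() writes that word starting at vni[0], so the three VNI bytes fill the array and the zero low byte spills into the item's adjacent reserved byte. The sketch below does the equivalent packing with plain libc calls but copies only the three meaningful bytes; vni_from_host_u32 is an invented name, not an OVS helper.

#include <arpa/inet.h>   /* htonl() */
#include <stdint.h>
#include <string.h>

/* Store a host-order VNI (only the low 24 bits are meaningful) as the
 * 3 network-order bytes expected by struct rte_flow_item_vxlan's vni. */
static void
vni_from_host_u32(uint8_t vni[3], uint32_t host_vni)
{
    uint32_t be32 = htonl(host_vni << 8);   /* VNI lands in the top 3 bytes. */

    memcpy(vni, &be32, 3);                  /* Keep those 3 bytes only. */
}

Spec and mask are packed with the same expression in the patch, which keeps the two 3-byte arrays bit-for-bit consistent with the tun_id value and its wildcard mask.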