mirror of https://github.com/openvswitch/ovs — synced 2025-08-31 14:25:26 +00:00

netdev-offload-dpdk: Support IPv4 fragmentation types.

Support IPv4 fragmentation matching.

Signed-off-by: Eli Britstein <elibr@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Tested-by: Emma Finn <emma.finn@intel.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
This commit is contained in:
Author: Eli Britstein
Authored: 2021-08-16 16:53:18 +03:00
Committed by: Ilya Maximets
parent 26b18f1895
commit c1a5d0e2b5

View File

@@ -250,12 +250,18 @@ dump_flow_pattern(struct ds *s,
} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
const struct rte_flow_item_ipv4 *ipv4_last = item->last;
ds_put_cstr(s, "ipv4 ");
if (ipv4_spec) {
ovs_be16 fragment_offset_mask;
if (!ipv4_mask) {
ipv4_mask = &rte_flow_item_ipv4_mask;
}
if (!ipv4_last) {
ipv4_last = &rte_flow_item_ipv4_mask;
}
DUMP_PATTERN_ITEM(ipv4_mask->hdr.src_addr, false, "src", IP_FMT,
IP_ARGS(ipv4_spec->hdr.src_addr),
IP_ARGS(ipv4_mask->hdr.src_addr), IP_ARGS(0));
@@ -271,6 +277,16 @@ dump_flow_pattern(struct ds *s,
DUMP_PATTERN_ITEM(ipv4_mask->hdr.time_to_live, false, "ttl",
"0x%"PRIx8, ipv4_spec->hdr.time_to_live,
ipv4_mask->hdr.time_to_live, 0);
fragment_offset_mask = ipv4_mask->hdr.fragment_offset ==
htons(RTE_IPV4_HDR_OFFSET_MASK |
RTE_IPV4_HDR_MF_FLAG)
? OVS_BE16_MAX
: ipv4_mask->hdr.fragment_offset;
DUMP_PATTERN_ITEM(fragment_offset_mask, item->last,
"fragment_offset", "0x%"PRIx16,
ntohs(ipv4_spec->hdr.fragment_offset),
ntohs(ipv4_mask->hdr.fragment_offset),
ntohs(ipv4_last->hdr.fragment_offset));
}
ds_put_cstr(s, "/ ");
} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
@@ -1134,7 +1150,7 @@ parse_flow_match(struct netdev *netdev,
/* IP v4 */
if (match->flow.dl_type == htons(ETH_TYPE_IP)) {
struct rte_flow_item_ipv4 *spec, *mask;
struct rte_flow_item_ipv4 *spec, *mask, *last = NULL;
spec = xzalloc(sizeof *spec);
mask = xzalloc(sizeof *mask);
@@ -1157,7 +1173,37 @@ parse_flow_match(struct netdev *netdev,
consumed_masks->nw_src = 0;
consumed_masks->nw_dst = 0;
add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV4, spec, mask, NULL);
if (match->wc.masks.nw_frag & FLOW_NW_FRAG_ANY) {
if (!(match->flow.nw_frag & FLOW_NW_FRAG_ANY)) {
/* frag=no. */
spec->hdr.fragment_offset = 0;
mask->hdr.fragment_offset = htons(RTE_IPV4_HDR_OFFSET_MASK
| RTE_IPV4_HDR_MF_FLAG);
} else if (match->wc.masks.nw_frag & FLOW_NW_FRAG_LATER) {
if (!(match->flow.nw_frag & FLOW_NW_FRAG_LATER)) {
/* frag=first. */
spec->hdr.fragment_offset = htons(RTE_IPV4_HDR_MF_FLAG);
mask->hdr.fragment_offset = htons(RTE_IPV4_HDR_OFFSET_MASK
| RTE_IPV4_HDR_MF_FLAG);
} else {
/* frag=later. */
last = xzalloc(sizeof *last);
spec->hdr.fragment_offset =
htons(1 << RTE_IPV4_HDR_FO_SHIFT);
mask->hdr.fragment_offset =
htons(RTE_IPV4_HDR_OFFSET_MASK);
last->hdr.fragment_offset =
htons(RTE_IPV4_HDR_OFFSET_MASK);
}
} else {
VLOG_WARN_RL(&rl, "Unknown IPv4 frag (0x%x/0x%x)",
match->flow.nw_frag, match->wc.masks.nw_frag);
return -1;
}
consumed_masks->nw_frag = 0;
}
add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV4, spec, mask, last);
/* Save proto for L4 protocol setup. */
proto = spec->hdr.next_proto_id &