2014-06-23 11:43:57 -07:00
|
|
|
/*
|
2016-04-06 16:28:51 -07:00
|
|
|
* Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2016 Nicira, Inc.
|
2014-06-23 11:43:57 -07:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at:
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <config.h>
|
2015-02-22 03:21:09 -08:00
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
2016-10-04 17:58:05 -07:00
|
|
|
|
2015-02-25 12:01:53 -08:00
|
|
|
#include "dp-packet.h"
|
2019-07-18 13:11:14 -07:00
|
|
|
#include "netdev-afxdp.h"
|
2016-10-04 17:58:05 -07:00
|
|
|
#include "netdev-dpdk.h"
|
2023-06-14 15:03:26 -04:00
|
|
|
#include "netdev-provider.h"
|
2016-10-04 17:58:05 -07:00
|
|
|
#include "openvswitch/dynamic-string.h"
|
2015-02-22 03:21:09 -08:00
|
|
|
#include "util.h"
|
2014-06-23 11:43:57 -07:00
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
static void
|
|
|
|
dp_packet_init__(struct dp_packet *b, size_t allocated, enum dp_packet_source source)
|
|
|
|
{
|
2015-05-18 10:47:47 -07:00
|
|
|
dp_packet_set_allocated(b, allocated);
|
2015-02-22 03:21:09 -08:00
|
|
|
b->source = source;
|
2015-12-01 15:03:09 +09:00
|
|
|
dp_packet_reset_offsets(b);
|
2015-06-30 19:19:40 -07:00
|
|
|
pkt_metadata_init(&b->md, 0);
|
ofp-actions: Add truncate action.
The patch adds a new action to support packet truncation. The new action
is formatted as 'output(port=n,max_len=m)', as output to port n, with
packet size being MIN(original_size, m).
One use case is to enable port mirroring to send smaller packets to the
destination port so that only useful packet information is mirrored/copied,
saving some performance overhead of copying entire packet payload. Example
use case is below as well as shown in the testcases:
- Output to port 1 with max_len 100 bytes.
- The output packet size on port 1 will be MIN(original_packet_size, 100).
# ovs-ofctl add-flow br0 'actions=output(port=1,max_len=100)'
- The scope of max_len is limited to output action itself. The following
packet size of output:1 and output:2 will be intact.
# ovs-ofctl add-flow br0 \
'actions=output(port=1,max_len=100),output:1,output:2'
- The Datapath actions shows:
# Datapath actions: trunc(100),1,1,2
Tested-at: https://travis-ci.org/williamtu/ovs-travis/builds/140037134
Signed-off-by: William Tu <u9012063@gmail.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
2016-06-24 07:42:30 -07:00
|
|
|
dp_packet_reset_cutlen(b);
|
2019-02-26 13:38:37 +03:00
|
|
|
dp_packet_reset_offload(b);
|
2023-11-21 14:26:51 -05:00
|
|
|
dp_packet_set_tso_segsz(b, 0);
|
2019-02-26 13:38:37 +03:00
|
|
|
/* Initialize implementation-specific fields of dp_packet. */
|
|
|
|
dp_packet_init_specific(b);
|
2017-04-25 16:29:59 +00:00
|
|
|
/* By default assume the packet type to be Ethernet. */
|
|
|
|
b->packet_type = htonl(PT_ETH);
|
2015-02-22 03:21:09 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Points 'b' at the 'allocated'-byte buffer starting at 'base' with no data
 * in it yet, then finishes initialization with dp_packet_init__().  'source'
 * records how the buffer was obtained so that it can be released correctly
 * later. */
static void
dp_packet_use__(struct dp_packet *b, void *base, size_t allocated,
                enum dp_packet_source source)
{
    dp_packet_set_base(b, base);
    dp_packet_set_data(b, base);
    dp_packet_set_size(b, 0);

    dp_packet_init__(b, allocated, source);
}
|
|
|
|
|
|
|
|
/* Initializes 'b' as an empty dp_packet that contains the 'allocated' bytes of
 * memory starting at 'base'.  'base' should be the first byte of a region
 * obtained from malloc().  It will be freed (with free()) if 'b' is resized or
 * freed. */
void
dp_packet_use(struct dp_packet *b, void *base, size_t allocated)
{
    dp_packet_use__(b, base, allocated, DPBUF_MALLOC);
}
|
|
|
|
|
2019-07-18 13:11:14 -07:00
|
|
|
#if HAVE_AF_XDP
|
|
|
|
/* Initialize 'b' as an empty dp_packet that contains
|
|
|
|
* memory starting at AF_XDP umem base.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
dp_packet_use_afxdp(struct dp_packet *b, void *data, size_t allocated,
|
|
|
|
size_t headroom)
|
|
|
|
{
|
|
|
|
dp_packet_set_base(b, (char *)data - headroom);
|
|
|
|
dp_packet_set_data(b, data);
|
|
|
|
dp_packet_set_size(b, 0);
|
|
|
|
|
|
|
|
dp_packet_init__(b, allocated, DPBUF_AFXDP);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
/* Initializes 'b' as an empty dp_packet that contains the 'allocated' bytes of
 * memory starting at 'base'.  'base' should point to a buffer on the stack.
 * (Nothing actually relies on 'base' being allocated on the stack.  It could
 * be static or malloc()'d memory.  But stack space is the most common use
 * case.)
 *
 * 'base' should be appropriately aligned.  Using an array of uint32_t or
 * uint64_t for the buffer is a reasonable way to ensure appropriate alignment
 * for 32- or 64-bit data.
 *
 * A dp_packet operation that requires reallocating data will copy the provided
 * buffer into a malloc()'d buffer.  Thus, it is wise to call
 * dp_packet_uninit() on a dp_packet initialized by this function, so that if
 * it expanded into the heap, that memory is freed. */
void
dp_packet_use_stub(struct dp_packet *b, void *base, size_t allocated)
{
    dp_packet_use__(b, base, allocated, DPBUF_STUB);
}
|
|
|
|
|
|
|
|
/* Initializes 'b' as a dp_packet whose data starts at 'data' and continues for
 * 'size' bytes.  This is appropriate for a dp_packet that will be used to
 * inspect existing data, without moving it around or reallocating it, and
 * generally without modifying it at all.
 *
 * A dp_packet operation that requires reallocating data will assert-fail if
 * this function was used to initialize it. */
void
dp_packet_use_const(struct dp_packet *b, const void *data, size_t size)
{
    dp_packet_use__(b, CONST_CAST(void *, data), size, DPBUF_STACK);
    dp_packet_set_size(b, size);
}
|
|
|
|
|
dp-packet: Fix allocated size on DPDK init.
When enabled with DPDK OvS deals with two types of packets, the ones
coming from the mempool and the ones locally created by OvS - which are
copied to mempool mbufs before output. In the latter, the space is
allocated from the system, while in the former the mbufs are allocated
from a mempool, which takes care of initialising them appropriately.
In the current implementation, during mempool's initialisation of mbufs,
dp_packet_set_allocated() is called from dp_packet_init_dpdk() without
considering that the allocated space, in the case of multi-segment
mbufs, might be greater than a single mbuf. Furthermore, given that
dp_packet_init_dpdk() is on the code path that's called upon mempool's
initialisation, a call to dp_packet_set_allocated() is redundant, since
mempool takes care of initialising it.
To fix this, dp_packet_set_allocated() is no longer called after
initialisation of a mempool, only in dp_packet_init__(), which is still
called by OvS when initialising locally created packets.
Signed-off-by: Tiago Lam <tiago.lam@intel.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
Acked-by: Flavio Leitner <fbl@sysclose.org>
Signed-off-by: Ian Stokes <ian.stokes@intel.com>
2018-11-02 09:06:34 +00:00
|
|
|
/* Initializes 'b' as a DPDK dp-packet, which must have been allocated from a
 * DPDK memory pool.
 *
 * Only the source tag is set here; the mempool is responsible for
 * initializing the rest of the mbuf, and transient fields are initialized
 * when a packet is received. */
void
dp_packet_init_dpdk(struct dp_packet *b)
{
    b->source = DPBUF_DPDK;
}
|
|
|
|
|
|
|
|
/* Initializes 'b' as an empty dp_packet with an initial capacity of 'size'
 * bytes.  A zero 'size' leaves the buffer unallocated until data is added. */
void
dp_packet_init(struct dp_packet *b, size_t size)
{
    void *base = NULL;

    if (size) {
        base = xmalloc(size);
    }
    dp_packet_use(b, base, size);
}
|
|
|
|
|
|
|
|
/* Frees memory that 'b' points to. */
|
|
|
|
void
|
|
|
|
dp_packet_uninit(struct dp_packet *b)
|
|
|
|
{
|
|
|
|
if (b) {
|
|
|
|
if (b->source == DPBUF_MALLOC) {
|
|
|
|
free(dp_packet_base(b));
|
|
|
|
} else if (b->source == DPBUF_DPDK) {
|
2022-08-25 12:25:24 +02:00
|
|
|
free_dpdk_buf(b);
|
2019-07-18 13:11:14 -07:00
|
|
|
} else if (b->source == DPBUF_AFXDP) {
|
|
|
|
free_afxdp_buf(b);
|
2015-02-22 03:21:09 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Creates and returns a new dp_packet with an initial capacity of 'size'
 * bytes. */
struct dp_packet *
dp_packet_new(size_t size)
{
#ifdef DPDK_NETDEV
    /* DPDK builds allocate the struct cache-line aligned. */
    struct dp_packet *b = xmalloc_cacheline(sizeof *b);
#else
    struct dp_packet *b = xmalloc(sizeof *b);
#endif
    dp_packet_init(b, size);
    return b;
}
|
2014-06-23 11:43:57 -07:00
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
/* Creates and returns a new dp_packet with an initial capacity of 'size +
 * headroom' bytes, reserving the first 'headroom' bytes as headroom. */
struct dp_packet *
dp_packet_new_with_headroom(size_t size, size_t headroom)
{
    struct dp_packet *p;

    p = dp_packet_new(size + headroom);
    dp_packet_reserve(p, headroom);

    return p;
}
|
|
|
|
|
|
|
|
/* Creates and returns a new dp_packet that initially contains a copy of the
 * 'dp_packet_size(buffer)' bytes of data starting at 'buffer->data' with no
 * headroom or tailroom.  'buffer' must be nonnull. */
struct dp_packet *
dp_packet_clone(const struct dp_packet *buffer)
{
    ovs_assert(buffer);
    return dp_packet_clone_with_headroom(buffer, 0);
}
|
2014-06-23 11:43:57 -07:00
|
|
|
|
2017-08-25 00:37:33 -07:00
|
|
|
/* Creates and returns a new dp_packet whose data are copied from 'buffer'.
 * The returned dp_packet will additionally have 'headroom' bytes of
 * headroom. */
struct dp_packet *
dp_packet_clone_with_headroom(const struct dp_packet *buffer, size_t headroom)
{
    const void *data_dp = dp_packet_data(buffer);
    struct dp_packet *new_buffer;
    uint32_t mark;

    /* A buffer with no data pointer cannot be cloned. */
    ovs_assert(data_dp);

    new_buffer = dp_packet_clone_data_with_headroom(data_dp,
                                                    dp_packet_size(buffer),
                                                    headroom);
    /* Copy the following fields into the returned buffer: l2_pad_size,
     * l2_5_ofs, l3_ofs, l4_ofs, cutlen, packet_type, offloads and md.
     * This relies on those being the trailing members of struct dp_packet,
     * starting at l2_pad_size, so a single memcpy covers them all. */
    memcpy(&new_buffer->l2_pad_size, &buffer->l2_pad_size,
           sizeof(struct dp_packet) -
           offsetof(struct dp_packet, l2_pad_size));

    dp_packet_set_tso_segsz(new_buffer, dp_packet_get_tso_segsz(buffer));

    /* RSS hash and flow mark are only propagated when valid in 'buffer'. */
    if (dp_packet_rss_valid(buffer)) {
        dp_packet_set_rss_hash(new_buffer, dp_packet_get_rss_hash(buffer));
    }
    if (dp_packet_has_flow_mark(buffer, &mark)) {
        dp_packet_set_flow_mark(new_buffer, mark);
    }

    return new_buffer;
}
|
|
|
|
|
|
|
|
/* Creates and returns a new dp_packet that initially contains a copy of the
 * 'size' bytes of data starting at 'data' with no headroom or tailroom. */
struct dp_packet *
dp_packet_clone_data(const void *data, size_t size)
{
    return dp_packet_clone_data_with_headroom(data, size, 0);
}
|
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
/* Creates and returns a new dp_packet that initially contains 'headroom'
 * bytes of headroom followed by a copy of the 'size' bytes of data starting
 * at 'data'. */
struct dp_packet *
dp_packet_clone_data_with_headroom(const void *data, size_t size,
                                   size_t headroom)
{
    struct dp_packet *b = dp_packet_new_with_headroom(size, headroom);

    dp_packet_put(b, data, size);
    return b;
}
|
2014-06-23 11:43:57 -07:00
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
/* Copies the data of 'b', plus as much of its existing headroom and tailroom
 * as fits within 'new_headroom' and 'new_tailroom', into the buffer at
 * 'new_base', positioning the data so that it ends up with exactly
 * 'new_headroom' bytes before it.  Does not modify 'b' itself. */
static void
dp_packet_copy__(struct dp_packet *b, uint8_t *new_base,
                 size_t new_headroom, size_t new_tailroom)
{
    const uint8_t *old_base = dp_packet_base(b);
    size_t old_headroom = dp_packet_headroom(b);
    size_t old_tailroom = dp_packet_tailroom(b);
    /* Only the overlap of old and new room can be preserved. */
    size_t copy_headroom = MIN(old_headroom, new_headroom);
    size_t copy_tailroom = MIN(old_tailroom, new_tailroom);

    /* One memcpy covers the preserved headroom, the data, and the preserved
     * tailroom, since they are contiguous in both buffers. */
    memcpy(&new_base[new_headroom - copy_headroom],
           &old_base[old_headroom - copy_headroom],
           copy_headroom + dp_packet_size(b) + copy_tailroom);
}
|
2014-06-23 11:43:57 -07:00
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
/* Reallocates 'b' so that it has exactly 'new_headroom' and 'new_tailroom'
 * bytes of headroom and tailroom, respectively.  Asserts for buffer sources
 * that cannot be resized (DPDK, stack, AF_XDP). */
void
dp_packet_resize(struct dp_packet *b, size_t new_headroom, size_t new_tailroom)
{
    void *new_base, *new_data;
    size_t new_allocated;

    new_allocated = new_headroom + dp_packet_size(b) + new_tailroom;

    switch (b->source) {
    case DPBUF_DPDK:
        OVS_NOT_REACHED();

    case DPBUF_MALLOC:
        if (new_headroom == dp_packet_headroom(b)) {
            /* Headroom unchanged, so the data keeps its offset from the
             * base and realloc() in place suffices. */
            new_base = xrealloc(dp_packet_base(b), new_allocated);
        } else {
            /* Headroom changes, so allocate fresh memory and reposition
             * the data within it. */
            new_base = xmalloc(new_allocated);
            dp_packet_copy__(b, new_base, new_headroom, new_tailroom);
            free(dp_packet_base(b));
        }
        break;

    case DPBUF_STACK:
        OVS_NOT_REACHED();

    case DPBUF_AFXDP:
        OVS_NOT_REACHED();

    case DPBUF_STUB:
        /* A stub buffer graduates to heap memory on its first resize;
         * the original stub memory is owned by the caller and not freed. */
        b->source = DPBUF_MALLOC;
        new_base = xmalloc(new_allocated);
        dp_packet_copy__(b, new_base, new_headroom, new_tailroom);
        break;

    default:
        OVS_NOT_REACHED();
    }

    dp_packet_set_allocated(b, new_allocated);
    dp_packet_set_base(b, new_base);

    new_data = (char *) new_base + new_headroom;
    if (dp_packet_data(b) != new_data) {
        dp_packet_set_data(b, new_data);
    }
}
|
2014-06-23 11:43:57 -07:00
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
/* Ensures that 'b' has room for at least 'size' bytes at its tail end,
 * reallocating and copying its data if necessary.  Its headroom, if any, is
 * preserved. */
void
dp_packet_prealloc_tailroom(struct dp_packet *b, size_t size)
{
    /* The first clause handles a buffer that has never been allocated
     * (NULL base); the second, an allocated buffer that lacks room.
     * MAX(size, 64) grows the buffer to at least 64 bytes. */
    if ((size && !dp_packet_base(b)) || (size > dp_packet_tailroom(b))) {
        dp_packet_resize(b, dp_packet_headroom(b), MAX(size, 64));
    }
}
|
|
|
|
|
|
|
|
/* Ensures that 'b' has room for at least 'size' bytes at its head,
 * reallocating and copying its data if necessary.  Its tailroom, if any, is
 * preserved. */
void
dp_packet_prealloc_headroom(struct dp_packet *b, size_t size)
{
    if (size <= dp_packet_headroom(b)) {
        /* Enough headroom already. */
        return;
    }
    /* Grow the headroom to at least 64 bytes. */
    dp_packet_resize(b, MAX(size, 64), dp_packet_tailroom(b));
}
|
|
|
|
|
|
|
|
/* Shifts all of the data within the allocated space in 'b' by 'delta' bytes.
 * For example, a 'delta' of 1 would cause each byte of data to move one byte
 * forward (from address 'p' to 'p+1'), and a 'delta' of -1 would cause each
 * byte to move one byte backward (from 'p' to 'p-1'). */
void
dp_packet_shift(struct dp_packet *b, int delta)
{
    /* The shift must fit within the existing tailroom (forward shift) or
     * headroom (backward shift). */
    ovs_assert(delta > 0 ? delta <= dp_packet_tailroom(b)
               : delta < 0 ? -delta <= dp_packet_headroom(b)
               : true);

    if (delta != 0) {
        const void *data_dp = dp_packet_data(b);
        char *dst = (char *) data_dp + delta;

        ovs_assert(data_dp);

        /* memmove(), not memcpy(): source and destination may overlap. */
        memmove(dst, data_dp, dp_packet_size(b));
        dp_packet_set_data(b, dst);
    }
}
|
|
|
|
|
|
|
|
/* Appends 'size' bytes of data to the tail end of 'b', reallocating and
 * copying its data if necessary.  Returns a pointer to the first byte of the
 * new data, which is left uninitialized. */
void *
dp_packet_put_uninit(struct dp_packet *b, size_t size)
{
    void *tail;

    dp_packet_prealloc_tailroom(b, size);
    tail = dp_packet_tail(b);
    dp_packet_set_size(b, dp_packet_size(b) + size);

    return tail;
}
|
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
/* Appends 'size' zeroed bytes to the tail end of 'b'.  Data in 'b' is
 * reallocated and copied if necessary.  Returns a pointer to the first byte
 * of the data's location in the dp_packet. */
void *
dp_packet_put_zeros(struct dp_packet *b, size_t size)
{
    void *room = dp_packet_put_uninit(b, size);

    nullable_memset(room, 0, size);
    return room;
}
|
|
|
|
|
|
|
|
/* Appends the 'size' bytes of data in 'p' to the tail end of 'b'.  Data in
 * 'b' is reallocated and copied if necessary.  Returns a pointer to the
 * first byte of the data's location in the dp_packet. */
void *
dp_packet_put(struct dp_packet *b, const void *p, size_t size)
{
    void *room = dp_packet_put_uninit(b, size);

    nullable_memcpy(room, p, size);
    return room;
}
|
|
|
|
|
|
|
|
/* Parses as many pairs of hex digits as possible (possibly separated by
 * spaces) from the beginning of 's', appending bytes for their values to 'b'.
 * Returns the first character of 's' that is not the first of a pair of hex
 * digits.  If 'n' is nonnull, stores the number of bytes added to 'b' in
 * '*n'. */
char *
dp_packet_put_hex(struct dp_packet *b, const char *s, size_t *n)
{
    size_t initial_size = dp_packet_size(b);
    for (;;) {
        uint8_t byte;
        bool ok;

        /* Skip whitespace between digit pairs. */
        s += strspn(s, " \t\r\n");
        byte = hexits_value(s, 2, &ok);
        if (!ok) {
            /* Not a complete pair of hex digits: stop, report how many
             * bytes were appended, and return the stopping point. */
            if (n) {
                *n = dp_packet_size(b) - initial_size;
            }
            return CONST_CAST(char *, s);
        }

        dp_packet_put(b, &byte, 1);
        s += 2;
    }
}
|
|
|
|
|
|
|
|
/* Reserves 'size' bytes of headroom so that they can be later allocated with
 * dp_packet_push_uninit() without reallocating the dp_packet.
 *
 * 'b' must be empty: reserving headroom only moves the data pointer, so any
 * existing data would be stranded. */
void
dp_packet_reserve(struct dp_packet *b, size_t size)
{
    ovs_assert(!dp_packet_size(b));
    dp_packet_prealloc_tailroom(b, size);
    dp_packet_set_data(b, (char*)dp_packet_data(b) + size);
}
|
2014-06-23 11:43:57 -07:00
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
/* Reserves 'headroom' bytes at the head and 'tailroom' at the end so that
 * they can be later allocated with dp_packet_push_uninit() or
 * dp_packet_put_uninit() without reallocating the dp_packet.
 *
 * 'b' must be empty (see dp_packet_reserve()). */
void
dp_packet_reserve_with_tailroom(struct dp_packet *b, size_t headroom,
                                size_t tailroom)
{
    ovs_assert(!dp_packet_size(b));
    /* Allocate both rooms in one go, then advance the data pointer past
     * the headroom. */
    dp_packet_prealloc_tailroom(b, headroom + tailroom);
    dp_packet_set_data(b, (char*)dp_packet_data(b) + headroom);
}
|
2014-06-23 11:43:57 -07:00
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
/* Prefixes 'size' bytes to the head end of 'b', reallocating and copying its
 * data if necessary.  Returns a pointer to the first byte of the data's
 * location in the dp_packet.  The new data is left uninitialized. */
void *
dp_packet_push_uninit(struct dp_packet *b, size_t size)
{
    char *new_data;

    dp_packet_prealloc_headroom(b, size);
    new_data = (char *) dp_packet_data(b) - size;
    dp_packet_set_data(b, new_data);
    dp_packet_set_size(b, dp_packet_size(b) + size);

    return new_data;
}
|
2014-06-23 11:43:59 -07:00
|
|
|
|
2015-02-22 03:21:09 -08:00
|
|
|
/* Prefixes 'size' zeroed bytes to the head end of 'b', reallocating and
 * copying its data if necessary.  Returns a pointer to the first byte of the
 * data's location in the dp_packet. */
void *
dp_packet_push_zeros(struct dp_packet *b, size_t size)
{
    void *room = dp_packet_push_uninit(b, size);

    nullable_memset(room, 0, size);
    return room;
}
|
|
|
|
|
|
|
|
/* Copies the 'size' bytes starting at 'p' to the head end of 'b',
 * reallocating and copying its data if necessary.  Returns a pointer to the
 * first byte of the data's location in the dp_packet. */
void *
dp_packet_push(struct dp_packet *b, const void *p, size_t size)
{
    void *room = dp_packet_push_uninit(b, size);

    nullable_memcpy(room, p, size);
    return room;
}
|
|
|
|
|
|
|
|
/* Returns the data in 'b' as a block of malloc()'d memory and frees the
 * buffer within 'b'.  (If 'b' itself was dynamically allocated, e.g. with
 * dp_packet_new(), then it should still be freed with, e.g.,
 * dp_packet_delete().)  The caller owns the returned memory. */
void *
dp_packet_steal_data(struct dp_packet *b)
{
    void *p;
    /* Pool-backed buffers (DPDK, AF_XDP) cannot be handed to the caller as
     * malloc()'d memory. */
    ovs_assert(b->source != DPBUF_DPDK);
    ovs_assert(b->source != DPBUF_AFXDP);

    if (b->source == DPBUF_MALLOC && dp_packet_data(b) == dp_packet_base(b)) {
        /* Data already starts at the beginning of a malloc()'d block, so
         * the block can be handed over directly. */
        p = dp_packet_data(b);
    } else {
        /* Otherwise copy the data into a fresh block, freeing the original
         * only if 'b' owned it. */
        p = xmemdup(dp_packet_data(b), dp_packet_size(b));
        if (b->source == DPBUF_MALLOC) {
            free(dp_packet_base(b));
        }
    }
    dp_packet_set_base(b, NULL);
    dp_packet_set_data(b, NULL);
    return p;
}
|
|
|
|
|
|
|
|
/* Adds 'increment' to '*offset' unless the offset is unset (UINT16_MAX is
 * the sentinel for "no such layer"), in which case it is left alone. */
static inline void
dp_packet_adjust_layer_offset(uint16_t *offset, int increment)
{
    if (*offset == UINT16_MAX) {
        return;
    }
    *offset += increment;
}
|
|
|
|
|
|
|
|
/* Adjust the size of the l2_5 portion of the dp_packet, updating the l2
 * pointer and the layer offsets.  The caller is responsible for
 * modifying the contents. */
void *
dp_packet_resize_l2_5(struct dp_packet *b, int increment)
{
    /* Grow (push) or shrink (pull) at the head of the packet. */
    if (increment >= 0) {
        dp_packet_push_uninit(b, increment);
    } else {
        dp_packet_pull(b, -increment);
    }

    /* Adjust layer offsets after l2_5. */
    dp_packet_adjust_layer_offset(&b->l3_ofs, increment);
    dp_packet_adjust_layer_offset(&b->l4_ofs, increment);
    dp_packet_adjust_layer_offset(&b->inner_l3_ofs, increment);
    dp_packet_adjust_layer_offset(&b->inner_l4_ofs, increment);

    return dp_packet_data(b);
}
|
|
|
|
|
|
|
|
/* Adjust the size of the l2 portion of the dp_packet, updating the l2
 * pointer and the layer offsets.  The caller is responsible for
 * modifying the contents. */
void *
dp_packet_resize_l2(struct dp_packet *b, int increment)
{
    /* Resizing at l2 also moves everything from l2_5 on, so reuse that
     * helper and additionally fix up the l2_5 offset itself. */
    dp_packet_resize_l2_5(b, increment);
    dp_packet_adjust_layer_offset(&b->l2_5_ofs, increment);
    return dp_packet_data(b);
}
|
2022-07-15 10:16:16 +00:00
|
|
|
|
|
|
|
/* Compares the layer offsets and l2 padding of 'b1' and 'b2'.  Returns true
 * if they all match; otherwise returns false and, if 'err_str' is nonnull,
 * appends a human-readable description of both buffers' offsets to it. */
bool
dp_packet_compare_offsets(struct dp_packet *b1, struct dp_packet *b2,
                          struct ds *err_str)
{
    if ((b1->l2_pad_size != b2->l2_pad_size) ||
        (b1->l2_5_ofs != b2->l2_5_ofs) ||
        (b1->l3_ofs != b2->l3_ofs) ||
        (b1->l4_ofs != b2->l4_ofs) ||
        (b1->inner_l3_ofs != b2->inner_l3_ofs) ||
        (b1->inner_l4_ofs != b2->inner_l4_ofs)) {
        if (err_str) {
            ds_put_format(err_str, "Packet offset comparison failed\n");
            ds_put_format(err_str, "Buffer 1 offsets: l2_pad_size %u,"
                          " l2_5_ofs : %u l3_ofs %u, l4_ofs %u,"
                          " inner_l3_ofs %u, inner_l4_ofs %u\n",
                          b1->l2_pad_size, b1->l2_5_ofs,
                          b1->l3_ofs, b1->l4_ofs,
                          b1->inner_l3_ofs, b1->inner_l4_ofs);
            ds_put_format(err_str, "Buffer 2 offsets: l2_pad_size %u,"
                          " l2_5_ofs : %u l3_ofs %u, l4_ofs %u,"
                          " inner_l3_ofs %u, inner_l4_ofs %u\n",
                          b2->l2_pad_size, b2->l2_5_ofs,
                          b2->l3_ofs, b2->l4_ofs,
                          b2->inner_l3_ofs, b2->inner_l4_ofs);
        }
        return false;
    }
    return true;
}
|
2023-06-14 15:03:26 -04:00
|
|
|
|
|
|
|
/* Checks if the packet 'p' is compatible with netdev_ol_flags 'flags'
 * and if not, updates the packet with the software fall back.
 *
 * Checksums tracked as "partial" still need to be resolved: either the
 * TX interface offloads them (the matching NETDEV_TX_OFFLOAD_* bit is
 * set in 'flags'), or they must be completed here in software before
 * the packet is handed to the netdev. */
void
dp_packet_ol_send_prepare(struct dp_packet *p, uint64_t flags)
{
    /* Fast path: nothing to resolve if no checksum (outer or inner,
     * IP or L4) is in partial state. */
    if (!dp_packet_ip_checksum_partial(p)
        && !dp_packet_l4_checksum_partial(p)
        && !dp_packet_inner_ip_checksum_partial(p)
        && !dp_packet_inner_l4_checksum_partial(p)) {
        /* Only checksumming needs actions. */
        return;
    }

    /* Non-tunneled packet: there is only one set of headers, so resolve
     * the IP and L4 checksums in software whenever the corresponding
     * offload is not available, then we are done. */
    if (!dp_packet_tunnel(p)) {
        if (dp_packet_ip_checksum_partial(p)
            && !(flags & NETDEV_TX_OFFLOAD_IPV4_CKSUM)) {
            /* 'false' selects the outer/only IP header. */
            dp_packet_ip_set_header_csum(p, false);
        }

        if (dp_packet_l4_checksum_partial(p)) {
            /* Exactly one of TCP/UDP/SCTP is expected when the L4
             * checksum is partial; anything else trips the assert. */
            if (dp_packet_l4_proto_tcp(p)) {
                if (!(flags & NETDEV_TX_OFFLOAD_TCP_CKSUM)) {
                    packet_tcp_complete_csum(p, false);
                }
            } else if (dp_packet_l4_proto_udp(p)) {
                if (!(flags & NETDEV_TX_OFFLOAD_UDP_CKSUM)) {
                    packet_udp_complete_csum(p, false);
                }
            } else {
                ovs_assert(dp_packet_l4_proto_sctp(p));
                if (!(flags & NETDEV_TX_OFFLOAD_SCTP_CKSUM)) {
                    packet_sctp_complete_csum(p, false);
                }
            }
        }

        return;
    }

    /* Tunneled packet from here on: inner checksums are handled first,
     * then the outer ones, because completing an inner checksum changes
     * bytes covered by the outer UDP checksum. */
    if (dp_packet_tunnel_geneve(p)
        || dp_packet_tunnel_vxlan(p)) {

        /* If the TX interface doesn't support UDP tunnel offload but does
         * support inner checksum offload and an outer UDP checksum is
         * required, then we can't offload inner checksum either. As that would
         * invalidate the outer checksum. */
        if (!(flags & NETDEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
            && dp_packet_l4_checksum_partial(p)) {
            /* Clear the inner-checksum offload bits locally so the code
             * below falls back to software for the inner headers. */
            flags &= ~(NETDEV_TX_OFFLOAD_TCP_CKSUM |
                       NETDEV_TX_OFFLOAD_UDP_CKSUM |
                       NETDEV_TX_OFFLOAD_SCTP_CKSUM |
                       NETDEV_TX_OFFLOAD_IPV4_CKSUM);
        }
    }

    /* Inner IP header ('true' selects the inner header). */
    if (dp_packet_inner_ip_checksum_partial(p)
        && !(flags & NETDEV_TX_OFFLOAD_IPV4_CKSUM)) {
        dp_packet_ip_set_header_csum(p, true);
    }

    /* Inner L4 header: same TCP/UDP/SCTP dispatch as the non-tunnel
     * case, but operating on the inner headers. */
    if (dp_packet_inner_l4_checksum_partial(p)) {
        if (dp_packet_inner_l4_proto_tcp(p)) {
            if (!(flags & NETDEV_TX_OFFLOAD_TCP_CKSUM)) {
                packet_tcp_complete_csum(p, true);
            }
        } else if (dp_packet_inner_l4_proto_udp(p)) {
            if (!(flags & NETDEV_TX_OFFLOAD_UDP_CKSUM)) {
                packet_udp_complete_csum(p, true);
            }
        } else {
            ovs_assert(dp_packet_inner_l4_proto_sctp(p));
            if (!(flags & NETDEV_TX_OFFLOAD_SCTP_CKSUM)) {
                packet_sctp_complete_csum(p, true);
            }
        }
    }

    /* Outer IP header of the tunnel; gated on the OUTER_IP offload bit,
     * unlike the inner header above. */
    if (dp_packet_ip_checksum_partial(p)
        && !(flags & NETDEV_TX_OFFLOAD_OUTER_IP_CKSUM)) {
        dp_packet_ip_set_header_csum(p, false);
    }

    /* Outer L4 of a tunnel is expected to be UDP only (e.g. Geneve or
     * VXLAN encapsulation) -- the assert enforces that invariant. */
    if (dp_packet_l4_checksum_partial(p)) {
        ovs_assert(dp_packet_l4_proto_udp(p));
        if (!(flags & NETDEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
            packet_udp_complete_csum(p, false);
        }
    }
}
|