
dp-packet: Rework TCP segmentation.

Rather than marking packets with an offload flag plus a segmentation
size, simply rely on the netdev implementation, which sets a
segmentation size when appropriate.

Signed-off-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
David Marchand 2025-06-17 09:21:00 +02:00 committed by Ilya Maximets
parent e36793e11f
commit cf7b86db1f
7 changed files with 18 additions and 57 deletions
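
The whole change reduces to one convention: a packet requests TCP
segmentation if and only if its segment size is non-zero. A minimal
sketch of the predicate that the rest of the diff substitutes for the
old flag test (the helper name here is illustrative, not part of the
patch):

    #include "dp-packet.h"

    /* Sketch: TSO is now signalled by a non-zero segment size alone;
     * there is no separate offload flag left to keep in sync with it. */
    static inline bool
    packet_wants_tso(const struct dp_packet *pkt)
    {
        return dp_packet_get_tso_segsz(pkt) != 0;
    }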

lib/dp-packet-gso.c

@@ -63,8 +63,6 @@ dp_packet_gso_seg_new(const struct dp_packet *p, size_t hdr_len,
                                     & DP_PACKET_OL_SUPPORTED_MASK;
     seg->offloads = p->offloads;
-    dp_packet_hwol_reset_tcp_seg(seg);
     return seg;
 }
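
With the flag gone, dp_packet_gso_seg_new() no longer has to strip a
TSO mark from each freshly built segment: a new dp_packet starts with a
zero tso_segsz, so the invariant holds by construction. A hedged sketch
of that invariant (hypothetical check, assuming segments are allocated
fresh rather than cloned with their parent's segment size):

    #include "dp-packet.h"
    #include "util.h"

    /* Sketch: a segment produced by software GSO must not itself request
     * segmentation again; this formerly took an explicit
     * dp_packet_hwol_reset_tcp_seg() on every segment. */
    static void
    assert_segment_is_final(const struct dp_packet *seg)
    {
        ovs_assert(dp_packet_get_tso_segsz(seg) == 0);
    }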

lib/dp-packet.h

@@ -55,16 +55,7 @@ enum OVS_PACKED_ENUM dp_packet_source {
 #define DEF_OL_FLAG(NAME, DPDK_DEF, GENERIC_DEF) NAME = GENERIC_DEF
 #endif
 
-/* Bit masks for the 'ol_flags' member of the 'dp_packet' structure. */
-enum {
-    /* Value 0 is not used. */
-    /* TCP Segmentation Offload. */
-    DEF_OL_FLAG(DP_PACKET_OL_TX_TCP_SEG, RTE_MBUF_F_TX_TCP_SEG, 0x40),
-    /* Adding new field requires adding to DP_PACKET_OL_SUPPORTED_MASK. */
-};
-#define DP_PACKET_OL_SUPPORTED_MASK DP_PACKET_OL_TX_TCP_SEG
+#define DP_PACKET_OL_SUPPORTED_MASK 0
 
 /* Bit masks for the 'offloads' member of the 'dp_packet' structure. */
 enum OVS_PACKED_ENUM dp_packet_offload_mask {
@@ -1122,29 +1113,6 @@ dp_packet_tunnel(const struct dp_packet *b)
     return !!(b->offloads & DP_PACKET_OL_TUNNEL_MASK);
 }
 
-/* Returns 'true' if packet 'b' is marked for TCP segmentation offloading. */
-static inline bool
-dp_packet_hwol_is_tso(const struct dp_packet *b)
-{
-    return !!(*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_TCP_SEG);
-}
-
-/* Mark packet 'b' for TCP segmentation offloading.  It implies that
- * either the packet 'b' is marked for IPv4 or IPv6 checksum offloading
- * and also for TCP checksum offloading. */
-static inline void
-dp_packet_hwol_set_tcp_seg(struct dp_packet *b)
-{
-    *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TCP_SEG;
-}
-
-/* Resets TCP Segmentation in packet 'p'. */
-static inline void
-dp_packet_hwol_reset_tcp_seg(struct dp_packet *p)
-{
-    *dp_packet_ol_flags_ptr(p) &= ~DP_PACKET_OL_TX_TCP_SEG;
-}
-
 /* Marks packet 'p' with good IPv4 checksum. */
 static inline void
 dp_packet_ip_checksum_set_good(struct dp_packet *p)
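
Since DP_PACKET_OL_TX_TCP_SEG was the last generic 'ol_flags' bit,
DP_PACKET_OL_SUPPORTED_MASK collapses to 0. One consequence, sketched
under that definition: the masking that dp_packet_gso_seg_new() applies
to the parent's flags (first hunk of this commit) now clears every bit.

    #include "dp-packet.h"

    /* Sketch: with DP_PACKET_OL_SUPPORTED_MASK == 0 this always returns
     * 0, so GSO segments inherit no Tx offload flag bits at all. */
    static uint64_t
    inheritable_ol_flags(const struct dp_packet *p)
    {
        return *dp_packet_ol_flags_ptr(p) & DP_PACKET_OL_SUPPORTED_MASK;
    }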

lib/netdev-dpdk.c

@@ -2661,13 +2661,11 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
     void *l3;
     void *l4;
 
-    const uint64_t all_inner_requests = RTE_MBUF_F_TX_TCP_SEG;
-
     if (!dp_packet_ip_checksum_partial(pkt)
         && !dp_packet_inner_ip_checksum_partial(pkt)
         && !dp_packet_l4_checksum_partial(pkt)
         && !dp_packet_inner_l4_checksum_partial(pkt)
-        && !(mbuf->ol_flags & all_inner_requests)) {
+        && !mbuf->tso_segsz) {
         uint64_t unexpected = mbuf->ol_flags & RTE_MBUF_F_TX_OFFLOAD_MASK;
 
         if (OVS_UNLIKELY(unexpected)) {
@@ -2683,7 +2681,7 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
     if (dp_packet_tunnel(pkt)
         && (dp_packet_inner_ip_checksum_partial(pkt)
             || dp_packet_inner_l4_checksum_partial(pkt)
-            || (mbuf->ol_flags & all_inner_requests))) {
+            || mbuf->tso_segsz)) {
         if (dp_packet_ip_checksum_partial(pkt)
             || dp_packet_l4_checksum_partial(pkt)) {
             mbuf->outer_l2_len = (char *) dp_packet_l3(pkt) -
@@ -2775,7 +2773,7 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
     mbuf->l2_len = (char *) l3 - (char *) l2;
     mbuf->l3_len = (char *) l4 - (char *) l3;
 
-    if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+    if (mbuf->tso_segsz) {
         struct tcp_header *th = l4;
         uint16_t link_tso_segsz;
         int hdr_len;
@@ -2800,6 +2798,7 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
                          dev->max_packet_len);
             return false;
         }
+        mbuf->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
     }
 
     /* If L4 checksum is requested, IPv4 should be requested as well. */
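
Note the direction of information flow above: RTE_MBUF_F_TX_TCP_SEG is
no longer an input carried on the packet; it is derived from
mbuf->tso_segsz at the edge of the DPDK datapath once the header
lengths are known. Roughly (a simplified sketch; the real code also
validates the segment size against the link and computes l4_len from
the packet):

    #include <rte_mbuf.h>

    /* Sketch: translate OVS's "non-zero tso_segsz" convention into the
     * DPDK Tx flag only here, just before handing the mbuf to the PMD. */
    static void
    mark_mbuf_tso(struct rte_mbuf *mbuf, uint8_t tcp_hdr_len)
    {
        if (mbuf->tso_segsz) {
            mbuf->l4_len = tcp_hdr_len;               /* TCP header bytes. */
            mbuf->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;  /* Request NIC TSO. */
        }
    }
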
@@ -3113,7 +3112,7 @@ netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
     for (i = 0; i < pkt_cnt; i++) {
         pkt = pkts[i];
         if (OVS_UNLIKELY((pkt->pkt_len > dev->max_packet_len)
-                         && !(pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG))) {
+                         && !pkt->tso_segsz)) {
             VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " "
                          "max_packet_len %d", dev->up.name, pkt->pkt_len,
                          dev->max_packet_len);
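
The oversize filter keeps its exemption, re-keyed to the new state: a
frame longer than max_packet_len is tolerated only when transmit-time
segmentation will shrink it. As a predicate (sketch, assuming pkt_len
and tso_segsz are already populated on the mbuf):

    #include <stdbool.h>
    #include <rte_mbuf.h>

    /* Sketch: oversized packets pass only if TSO will cut them down. */
    static bool
    packet_len_acceptable(const struct rte_mbuf *pkt, uint32_t max_len)
    {
        return pkt->pkt_len <= max_len || pkt->tso_segsz;
    }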

lib/netdev-dummy.c

@@ -1223,7 +1223,6 @@ netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch,
     if (userspace_tso_enabled() && netdev->ol_tso_segsz) {
         dp_packet_set_tso_segsz(packet, netdev->ol_tso_segsz);
-        dp_packet_hwol_set_tcp_seg(packet);
     }
 
     if (VLOG_IS_DBG_ENABLED()) {
@@ -1312,7 +1311,7 @@ netdev_dummy_send(struct netdev *netdev, int qid,
             flags &= ~NETDEV_TX_OFFLOAD_UDP_CKSUM;
         }
         is_tso = userspace_tso_enabled() && dev->ol_tso_segsz &&
-                 dp_packet_hwol_is_tso(packet);
+                 dp_packet_get_tso_segsz(packet);
         ovs_mutex_unlock(&dev->mutex);
 
         if (!dp_packet_is_eth(packet)) {
@@ -1350,14 +1349,12 @@ netdev_dummy_send(struct netdev *netdev, int qid,
             ip_csum_bad = !!(packet->offloads & DP_PACKET_OL_IP_CKSUM_BAD);
             l4_csum_good = !!(packet->offloads & DP_PACKET_OL_L4_CKSUM_GOOD);
             l4_csum_bad = !!(packet->offloads & DP_PACKET_OL_L4_CKSUM_BAD);
-            VLOG_DBG("Tx: packet with csum IP %s, L4 %s, segsz %"PRIu16
-                     ", Tx flags %s",
+            VLOG_DBG("Tx: packet with csum IP %s, L4 %s, segsz %"PRIu16,
                      ip_csum_good ? (ip_csum_bad ? "partial" : "good")
                                   : (ip_csum_bad ? "bad" : "unknown"),
                      l4_csum_good ? (l4_csum_bad ? "partial" : "good")
                                   : (l4_csum_bad ? "bad" : "unknown"),
-                     dp_packet_get_tso_segsz(packet),
-                     dp_packet_hwol_is_tso(packet) ? "tso" : "none");
+                     dp_packet_get_tso_segsz(packet));
         }
 
         if (dp_packet_ip_checksum_partial(packet)
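
On the dummy netdev both directions shrink to the single field: receive
marks TSO by setting the segment size, and the transmit-side debug log
prints that size with no separate "tso/none" state left to report.
Receive-side marking reduces to something like this (sketch mirroring
the first hunk of this file; 'dev_tso_segsz' stands for the device's
configured ol_tso_segsz):

    #include "dp-packet.h"

    /* Sketch: receive-side TSO marking is now a single call. */
    static void
    rx_mark_tso(struct dp_packet *packet, uint16_t dev_tso_segsz)
    {
        if (dev_tso_segsz) {
            dp_packet_set_tso_segsz(packet, dev_tso_segsz);
        }
    }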

lib/netdev-linux.c

@@ -7104,7 +7104,6 @@ netdev_linux_parse_vnet_hdr(struct dp_packet *b)
     case VIRTIO_NET_HDR_GSO_TCPV4:
     case VIRTIO_NET_HDR_GSO_TCPV6:
         dp_packet_set_tso_segsz(b, (OVS_FORCE uint16_t) vnet->gso_size);
-        dp_packet_hwol_set_tcp_seg(b);
         break;
 
     case VIRTIO_NET_HDR_GSO_UDP:
@@ -7134,7 +7133,7 @@ netdev_linux_prepend_vnet_hdr(struct dp_packet *b, int mtu)
     struct virtio_net_hdr v;
     struct virtio_net_hdr *vnet = &v;
 
-    if (dp_packet_hwol_is_tso(b)) {
+    if (dp_packet_get_tso_segsz(b)) {
         uint16_t tso_segsz = dp_packet_get_tso_segsz(b);
         const struct tcp_header *tcp;
         const struct ip_header *ip;
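
netdev-linux maps the virtio-net header onto the same single field:
gso_size becomes tso_segsz when parsing, and a non-zero tso_segsz
regenerates the GSO fields when prepending. The parse direction,
reduced to the TCP cases (sketch; struct virtio_net_hdr is the standard
virtio ABI, and the plain cast stands in for the OVS_FORCE cast above):

    #include <linux/virtio_net.h>
    #include "dp-packet.h"

    /* Sketch: the guest's TCP GSO metadata collapses into one field. */
    static void
    parse_tcp_gso(struct dp_packet *b, const struct virtio_net_hdr *vnet)
    {
        switch (vnet->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
        case VIRTIO_NET_HDR_GSO_TCPV4:
        case VIRTIO_NET_HDR_GSO_TCPV6:
            dp_packet_set_tso_segsz(b, (uint16_t) vnet->gso_size);
            break;
        }
    }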

lib/netdev-native-tnl.c

@@ -548,7 +548,7 @@ netdev_gre_push_header(const struct netdev *netdev,
     }
 
     if (greh->flags & htons(GRE_SEQ)) {
-        if (!dp_packet_hwol_is_tso(packet)) {
+        if (!dp_packet_get_tso_segsz(packet)) {
             /* Last 4 bytes are GRE seqno. */
             int seq_ofs = gre_header_len(greh->flags) - 4;
             ovs_16aligned_be32 *seq_opt =
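
The reasoning behind the unchanged GRE branch: the pushed header is a
template that segmentation replicates onto every resulting frame, so a
single sequence number written here could not be incremented per
segment; the update therefore only runs when no segmentation is
pending. In outline (a fragment restating the guarded arithmetic above,
not standalone code):

    /* Sketch: the GRE seqno occupies the last 4 bytes of the GRE header
     * and may only be written when the packet leaves as one frame. */
    if (!dp_packet_get_tso_segsz(packet) && (greh->flags & htons(GRE_SEQ))) {
        int seq_ofs = gre_header_len(greh->flags) - 4;
        /* ... write the 32-bit sequence number at seq_ofs ... */
    }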

lib/netdev.c

@@ -816,7 +816,7 @@ netdev_send_tso(struct netdev *netdev, int qid,
      * the segmentation. */
     n_packets = 0;
     DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
-        if (dp_packet_hwol_is_tso(packet)) {
+        if (dp_packet_get_tso_segsz(packet)) {
             n_packets += dp_packet_gso_nr_segs(packet);
         } else {
             n_packets++;
@@ -842,7 +842,7 @@ netdev_send_tso(struct netdev *netdev, int qid,
     size_t k;
 
     curr_batch = batches;
     DP_PACKET_BATCH_REFILL_FOR_EACH (k, size, packet, batch) {
-        if (dp_packet_hwol_is_tso(packet)) {
+        if (dp_packet_get_tso_segsz(packet)) {
             if (dp_packet_gso(packet, &curr_batch)) {
                 COVERAGE_INC(netdev_soft_seg_good);
             } else {
@@ -911,7 +911,7 @@ netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
     if (userspace_tso_enabled()) {
         if (!(netdev_flags & NETDEV_TX_OFFLOAD_TCP_TSO)) {
             DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
-                if (dp_packet_hwol_is_tso(packet)) {
+                if (dp_packet_get_tso_segsz(packet)) {
                     return netdev_send_tso(netdev, qid, batch, concurrent_txq);
                 }
             }
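
Every branch of netdev_send() above funnels TSO packets that the device
cannot handle into netdev_send_tso(), the software fallback:
dp_packet_gso() performs the split, and the earlier hunks pre-count
segments to size the output batches. The counting rule that
dp_packet_gso_nr_segs() embodies is ordinary ceiling division (sketch,
assuming tso_segsz counts TCP payload bytes per segment):

    #include "util.h"

    /* Sketch: a TSO packet expands to ceil(payload / MSS) segments;
     * a packet without a segment size stays a single packet. */
    static unsigned int
    expected_segments(uint32_t tcp_payload_len, uint16_t tso_segsz)
    {
        return tso_segsz ? DIV_ROUND_UP(tcp_payload_len, tso_segsz) : 1;
    }
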
@@ -919,14 +919,14 @@ netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
                                        NETDEV_TX_GRE_TNL_TSO |
                                        NETDEV_TX_GENEVE_TNL_TSO))) {
             DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
-                if (dp_packet_hwol_is_tso(packet)
+                if (dp_packet_get_tso_segsz(packet)
                     && dp_packet_tunnel(packet)) {
                     return netdev_send_tso(netdev, qid, batch, concurrent_txq);
                 }
             }
         } else if (!(netdev_flags & NETDEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
             DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
-                if (dp_packet_hwol_is_tso(packet)
+                if (dp_packet_get_tso_segsz(packet)
                     && (dp_packet_tunnel_vxlan(packet)
                         || dp_packet_tunnel_geneve(packet))
                     && dp_packet_l4_checksum_partial(packet)) {
@@ -1013,7 +1013,7 @@ netdev_push_header(const struct netdev *netdev,
                           data->tnl_type != OVS_VPORT_TYPE_VXLAN &&
                           data->tnl_type != OVS_VPORT_TYPE_GRE &&
                           data->tnl_type != OVS_VPORT_TYPE_IP6GRE &&
-                          dp_packet_hwol_is_tso(packet))) {
+                          dp_packet_get_tso_segsz(packet))) {
             COVERAGE_INC(netdev_push_header_drops);
             dp_packet_delete(packet);
             VLOG_WARN_RL(&rl, "%s: Tunneling packets with TSO is not "
@@ -1026,7 +1026,7 @@ netdev_push_header(const struct netdev *netdev,
                    data->tnl_type != OVS_VPORT_TYPE_IP6GRE) {
             dp_packet_ol_send_prepare(packet, 0);
         } else if (dp_packet_tunnel(packet)) {
-            if (dp_packet_hwol_is_tso(packet)) {
+            if (dp_packet_get_tso_segsz(packet)) {
                 COVERAGE_INC(netdev_push_header_drops);
                 dp_packet_delete(packet);
                 VLOG_WARN_RL(&rl, "%s: Tunneling packets with TSO is not "