Skip to content

Commit

Permalink
netdev-dpdk: Fallback to non tunnel checksum offloading.
Browse files Browse the repository at this point in the history
The outer checksum offloading API in DPDK is ambiguous and was
implemented by Intel folks in their drivers with the assumption that
any outer offloading always goes with an inner offloading request.

With net/i40e and net/ice drivers, in the case of encapsulating an ARP
packet in a vxlan tunnel (which results in requesting outer ip checksum
with a tunnel context but no inner offloading request), a Tx failure is
triggered, associated with a port MDD event.
2024-03-27T16:02:07.084Z|00018|dpdk|WARN|ice_interrupt_handler(): OICR:
	MDD event

To avoid this situation, if no checksum or segmentation offloading is
requested on the inner part of a packet, fall back to a "normal" (non outer)
offloading request.

Reported-at: openvswitch/ovs-issues#321
Fixes: 084c808 ("userspace: Support VXLAN and GENEVE TSO.")
Fixes: f81d782 ("netdev-native-tnl: Mark all vxlan/geneve packets as tunneled.")
Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Kevin Traynor <ktraynor@redhat.com>
Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
(cherry picked from commit 2e03f55)
Signed-off-by: Roi Dayan <roid@nvidia.com>
Change-Id: Ibc94d237c35d785aed8921e9e5c6cac29dbd7ea7
  • Loading branch information
david-marchand authored and roidayan committed Jun 30, 2024
1 parent 9c2e837 commit 7ead5d5
Showing 1 changed file with 41 additions and 30 deletions.
71 changes: 41 additions & 30 deletions lib/netdev-dpdk.c
Original file line number Diff line number Diff line change
Expand Up @@ -2916,16 +2916,18 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
void *l3;
void *l4;

const uint64_t all_requests = (RTE_MBUF_F_TX_IP_CKSUM |
RTE_MBUF_F_TX_L4_MASK |
RTE_MBUF_F_TX_OUTER_IP_CKSUM |
RTE_MBUF_F_TX_OUTER_UDP_CKSUM |
RTE_MBUF_F_TX_TCP_SEG);
const uint64_t all_marks = (RTE_MBUF_F_TX_IPV4 |
RTE_MBUF_F_TX_IPV6 |
RTE_MBUF_F_TX_OUTER_IPV4 |
RTE_MBUF_F_TX_OUTER_IPV6 |
RTE_MBUF_F_TX_TUNNEL_MASK);
const uint64_t all_inner_requests = (RTE_MBUF_F_TX_IP_CKSUM |
RTE_MBUF_F_TX_L4_MASK |
RTE_MBUF_F_TX_TCP_SEG);
const uint64_t all_outer_requests = (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint64_t all_requests = all_inner_requests | all_outer_requests;
const uint64_t all_inner_marks = (RTE_MBUF_F_TX_IPV4 |
RTE_MBUF_F_TX_IPV6);
const uint64_t all_outer_marks = (RTE_MBUF_F_TX_OUTER_IPV4 |
RTE_MBUF_F_TX_OUTER_IPV6 |
RTE_MBUF_F_TX_TUNNEL_MASK);
const uint64_t all_marks = all_inner_marks | all_outer_marks;

if (dp_packet_l4(pkt)) {
l2 = dp_packet_eth(pkt);
Expand Down Expand Up @@ -2962,34 +2964,43 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
* l2 len and outer l3 len. Inner l2/l3/l4 len are calculated
* before. */
const uint64_t tunnel_type = mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
if (tunnel_type == RTE_MBUF_F_TX_TUNNEL_GENEVE ||
tunnel_type == RTE_MBUF_F_TX_TUNNEL_VXLAN) {
mbuf->outer_l2_len = (char *) dp_packet_l3(pkt) -
(char *) dp_packet_eth(pkt);
mbuf->outer_l3_len = (char *) dp_packet_l4(pkt) -
(char *) dp_packet_l3(pkt);

/* If neither inner checksums nor TSO is requested, inner marks
* should not be set. */
if (!(mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM |
RTE_MBUF_F_TX_L4_MASK |
RTE_MBUF_F_TX_TCP_SEG))) {
mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IPV4 |
RTE_MBUF_F_TX_IPV6);
}
} else if (OVS_UNLIKELY(tunnel_type)) {
if (OVS_UNLIKELY(tunnel_type &&
tunnel_type != RTE_MBUF_F_TX_TUNNEL_GENEVE &&
tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN)) {
VLOG_WARN_RL(&rl, "%s: Unexpected tunnel type: %#"PRIx64,
netdev_get_name(&dev->up), tunnel_type);
netdev_dpdk_mbuf_dump(netdev_get_name(&dev->up),
"Packet with unexpected tunnel type", mbuf);
return false;
}

if (tunnel_type && (mbuf->ol_flags & all_inner_requests)) {
mbuf->outer_l2_len = (char *) dp_packet_l3(pkt) -
(char *) dp_packet_eth(pkt);
mbuf->outer_l3_len = (char *) dp_packet_l4(pkt) -
(char *) dp_packet_l3(pkt);
} else {
mbuf->l2_len = (char *) dp_packet_l3(pkt) -
(char *) dp_packet_eth(pkt);
mbuf->l3_len = (char *) dp_packet_l4(pkt) -
(char *) dp_packet_l3(pkt);
if (tunnel_type) {
/* No inner offload is requested, fallback to non tunnel
* checksum offloads. */
mbuf->ol_flags &= ~all_inner_marks;
if (mbuf->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
mbuf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
mbuf->ol_flags |= RTE_MBUF_F_TX_IPV4;
}
if (mbuf->ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
mbuf->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
mbuf->ol_flags |= mbuf->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4
? RTE_MBUF_F_TX_IPV4 : RTE_MBUF_F_TX_IPV6;
}
mbuf->ol_flags &= ~(all_outer_requests | all_outer_marks);
}
mbuf->outer_l2_len = 0;
mbuf->outer_l3_len = 0;
mbuf->l2_len = (char *) dp_packet_l3(pkt) -
(char *) dp_packet_eth(pkt);
mbuf->l3_len = (char *) dp_packet_l4(pkt) -
(char *) dp_packet_l3(pkt);
}
th = dp_packet_l4(pkt);

Expand Down

0 comments on commit 7ead5d5

Please sign in to comment.