net/mlx5e: Implement PSP Tx data path

Set up PSP offload on the Tx data path based on whether the skb indicates
that it is intended for PSP. Support driver-side encapsulation of the UDP
header, PSP header, and PSP trailer for PSP traffic that will be encrypted
by the NIC.
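
A minimal userspace sketch (not driver code) of the per-packet overhead this
encapsulation adds. The macro names and sizes below are local assumptions made
for the example; the driver itself relies on PSP_ENCAP_HLEN and PSP_TRL_SIZE
from <net/psp/types.h> (see the MLX5E_ETH_HARD_MTU change below):

    /* Sketch only: sizes are assumptions, the kernel definitions are
     * authoritative.
     */
    #include <stdio.h>

    #define UDP_HLEN     8   /* UDP header inserted in front of the PSP header */
    #define PSP_HDR_LEN  16  /* base PSP header, no virtualization cookie (assumed) */
    #define PSP_ICV_LEN  16  /* PSP trailer: integrity check value (assumed) */

    int main(void)
    {
            unsigned int encap = UDP_HLEN + PSP_HDR_LEN; /* corresponds to PSP_ENCAP_HLEN */
            unsigned int trailer = PSP_ICV_LEN;          /* corresponds to PSP_TRL_SIZE */

            printf("PSP adds %u header bytes and %u trailer bytes (%u total)\n",
                   encap, trailer, encap + trailer);
            return 0;
    }

The same overhead is reserved in the device hard MTU by the en.h hunk below.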

Signed-off-by: Raed Salem <raeds@nvidia.com>
Signed-off-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Daniel Zahka <daniel.zahka@gmail.com>
Link: https://patch.msgid.link/20250917000954.859376-15-daniel.zahka@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit e5a1861a29 (parent fc72451574)
Author:    Raed Salem
Date:      2025-09-16 17:09:41 -07:00
Committer: Paolo Abeni

6 changed files with 305 additions and 4 deletions


@@ -112,7 +112,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
 					en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
 					en_accel/ktls_tx.o en_accel/ktls_rx.o
 
-mlx5_core-$(CONFIG_MLX5_EN_PSP) += en_accel/psp.o
+mlx5_core-$(CONFIG_MLX5_EN_PSP) += en_accel/psp.o en_accel/psp_rxtx.o
 
 #
 # SW Steering


@@ -47,6 +47,7 @@
 #include <linux/rhashtable.h>
 #include <net/udp_tunnel.h>
 #include <net/switchdev.h>
+#include <net/psp/types.h>
 #include <net/xdp.h>
 #include <linux/dim.h>
 #include <linux/bits.h>
@@ -68,7 +69,7 @@ struct page_pool;
 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
 #define MLX5E_METADATA_ETHER_LEN 8
 
-#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MLX5E_ETH_HARD_MTU (ETH_HLEN + PSP_ENCAP_HLEN + PSP_TRL_SIZE + VLAN_HLEN + ETH_FCS_LEN)
 
 #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
 #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))


@@ -43,6 +43,7 @@
 #include "en.h"
 #include "en/txrx.h"
 #include "en_accel/psp.h"
+#include "en_accel/psp_rxtx.h"
 
 #if IS_ENABLED(CONFIG_GENEVE)
 #include <net/geneve.h>
@@ -120,6 +121,9 @@ struct mlx5e_accel_tx_state {
 #ifdef CONFIG_MLX5_EN_IPSEC
 	struct mlx5e_accel_tx_ipsec_state ipsec;
 #endif
+#ifdef CONFIG_MLX5_EN_PSP
+	struct mlx5e_accel_tx_psp_state psp_st;
+#endif
 };
 
 static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -138,6 +142,13 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
 			return false;
 #endif
 
+#ifdef CONFIG_MLX5_EN_PSP
+	if (mlx5e_psp_is_offload(skb, dev)) {
+		if (unlikely(!mlx5e_psp_handle_tx_skb(dev, skb, &state->psp_st)))
+			return false;
+	}
+#endif
+
 #ifdef CONFIG_MLX5_EN_IPSEC
 	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
 		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
@@ -158,8 +169,14 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
 }
 
 static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+						  struct sk_buff *skb,
 						  struct mlx5e_accel_tx_state *state)
 {
+#ifdef CONFIG_MLX5_EN_PSP
+	if (mlx5e_psp_is_offload_state(&state->psp_st))
+		return mlx5e_psp_tx_ids_len(&state->psp_st);
+#endif
+
 #ifdef CONFIG_MLX5_EN_IPSEC
 	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
 		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
@@ -173,8 +190,14 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
 static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
 				       struct sk_buff *skb,
+				       struct mlx5e_accel_tx_state *accel,
 				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
+#ifdef CONFIG_MLX5_EN_PSP
+	if (mlx5e_psp_is_offload_state(&accel->psp_st))
+		mlx5e_psp_tx_build_eseg(priv, skb, &accel->psp_st, eseg);
+#endif
+
 #ifdef CONFIG_MLX5_EN_IPSEC
 	if (xfrm_offload(skb))
 		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
@@ -200,6 +223,11 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
 	mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
 #endif
 
+#ifdef CONFIG_MLX5_EN_PSP
+	if (mlx5e_psp_is_offload_state(&state->psp_st))
+		mlx5e_psp_handle_tx_wqe(wqe, &state->psp_st, inlseg);
+#endif
+
 #ifdef CONFIG_MLX5_EN_IPSEC
 	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
 	    state->ipsec.xo && state->ipsec.tailen)


@@ -0,0 +1,170 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/psp/types.h>
#include "en.h"
#include "psp.h"
#include "en_accel/psp_rxtx.h"
#include "en_accel/psp.h"
static void mlx5e_psp_set_swp(struct sk_buff *skb,
struct mlx5e_accel_tx_psp_state *psp_st,
struct mlx5_wqe_eth_seg *eseg)
{
/* Tunnel Mode:
 * SWP: OutL3        InL3  InL4
 * Pkt: MAC IP UDP PSP IP L4
 *
 * Transport Mode:
 * SWP: OutL3        OutL4
 * Pkt: MAC IP UDP PSP L4
 *
 * Tunnel(VXLAN TCP/UDP) over Transport Mode:
 * SWP: OutL3        InL3  InL4
 * Pkt: MAC IP UDP PSP UDP VXLAN IP L4
 */
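/* Note: the SWP (software parser) offsets programmed below are expressed in
 * 2-byte words, which is why every offset taken from the skb is divided by 2.
 */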
u8 inner_ipproto = 0;
struct ethhdr *eth;
/* Shared settings */
eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
if (skb->protocol == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
inner_ipproto = skb->inner_ipproto;
/* Set SWP additional flags for packet of type IP|UDP|PSP|[ TCP | UDP ] */
switch (inner_ipproto) {
case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
fallthrough;
case IPPROTO_TCP:
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
break;
default:
break;
}
} else {
/* L2-in-UDP tunneling such as VXLAN: parse the inner Ethernet header */
if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
return;
eth = (struct ethhdr *)skb_inner_mac_header(skb);
switch (ntohs(eth->h_proto)) {
case ETH_P_IP:
inner_ipproto = ((struct iphdr *)((char *)skb->data +
skb_inner_network_offset(skb)))->protocol;
break;
case ETH_P_IPV6:
inner_ipproto = ((struct ipv6hdr *)((char *)skb->data +
skb_inner_network_offset(skb)))->nexthdr;
break;
default:
break;
}
/* Tunnel (VXLAN TCP/UDP) over transport mode PSP, i.e. the PSP payload is a VXLAN tunnel */
switch (inner_ipproto) {
case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
fallthrough;
case IPPROTO_TCP:
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
eseg->swp_inner_l4_offset =
(skb->csum_start + skb->head - skb->data) / 2;
if (skb->protocol == htons(ETH_P_IPV6))
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
break;
default:
break;
}
psp_st->inner_ipproto = inner_ipproto;
}
}
static bool mlx5e_psp_set_state(struct mlx5e_priv *priv,
struct sk_buff *skb,
struct mlx5e_accel_tx_psp_state *psp_st)
{
struct psp_assoc *pas;
bool ret = false;
rcu_read_lock();
pas = psp_skb_get_assoc_rcu(skb);
if (!pas)
goto out;
ret = true;
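/* A non-zero tailen also marks the skb as PSP-offloaded for the rest of
 * the Tx path (see mlx5e_psp_is_offload_state()).
 */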
psp_st->tailen = PSP_TRL_SIZE;
psp_st->spi = pas->tx.spi;
psp_st->ver = pas->version;
psp_st->keyid = *(u32 *)pas->drv_data;
out:
rcu_read_unlock();
return ret;
}
void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5e_accel_tx_psp_state *psp_st,
struct mlx5_wqe_eth_seg *eseg)
{
if (!mlx5_is_psp_device(priv->mdev))
return;
if (unlikely(skb->protocol != htons(ETH_P_IP) &&
skb->protocol != htons(ETH_P_IPV6)))
return;
mlx5e_psp_set_swp(skb, psp_st, eseg);
/* Special workaround for PSP LSO on ConnectX-7 */
eseg->swp_outer_l3_offset = 0;
eseg->swp_inner_l3_offset = 0;
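/* Pass the PSP key id to the HW via flow table metadata and request
 * trailer (ICV) insertion associated with the outer L4 header.
 */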
eseg->flow_table_metadata |= cpu_to_be32(psp_st->keyid);
eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER) |
cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
}
void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
struct mlx5e_accel_tx_psp_state *psp_st,
struct mlx5_wqe_inline_seg *inlseg)
{
inlseg->byte_count = cpu_to_be32(psp_st->tailen | MLX5_INLINE_SEG);
}
bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5e_accel_tx_psp_state *psp_st)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct net *net = sock_net(skb->sk);
const struct ipv6hdr *ip6;
struct tcphdr *th;
if (!mlx5e_psp_set_state(priv, skb, psp_st))
return true;
/* Encapsulate the packet with the UDP and PSP headers */
if (!psp_dev_encapsulate(net, skb, psp_st->spi, psp_st->ver, 0)) {
kfree_skb_reason(skb, SKB_DROP_REASON_PSP_OUTPUT);
return false;
}
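/* For GSO, re-seed the inner TCP pseudo-header checksum over the IPv6
 * addresses now that the UDP and PSP headers have been inserted.
 */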
if (skb_is_gso(skb)) {
ip6 = ipv6_hdr(skb);
th = inner_tcp_hdr(skb);
th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb), &ip6->saddr,
&ip6->daddr, 0);
}
return true;
}


@@ -0,0 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5E_PSP_RXTX_H__
#define __MLX5E_PSP_RXTX_H__
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include <net/psp.h>
#include "en.h"
#include "en/txrx.h"
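/* Per-skb PSP Tx offload state, filled in by mlx5e_psp_handle_tx_skb() and
 * consumed while building the WQE: tailen is the PSP trailer length to
 * inline, keyid is passed to the HW through flow table metadata, spi and
 * ver are placed in the PSP header, and inner_ipproto drives the
 * checksum/SWP offload setup.
 */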
struct mlx5e_accel_tx_psp_state {
u32 tailen;
u32 keyid;
__be32 spi;
u8 inner_ipproto;
u8 ver;
};
#ifdef CONFIG_MLX5_EN_PSP
static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_psp_state *psp_state)
{
return (psp_state->tailen != 0);
}
static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
{
bool ret;
rcu_read_lock();
ret = !!psp_skb_get_assoc_rcu(skb);
rcu_read_unlock();
return ret;
}
bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5e_accel_tx_psp_state *psp_st);
void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5e_accel_tx_psp_state *psp_st,
struct mlx5_wqe_eth_seg *eseg);
void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
struct mlx5e_accel_tx_psp_state *psp_st,
struct mlx5_wqe_inline_seg *inlseg);
static inline bool mlx5e_psp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_psp_state *psp_st,
struct mlx5_wqe_eth_seg *eseg)
{
u8 inner_ipproto;
if (!mlx5e_psp_is_offload_state(psp_st))
return false;
inner_ipproto = psp_st->inner_ipproto;
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (inner_ipproto) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
sq->stats->csum_partial_inner++;
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
sq->stats->csum_partial_inner++;
}
return true;
}
static inline unsigned int mlx5e_psp_tx_ids_len(struct mlx5e_accel_tx_psp_state *psp_st)
{
return psp_st->tailen;
}
#else
static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_psp_state *psp_state)
{
return false;
}
static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
{
return false;
}
static inline bool mlx5e_psp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_psp_state *psp_st,
struct mlx5_wqe_eth_seg *eseg)
{
return false;
}
#endif /* CONFIG_MLX5_EN_PSP */
#endif /* __MLX5E_PSP_RXTX_H__ */


@@ -39,6 +39,7 @@
 #include "ipoib/ipoib.h"
 #include "en_accel/en_accel.h"
 #include "en_accel/ipsec_rxtx.h"
+#include "en_accel/psp_rxtx.h"
 #include "en_accel/macsec.h"
 #include "en/ptp.h"
 #include <net/ipv6.h>
@@ -120,6 +121,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 			    struct mlx5e_accel_tx_state *accel,
 			    struct mlx5_wqe_eth_seg *eseg)
 {
+#ifdef CONFIG_MLX5_EN_PSP
+	if (unlikely(mlx5e_psp_txwqe_build_eseg_csum(sq, skb, &accel->psp_st, eseg)))
+		return;
+#endif
+
 	if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
 		return;
@@ -297,7 +303,7 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		stats->packets++;
 	}
 
-	attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
+	attr->insz = mlx5e_accel_tx_ids_len(sq, skb, accel);
 	stats->bytes += attr->num_bytes;
 }
@@ -661,7 +667,7 @@ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *
 				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
 				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
-	mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
+	mlx5e_accel_tx_eseg(priv, skb, accel, eseg, ihs);
 	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
 
 	if (unlikely(sq->ptpsq))
 		mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);