net/mlx5_fpga: Drop INNOVA TLS support
Mellanox INNOVA TLS cards reached EOL in May 2018 [1]. As such, the code
is unmaintained, untested and not in use by any upstream/distro oriented
customers. In order to reduce code complexity, drop the kernel code.

[1] https://network.nvidia.com/related-docs/eol/LCR-000286.pdf

Link: https://lore.kernel.org/r/b88add368def721ea9d054cb69def72d9e3f67aa.1649073691.git.leonro@nvidia.com
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
This commit is contained in:
parent 3123109284
commit 40379a0084
@@ -177,20 +177,6 @@ config MLX5_EN_IPSEC
	  Note: Support for hardware with this capability needs to be selected
	  for this option to become available.

config MLX5_FPGA_TLS
	bool "Mellanox Technologies TLS Innova support"
	depends on TLS_DEVICE
	depends on TLS=y || MLX5_CORE=m
	depends on MLX5_CORE_EN
	depends on MLX5_FPGA
	select MLX5_EN_TLS
	help
	  Build TLS support for the Innova family of network cards by Mellanox
	  Technologies. Innova network cards are comprised of a ConnectX chip
	  and an FPGA chip on one board. If you select this option, the
	  mlx5_core driver will include the Innova FPGA core and allow building
	  sandbox-specific client drivers.

config MLX5_TLS
	bool "Mellanox Technologies TLS Connect-X support"
	depends on TLS_DEVICE
@@ -90,7 +90,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o

mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
@@ -37,53 +37,6 @@
#include "mlx5_core.h"
#include "lib/mlx5.h"

#ifdef CONFIG_MLX5_FPGA_TLS
#include "fpga/tls.h"

int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			    struct tls_crypto_info *crypto_info,
			    u32 start_offload_tcp_sn, u32 *p_swid,
			    bool direction_sx)
{
	return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
				      start_offload_tcp_sn, p_swid,
				      direction_sx);
}

void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
			     bool direction_sx)
{
	mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
}

int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
			     u32 seq, __be64 rcd_sn)
{
	return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
}

bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
	return mlx5_fpga_is_tls_device(mdev) ||
	       mlx5_accel_is_ktls_device(mdev);
}

u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
{
	return mlx5_fpga_tls_device_caps(mdev);
}

int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
{
	return mlx5_fpga_tls_init(mdev);
}

void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
{
	mlx5_fpga_tls_cleanup(mdev);
}
#endif

#ifdef CONFIG_MLX5_TLS
int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
			 struct tls_crypto_info *crypto_info,
@@ -97,60 +97,4 @@ static inline bool
mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
		      struct tls_crypto_info *crypto_info) { return false; }
#endif

enum {
	MLX5_ACCEL_TLS_TX = BIT(0),
	MLX5_ACCEL_TLS_RX = BIT(1),
	MLX5_ACCEL_TLS_V12 = BIT(2),
	MLX5_ACCEL_TLS_V13 = BIT(3),
	MLX5_ACCEL_TLS_LRO = BIT(4),
	MLX5_ACCEL_TLS_IPV6 = BIT(5),
	MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
	MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
};

struct mlx5_ifc_tls_flow_bits {
	u8 src_port[0x10];
	u8 dst_port[0x10];
	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
	u8 ipv6[0x1];
	u8 direction_sx[0x1];
	u8 reserved_at_2[0x1e];
};

#ifdef CONFIG_MLX5_FPGA_TLS
int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			    struct tls_crypto_info *crypto_info,
			    u32 start_offload_tcp_sn, u32 *p_swid,
			    bool direction_sx);
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
			     bool direction_sx);
int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
			     u32 seq, __be64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);

#else

static inline int
mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			struct tls_crypto_info *crypto_info,
			u32 start_offload_tcp_sn, u32 *p_swid,
			bool direction_sx) { return -ENOTSUPP; }
static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
					   bool direction_sx) { }
static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
					   u32 seq, __be64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
	return mlx5_accel_is_ktls_device(mdev);
}
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
#endif

#endif /* __MLX5_ACCEL_TLS_H__ */
@@ -354,7 +354,6 @@ enum {
	MLX5E_RQ_STATE_AM,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
	MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
	MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
	MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
};
@@ -36,188 +36,12 @@
#include "en_accel/tls.h"
#include "accel/tls.h"

static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	MLX5_SET(tls_flow, flow, ipv6, 0);
	memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
	       &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
	memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	       &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	MLX5_SET(tls_flow, flow, ipv6, 1);
	memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
	memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
}
#endif

static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
	       MLX5_FLD_SZ_BYTES(tls_flow, src_port));
	memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
	       MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
}

static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
{
	switch (sk->sk_family) {
	case AF_INET:
		mlx5e_tls_set_ipv4_flow(flow, sk);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (!sk->sk_ipv6only &&
		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
			mlx5e_tls_set_ipv4_flow(flow, sk);
			break;
		}
		if (!(caps & MLX5_ACCEL_TLS_IPV6))
			goto error_out;

		mlx5e_tls_set_ipv6_flow(flow, sk);
		break;
#endif
	default:
		goto error_out;
	}

	mlx5e_tls_set_flow_tcp_ports(flow, sk);
	return 0;
error_out:
	return -EINVAL;
}

static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
			 enum tls_offload_ctx_dir direction,
			 struct tls_crypto_info *crypto_info,
			 u32 start_offload_tcp_sn)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 caps = mlx5_accel_tls_device_caps(mdev);
	int ret = -ENOMEM;
	void *flow;
	u32 swid;

	flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
	if (!flow)
		return ret;

	ret = mlx5e_tls_set_flow(flow, sk, caps);
	if (ret)
		goto free_flow;

	ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
				      start_offload_tcp_sn, &swid,
				      direction == TLS_OFFLOAD_CTX_DIR_TX);
	if (ret < 0)
		goto free_flow;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		struct mlx5e_tls_offload_context_tx *tx_ctx =
			mlx5e_get_tls_tx_context(tls_ctx);

		tx_ctx->swid = htonl(swid);
		tx_ctx->expected_seq = start_offload_tcp_sn;
	} else {
		struct mlx5e_tls_offload_context_rx *rx_ctx =
			mlx5e_get_tls_rx_context(tls_ctx);

		rx_ctx->handle = htonl(swid);
	}

	return 0;
free_flow:
	kfree(flow);
	return ret;
}

static void mlx5e_tls_del(struct net_device *netdev,
			  struct tls_context *tls_ctx,
			  enum tls_offload_ctx_dir direction)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	unsigned int handle;

	handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
		       mlx5e_get_tls_tx_context(tls_ctx)->swid :
		       mlx5e_get_tls_rx_context(tls_ctx)->handle);

	mlx5_accel_tls_del_flow(priv->mdev, handle,
				direction == TLS_OFFLOAD_CTX_DIR_TX);
}

static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
			    u32 seq, u8 *rcd_sn_data,
			    enum tls_offload_ctx_dir direction)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_tls_offload_context_rx *rx_ctx;
	__be64 rcd_sn = *(__be64 *)rcd_sn_data;

	if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
		return -EINVAL;
	rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);

	netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
		    be64_to_cpu(rcd_sn));
	mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
	atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);

	return 0;
}

static const struct tlsdev_ops mlx5e_tls_ops = {
	.tls_dev_add = mlx5e_tls_add,
	.tls_dev_del = mlx5e_tls_del,
	.tls_dev_resync = mlx5e_tls_resync,
};

void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	u32 caps;

	if (mlx5e_accel_is_ktls_device(priv->mdev)) {
		mlx5e_ktls_build_netdev(priv);
		return;
	}

	/* FPGA */
	if (!mlx5e_accel_is_tls_device(priv->mdev))
	if (!mlx5e_accel_is_ktls_device(priv->mdev))
		return;

	caps = mlx5_accel_tls_device_caps(priv->mdev);
	if (caps & MLX5_ACCEL_TLS_TX) {
		netdev->features |= NETIF_F_HW_TLS_TX;
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
	}

	if (caps & MLX5_ACCEL_TLS_RX) {
		netdev->features |= NETIF_F_HW_TLS_RX;
		netdev->hw_features |= NETIF_F_HW_TLS_RX;
	}

	if (!(caps & MLX5_ACCEL_TLS_LRO)) {
		netdev->features &= ~NETIF_F_LRO;
		netdev->hw_features &= ~NETIF_F_LRO;
	}

	netdev->tlsdev_ops = &mlx5e_tls_ops;
	mlx5e_ktls_build_netdev(priv);
}

int mlx5e_tls_init(struct mlx5e_priv *priv)
@@ -43,16 +43,8 @@
struct mlx5e_tls_sw_stats {
	atomic64_t tx_tls_ctx;
	atomic64_t tx_tls_del;
	atomic64_t tx_tls_drop_metadata;
	atomic64_t tx_tls_drop_resync_alloc;
	atomic64_t tx_tls_drop_no_sync_data;
	atomic64_t tx_tls_drop_bypass_required;
	atomic64_t rx_tls_ctx;
	atomic64_t rx_tls_del;
	atomic64_t rx_tls_drop_resync_request;
	atomic64_t rx_tls_resync_request;
	atomic64_t rx_tls_resync_reply;
	atomic64_t rx_tls_auth_fail;
};

struct mlx5e_tls {
@@ -60,42 +52,6 @@ struct mlx5e_tls {
	struct workqueue_struct *rx_wq;
};

struct mlx5e_tls_offload_context_tx {
	struct tls_offload_context_tx base;
	u32 expected_seq;
	__be32 swid;
};

static inline struct mlx5e_tls_offload_context_tx *
mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
		     TLS_OFFLOAD_CONTEXT_SIZE_TX);
	return container_of(tls_offload_ctx_tx(tls_ctx),
			    struct mlx5e_tls_offload_context_tx,
			    base);
}

struct mlx5e_tls_offload_context_rx {
	struct tls_offload_context_rx base;
	__be32 handle;
};

static inline struct mlx5e_tls_offload_context_rx *
mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
		     TLS_OFFLOAD_CONTEXT_SIZE_RX);
	return container_of(tls_offload_ctx_rx(tls_ctx),
			    struct mlx5e_tls_offload_context_rx,
			    base);
}

static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv)
{
	return priv->tls;
}

void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
int mlx5e_tls_init(struct mlx5e_priv *priv);
void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
@@ -106,8 +62,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);

static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
	return !is_kdump_kernel() &&
	       mlx5_accel_is_tls_device(mdev);
	return !is_kdump_kernel() && mlx5_accel_is_ktls_device(mdev);
}

#else
@@ -119,7 +74,6 @@ static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
	mlx5e_ktls_build_netdev(priv);
}

static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; }
static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
@@ -38,230 +38,11 @@
#include <net/inet6_hashtables.h>
#include <linux/ipv6.h>

#define SYNDROM_DECRYPTED 0x30
#define SYNDROM_RESYNC_REQUEST 0x31
#define SYNDROM_AUTH_FAILED 0x32

#define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33

struct sync_info {
	u64 rcd_sn;
	s32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct recv_metadata_content {
	u8 syndrome;
	u8 reserved;
	__be32 sync_seq;
} __packed;

struct send_metadata_content {
	/* One byte of syndrome followed by 3 bytes of swid */
	__be32 syndrome_swid;
	__be16 first_seq;
} __packed;

struct mlx5e_tls_metadata {
	union {
		/* from fpga to host */
		struct recv_metadata_content recv;
		/* from host to fpga */
		struct send_metadata_content send;
		unsigned char raw[6];
	} __packed content;
	/* packet type ID field */
	__be16 ethertype;
} __packed;

static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
{
	struct mlx5e_tls_metadata *pet;
	struct ethhdr *eth;

	if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
		return -ENOMEM;

	eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
	skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
	pet = (struct mlx5e_tls_metadata *)(eth + 1);

	memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
		2 * ETH_ALEN);

	eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
	pet->content.send.syndrome_swid =
		htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;

	return 0;
}

static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
				   u32 tcp_seq, struct sync_info *info)
{
	int remaining, i = 0, ret = -EINVAL;
	struct tls_record_info *record;
	unsigned long flags;
	s32 sync_size;

	spin_lock_irqsave(&context->base.lock, flags);
	record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);

	if (unlikely(!record))
		goto out;

	sync_size = tcp_seq - tls_record_start_seq(record);
	info->sync_len = sync_size;
	if (unlikely(sync_size < 0)) {
		if (tls_record_is_start_marker(record))
			goto done;

		goto out;
	}

	remaining = sync_size;
	while (remaining > 0) {
		info->frags[i] = record->frags[i];
		__skb_frag_ref(&info->frags[i]);
		remaining -= skb_frag_size(&info->frags[i]);

		if (remaining < 0)
			skb_frag_size_add(&info->frags[i], remaining);

		i++;
	}
	info->nr_frags = i;
done:
	ret = 0;
out:
	spin_unlock_irqrestore(&context->base.lock, flags);
	return ret;
}

static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
					struct sk_buff *nskb, u32 tcp_seq,
					int headln, __be64 rcd_sn)
{
	struct mlx5e_tls_metadata *pet;
	u8 syndrome = SYNDROME_SYNC;
	struct iphdr *iph;
	struct tcphdr *th;
	int data_len, mss;

	nskb->dev = skb->dev;
	skb_reset_mac_header(nskb);
	skb_set_network_header(nskb, skb_network_offset(skb));
	skb_set_transport_header(nskb, skb_transport_offset(skb));
	memcpy(nskb->data, skb->data, headln);
	memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));

	iph = ip_hdr(nskb);
	iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
	th = tcp_hdr(nskb);
	data_len = nskb->len - headln;
	tcp_seq -= data_len;
	th->seq = htonl(tcp_seq);

	mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
	skb_shinfo(nskb)->gso_size = 0;
	if (data_len > mss) {
		skb_shinfo(nskb)->gso_size = mss;
		skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
	}
	skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;

	pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
	memcpy(pet, &syndrome, sizeof(syndrome));
	pet->content.send.first_seq = htons(tcp_seq);

	/* MLX5 devices don't care about the checksum partial start, offset
	 * and pseudo header
	 */
	nskb->ip_summed = CHECKSUM_PARTIAL;

	nskb->queue_mapping = skb->queue_mapping;
}

static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
				 struct mlx5e_txqsq *sq, struct sk_buff *skb,
				 struct mlx5e_tls *tls)
{
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct sync_info info;
	struct sk_buff *nskb;
	int linear_len = 0;
	int headln;
	int i;

	sq->stats->tls_ooo++;

	if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
		goto err_out;
	}

	if (unlikely(info.sync_len < 0)) {
		u32 payload;

		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload = skb->len - headln;
		if (likely(payload <= -info.sync_len))
			/* SKB payload doesn't require offload
			 */
			return true;

		atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
		goto err_out;
	}

	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
		atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
		goto err_out;
	}

	headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
	linear_len += headln + sizeof(info.rcd_sn);
	nskb = alloc_skb(linear_len, GFP_ATOMIC);
	if (unlikely(!nskb)) {
		atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
		goto err_out;
	}

	context->expected_seq = tcp_seq + skb->len - headln;
	skb_put(nskb, linear_len);
	for (i = 0; i < info.nr_frags; i++)
		skb_shinfo(nskb)->frags[i] = info.frags[i];

	skb_shinfo(nskb)->nr_frags = info.nr_frags;
	nskb->data_len = info.sync_len;
	nskb->len += info.sync_len;
	sq->stats->tls_resync_bytes += nskb->len;
	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
				    cpu_to_be64(info.rcd_sn));
	mlx5e_sq_xmit_simple(sq, nskb, true);

	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}

bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
			     struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_tls_offload_context_tx *context;
	struct tls_context *tls_ctx;
	u32 expected_seq;
	int datalen;
	u32 skb_seq;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
@@ -273,118 +54,17 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
	if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
		goto err_out;

	if (mlx5e_accel_is_ktls_tx(sq->mdev))
		return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);

	/* FPGA */
	skb_seq = ntohl(tcp_hdr(skb)->seq);
	context = mlx5e_get_tls_tx_context(tls_ctx);
	expected_seq = context->expected_seq;

	if (unlikely(expected_seq != skb_seq))
		return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);

	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
		atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
		dev_kfree_skb_any(skb);
		return false;
	}

	context->expected_seq = skb_seq + datalen;
	return true;
	return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);

err_out:
	dev_kfree_skb_any(skb);
	return false;
}

static int tls_update_resync_sn(struct net_device *netdev,
				struct sk_buff *skb,
				struct mlx5e_tls_metadata *mdata)
{
	struct sock *sk = NULL;
	struct iphdr *iph;
	struct tcphdr *th;
	__be32 seq;

	if (mdata->ethertype != htons(ETH_P_IP))
		return -EINVAL;

	iph = (struct iphdr *)(mdata + 1);

	th = ((void *)iph) + iph->ihl * 4;

	if (iph->version == 4) {
		sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

		sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
#endif
	}
	if (!sk || sk->sk_state == TCP_TIME_WAIT) {
		struct mlx5e_priv *priv = netdev_priv(netdev);

		atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
		goto out;
	}

	skb->sk = sk;
	skb->destructor = sock_edemux;

	memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
	tls_offload_rx_resync_request(sk, seq);
out:
	return 0;
}

/* FPGA tls rx handler */
void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
				      u32 *cqe_bcnt)
{
	struct mlx5e_tls_metadata *mdata;
	struct mlx5e_priv *priv;

	/* Use the metadata */
	mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
	switch (mdata->content.recv.syndrome) {
	case SYNDROM_DECRYPTED:
		skb->decrypted = 1;
		break;
	case SYNDROM_RESYNC_REQUEST:
		tls_update_resync_sn(rq->netdev, skb, mdata);
		priv = netdev_priv(rq->netdev);
		atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
		break;
	case SYNDROM_AUTH_FAILED:
		/* Authentication failure will be observed and verified by kTLS */
		priv = netdev_priv(rq->netdev);
		atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
		break;
	default:
		/* Bypass the metadata header to others */
		return;
	}

	remove_metadata_hdr(skb);
	*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
}

u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	if (!mlx5e_accel_is_tls_device(mdev))
		return 0;

	if (mlx5e_accel_is_ktls_device(mdev))
		return mlx5e_ktls_get_stop_room(mdev, params);

	/* FPGA */
	/* Resync SKB. */
	return mlx5e_stop_room_for_max_wqe(mdev);
	return mlx5e_ktls_get_stop_room(mdev, params);
}
@@ -60,18 +60,12 @@ mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
	cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
}

void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
				      u32 *cqe_bcnt);

static inline void
mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */
		return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);

	if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb)))
		return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt);
	mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);
}

#else
@@ -38,13 +38,6 @@
#include "fpga/sdk.h"
#include "en_accel/tls.h"

static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
};

static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) },
@@ -59,18 +52,16 @@ static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
{
	if (!priv->tls)
		return NULL;
	if (mlx5e_accel_is_ktls_device(priv->mdev))
		return mlx5e_ktls_sw_stats_desc;
	return mlx5e_tls_sw_stats_desc;

	return mlx5e_ktls_sw_stats_desc;
}

int mlx5e_tls_get_count(struct mlx5e_priv *priv)
{
	if (!priv->tls)
		return 0;
	if (mlx5e_accel_is_ktls_device(priv->mdev))
		return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
	return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);

	return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
}

int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
@@ -1036,9 +1036,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
	if (err)
		goto err_destroy_rq;

	if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev))
		__set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */

	if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);

@@ -59,7 +59,6 @@ struct mlx5_fpga_device {
	} conn_res;

	struct mlx5_fpga_ipsec *ipsec;
	struct mlx5_fpga_tls *tls;
};

#define mlx5_fpga_dbg(__adev, format, ...) \
@@ -1,622 +0,0 @@
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx5/device.h>
#include "fpga/tls.h"
#include "fpga/cmd.h"
#include "fpga/sdk.h"
#include "fpga/core.h"
#include "accel/tls.h"

struct mlx5_fpga_tls_command_context;

typedef void (*mlx5_fpga_tls_command_complete)
	(struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
	 struct mlx5_fpga_tls_command_context *ctx,
	 struct mlx5_fpga_dma_buf *resp);

struct mlx5_fpga_tls_command_context {
	struct list_head list;
	/* There is no guarantee on the order between the TX completion
	 * and the command response.
	 * The TX completion is going to touch cmd->buf even in
	 * the case of successful transmission.
	 * So instead of requiring separate allocations for cmd
	 * and cmd->buf we've decided to use a reference counter
	 */
	refcount_t ref;
	struct mlx5_fpga_dma_buf buf;
	mlx5_fpga_tls_command_complete complete;
};

static void
mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
{
	if (refcount_dec_and_test(&ctx->ref))
		kfree(ctx);
}

static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
				       struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_fpga_conn *conn = fdev->tls->conn;
	struct mlx5_fpga_tls_command_context *ctx;
	struct mlx5_fpga_tls *tls = fdev->tls;
	unsigned long flags;

	spin_lock_irqsave(&tls->pending_cmds_lock, flags);
	ctx = list_first_entry(&tls->pending_cmds,
			       struct mlx5_fpga_tls_command_context, list);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
	ctx->complete(conn, fdev, ctx, resp);
}

static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
					struct mlx5_fpga_device *fdev,
					struct mlx5_fpga_dma_buf *buf,
					u8 status)
{
	struct mlx5_fpga_tls_command_context *ctx =
		container_of(buf, struct mlx5_fpga_tls_command_context, buf);

	mlx5_fpga_tls_put_command_ctx(ctx);

	if (unlikely(status))
		mlx5_fpga_tls_cmd_complete(fdev, NULL);
}

static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
				   struct mlx5_fpga_tls_command_context *cmd,
				   mlx5_fpga_tls_command_complete complete)
{
	struct mlx5_fpga_tls *tls = fdev->tls;
	unsigned long flags;
	int ret;

	refcount_set(&cmd->ref, 2);
	cmd->complete = complete;
	cmd->buf.complete = mlx5_fpga_cmd_send_complete;

	spin_lock_irqsave(&tls->pending_cmds_lock, flags);
	/* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
	 * to make sure commands are inserted to the tls->pending_cmds list
	 * and the command QP in the same order.
	 */
	ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
	if (likely(!ret))
		list_add_tail(&cmd->list, &tls->pending_cmds);
	else
		complete(tls->conn, fdev, cmd, NULL);
	spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
}

/* Start of context identifiers range (inclusive) */
#define SWID_START 0
/* End of context identifiers range (exclusive) */
#define SWID_END BIT(24)

static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
				    void *ptr)
{
	unsigned long flags;
	int ret;

	/* TLS metadata format is 1 byte for syndrome followed
	 * by 3 bytes of swid (software ID)
	 * swid must not exceed 3 bytes.
	 * See tls_rxtx.c:insert_pet() for details
	 */
	BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(idr_spinlock, flags);
	ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
	spin_unlock_irqrestore(idr_spinlock, flags);
	idr_preload_end();

	return ret;
}

static void *mlx5_fpga_tls_release_swid(struct idr *idr,
					spinlock_t *idr_spinlock, u32 swid)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(idr_spinlock, flags);
	ptr = idr_remove(idr, swid);
	spin_unlock_irqrestore(idr_spinlock, flags);
	return ptr;
}

static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
				   struct mlx5_fpga_device *fdev,
				   struct mlx5_fpga_dma_buf *buf, u8 status)
{
	kfree(buf);
}

static void
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
				  struct mlx5_fpga_device *fdev,
				  struct mlx5_fpga_tls_command_context *cmd,
				  struct mlx5_fpga_dma_buf *resp)
{
	if (resp) {
		u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);

		if (syndrome)
			mlx5_fpga_err(fdev,
				      "Teardown stream failed with syndrome = %d",
				      syndrome);
	}
	mlx5_fpga_tls_put_command_ctx(cmd);
}

static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
{
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
	       MLX5_BYTE_OFF(tls_flow, ipv6));

	MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
	MLX5_SET(tls_cmd, cmd, direction_sx,
		 MLX5_GET(tls_flow, flow, direction_sx));
}

int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
			    u32 seq, __be64 rcd_sn)
{
	struct mlx5_fpga_dma_buf *buf;
	int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
	void *flow;
	void *cmd;
	int ret;

	buf = kzalloc(size, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	cmd = (buf + 1);

	rcu_read_lock();
	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
	if (unlikely(!flow)) {
		rcu_read_unlock();
		WARN_ONCE(1, "Received NULL pointer for handle\n");
		kfree(buf);
		return -EINVAL;
	}
	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
	rcu_read_unlock();

	MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
	MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
	MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
	MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);

	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
	buf->complete = mlx_tls_kfree_complete;

	ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
	if (ret < 0)
		kfree(buf);

	return ret;
}

static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
					    void *flow, u32 swid, gfp_t flags)
{
	struct mlx5_fpga_tls_command_context *ctx;
	struct mlx5_fpga_dma_buf *buf;
	void *cmd;

	ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
	if (!ctx)
		return;

	buf = &ctx->buf;
	cmd = (ctx + 1);
	MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
	MLX5_SET(tls_cmd, cmd, swid, swid);

	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
	kfree(flow);

	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;

	mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
			       mlx5_fpga_tls_teardown_completion);
}

void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
			    gfp_t flags, bool direction_sx)
{
	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
	void *flow;

	if (direction_sx)
		flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
						  &tls->tx_idr_spinlock,
						  swid);
	else
		flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
						  &tls->rx_idr_spinlock,
						  swid);

	if (!flow) {
		mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
			      swid);
		return;
	}

	synchronize_rcu(); /* before kfree(flow) */
	mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
}

enum mlx5_fpga_setup_stream_status {
	MLX5_FPGA_CMD_PENDING,
	MLX5_FPGA_CMD_SEND_FAILED,
	MLX5_FPGA_CMD_RESPONSE_RECEIVED,
	MLX5_FPGA_CMD_ABANDONED,
};

struct mlx5_setup_stream_context {
	struct mlx5_fpga_tls_command_context cmd;
	atomic_t status;
	u32 syndrome;
	struct completion comp;
};

static void
mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
			       struct mlx5_fpga_device *fdev,
			       struct mlx5_fpga_tls_command_context *cmd,
			       struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_setup_stream_context *ctx =
		container_of(cmd, struct mlx5_setup_stream_context, cmd);
	int status = MLX5_FPGA_CMD_SEND_FAILED;
	void *tls_cmd = ctx + 1;

	/* If we failed to send to command resp == NULL */
	if (resp) {
		ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
		status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
	}

	status = atomic_xchg_release(&ctx->status, status);
	if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
		complete(&ctx->comp);
		return;
	}

	mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
		      ctx->syndrome);

	if (!ctx->syndrome) {
		/* The process was killed while waiting for the context to be
		 * added, and the add completed successfully.
		 * We need to destroy the HW context, and we can't reuse
		 * the command context because we might not have received
		 * the tx completion yet.
		 */
		mlx5_fpga_tls_del_flow(fdev->mdev,
				       MLX5_GET(tls_cmd, tls_cmd, swid),
				       GFP_ATOMIC,
				       MLX5_GET(tls_cmd, tls_cmd,
						direction_sx));
	}

	mlx5_fpga_tls_put_command_ctx(cmd);
}

static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
					  struct mlx5_setup_stream_context *ctx)
{
	struct mlx5_fpga_dma_buf *buf;
	void *cmd = ctx + 1;
	int status, ret = 0;

	buf = &ctx->cmd.buf;
	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
	MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);

	init_completion(&ctx->comp);
	atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
	ctx->syndrome = -1;

	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
			       mlx5_fpga_tls_setup_completion);
	wait_for_completion_killable(&ctx->comp);

	status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
	if (unlikely(status == MLX5_FPGA_CMD_PENDING))
		/* ctx is going to be released in mlx5_fpga_tls_setup_completion */
		return -EINTR;

	if (unlikely(ctx->syndrome))
		ret = -ENOMEM;

	mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
	return ret;
}

static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
					struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;

	mlx5_fpga_tls_cmd_complete(fdev, buf);
}

bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
{
	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
		return false;

	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
	    MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
	    MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
		return false;

	return true;
}

static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
				  u32 *p_caps)
{
	int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
	u32 caps = 0;
	void *buf;

	buf = kzalloc(cap_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
	if (err)
		goto out;

	if (MLX5_GET(tls_extended_cap, buf, tx))
		caps |= MLX5_ACCEL_TLS_TX;
	if (MLX5_GET(tls_extended_cap, buf, rx))
		caps |= MLX5_ACCEL_TLS_RX;
	if (MLX5_GET(tls_extended_cap, buf, tls_v12))
		caps |= MLX5_ACCEL_TLS_V12;
	if (MLX5_GET(tls_extended_cap, buf, tls_v13))
		caps |= MLX5_ACCEL_TLS_V13;
	if (MLX5_GET(tls_extended_cap, buf, lro))
		caps |= MLX5_ACCEL_TLS_LRO;
	if (MLX5_GET(tls_extended_cap, buf, ipv6))
		caps |= MLX5_ACCEL_TLS_IPV6;

	if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
		caps |= MLX5_ACCEL_TLS_AES_GCM128;
	if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
		caps |= MLX5_ACCEL_TLS_AES_GCM256;

	*p_caps = caps;
	err = 0;
out:
	kfree(buf);
	return err;
}

int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_conn *conn;
	struct mlx5_fpga_tls *tls;
	int err = 0;

	if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
		return 0;

	tls = kzalloc(sizeof(*tls), GFP_KERNEL);
	if (!tls)
		return -ENOMEM;

	err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
	if (err)
		goto error;

	if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
		err = -ENOTSUPP;
		goto error;
	}

	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
			      err);
		goto error;
	}

	tls->conn = conn;
	spin_lock_init(&tls->pending_cmds_lock);
	INIT_LIST_HEAD(&tls->pending_cmds);

	idr_init(&tls->tx_idr);
	idr_init(&tls->rx_idr);
	spin_lock_init(&tls->tx_idr_spinlock);
	spin_lock_init(&tls->rx_idr_spinlock);
	fdev->tls = tls;
	return 0;

error:
	kfree(tls);
	return err;
}

void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!fdev || !fdev->tls)
		return;

	mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
	kfree(fdev->tls);
	fdev->tls = NULL;
}

static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
					     struct tls_crypto_info *info,
					     __be64 *rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *crypto_info =
		(struct tls12_crypto_info_aes_gcm_128 *)info;

	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
	       crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
	       crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	/* in AES-GCM 128 we need to write the key twice */
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
	       TLS_CIPHER_AES_GCM_128_KEY_SIZE,
	       crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
}

static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
					  struct tls_crypto_info *crypto_info)
{
	__be64 rcd_sn;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
			return -EINVAL;
		mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
				   struct tls_crypto_info *crypto_info,
				   u32 swid, u32 tcp_sn)
{
	u32 caps = mlx5_fpga_tls_device_caps(mdev);
	struct mlx5_setup_stream_context *ctx;
	int ret = -ENOMEM;
	size_t cmd_size;
	void *cmd;

	cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
	ctx = kzalloc(cmd_size, GFP_KERNEL);
	if (!ctx)
		goto out;

	cmd = ctx + 1;
	ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
	if (ret)
		goto free_ctx;

	mlx5_fpga_tls_flow_to_cmd(flow, cmd);

	MLX5_SET(tls_cmd, cmd, swid, swid);
	MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);

	return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);

free_ctx:
	kfree(ctx);
out:
	return ret;
}

int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn, u32 *p_swid,
			   bool direction_sx)
{
	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
	int ret = -ENOMEM;
	u32 swid;

	if (direction_sx)
		ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
					       &tls->tx_idr_spinlock, flow);
	else
		ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
					       &tls->rx_idr_spinlock, flow);

	if (ret < 0)
		return ret;

	swid = ret;
	MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);

	ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
				      start_offload_tcp_sn);
	if (ret && ret != -EINTR)
		goto free_swid;

	*p_swid = swid;
	return 0;
free_swid:
	if (direction_sx)
		mlx5_fpga_tls_release_swid(&tls->tx_idr,
					   &tls->tx_idr_spinlock, swid);
	else
		mlx5_fpga_tls_release_swid(&tls->rx_idr,
					   &tls->rx_idr_spinlock, swid);

	return ret;
}
@@ -1,74 +0,0 @@
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef __MLX5_FPGA_TLS_H__
#define __MLX5_FPGA_TLS_H__

#include <linux/mlx5/driver.h>

#include <net/tls.h>
#include "fpga/core.h"

struct mlx5_fpga_tls {
	struct list_head pending_cmds;
	spinlock_t pending_cmds_lock; /* Protects pending_cmds */
	u32 caps;
	struct mlx5_fpga_conn *conn;

	struct idr tx_idr;
	struct idr rx_idr;
	spinlock_t tx_idr_spinlock; /* protects the IDR */
	spinlock_t rx_idr_spinlock; /* protects the IDR */
};

int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn, u32 *p_swid,
			   bool direction_sx);

void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
			    gfp_t flags, bool direction_sx);

bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev);

static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
{
	return mdev->fpga->tls->caps;
}

int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
			    u32 seq, __be64 rcd_sn);

#endif /* __MLX5_FPGA_TLS_H__ */
@@ -64,7 +64,6 @@
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
@@ -1185,12 +1184,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)

	mlx5_accel_ipsec_init(dev);

	err = mlx5_accel_tls_init(dev);
	if (err) {
		mlx5_core_err(dev, "TLS device start failed %d\n", err);
		goto err_tls_start;
	}

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init flow steering\n");
@@ -1238,8 +1231,6 @@ err_vhca:
err_set_hca:
	mlx5_cleanup_fs(dev);
err_fs:
	mlx5_accel_tls_cleanup(dev);
err_tls_start:
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_fpga_device_stop(dev);
err_fpga_start:
@@ -1267,7 +1258,6 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
	mlx5_vhca_event_stop(dev);
	mlx5_cleanup_fs(dev);
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_accel_tls_cleanup(dev);
	mlx5_fpga_device_stop(dev);
	mlx5_rsc_dump_cleanup(dev);
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
@@ -54,7 +54,6 @@ enum {

enum {
	MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
	MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3,
};

struct mlx5_ifc_fpga_shell_caps_bits {
@@ -387,27 +386,6 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_tls_extended_cap_bits {
	u8 aes_gcm_128[0x1];
	u8 aes_gcm_256[0x1];
	u8 reserved_at_2[0x1e];
	u8 reserved_at_20[0x20];
	u8 context_capacity_total[0x20];
	u8 context_capacity_rx[0x20];
	u8 context_capacity_tx[0x20];
	u8 reserved_at_a0[0x10];
	u8 tls_counter_size[0x10];
	u8 tls_counters_addr_low[0x20];
	u8 tls_counters_addr_high[0x20];
	u8 rx[0x1];
	u8 tx[0x1];
	u8 tls_v12[0x1];
	u8 tls_v13[0x1];
	u8 lro[0x1];
	u8 ipv6[0x1];
	u8 reserved_at_106[0x1a];
};

struct mlx5_ifc_ipsec_extended_cap_bits {
	u8 encapsulation[0x20];

@@ -572,45 +550,4 @@ struct mlx5_ifc_fpga_ipsec_sa {
	__be16 vid; /* only 12 bits, rest is reserved */
	__be16 reserved2;
} __packed;

enum fpga_tls_cmds {
	CMD_SETUP_STREAM = 0x1001,
	CMD_TEARDOWN_STREAM = 0x1002,
	CMD_RESYNC_RX = 0x1003,
};

#define MLX5_TLS_1_2 (0)

#define MLX5_TLS_ALG_AES_GCM_128 (0)
#define MLX5_TLS_ALG_AES_GCM_256 (1)

struct mlx5_ifc_tls_cmd_bits {
	u8 command_type[0x20];
	u8 ipv6[0x1];
	u8 direction_sx[0x1];
	u8 tls_version[0x2];
	u8 reserved[0x1c];
	u8 swid[0x20];
	u8 src_port[0x10];
	u8 dst_port[0x10];
	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
	u8 tls_rcd_sn[0x40];
	u8 tcp_sn[0x20];
	u8 tls_implicit_iv[0x20];
	u8 tls_xor_iv[0x40];
	u8 encryption_key[0x100];
	u8 alg[4];
	u8 reserved2[0x1c];
	u8 reserved3[0x4a0];
};

struct mlx5_ifc_tls_resp_bits {
	u8 syndrome[0x20];
	u8 stream_id[0x20];
	u8 reserved[0x40];
};

#define MLX5_TLS_COMMAND_SIZE (0x100)

#endif /* MLX5_IFC_FPGA_H */