net/mlx5e: TLS, add innova rx support

Add the mlx5 implementation of the TLS Rx routines to add/del TLS
contexts, and add the tls_dev_resync_rx routine, to work with the TLS
inline Rx crypto offload infrastructure.

Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Ilya Lesokhin <ilyal@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit ca942c78f3 (parent ab412e1dd7)
Author: Boris Pismenny <borisp@mellanox.com>
Date:   2018-07-13 14:33:47 +03:00
Committed by: David S. Miller
2 changed files with 46 additions and 15 deletions
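
For context, these callbacks plug into the kernel's inline TLS offload
hooks. A sketch of the tlsdev_ops interface they implement, as it looked
in this kernel generation (paraphrased from include/net/tls.h circa
v4.18; treat the exact signatures as approximate):

struct tlsdev_ops {
	/* Install a TX or RX crypto context for an offloaded socket. */
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	/* Remove a previously installed context. */
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *tls_ctx,
			    enum tls_offload_ctx_dir direction);
	/* Point the device at the next record header after the stack
	 * recovers record boundaries in software. */
	void (*tls_dev_resync_rx)(struct net_device *netdev,
				  struct sock *sk, u32 seq, u64 rcd_sn);
};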

--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c

@@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
 	u32 caps = mlx5_accel_tls_device_caps(mdev);
 	int ret = -ENOMEM;
 	void *flow;
-
-	if (direction != TLS_OFFLOAD_CTX_DIR_TX)
-		return -EINVAL;
+	u32 swid;
 
 	flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
 	if (!flow)
@@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
 	if (ret)
 		goto free_flow;
 
+	ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
+				      start_offload_tcp_sn, &swid,
+				      direction == TLS_OFFLOAD_CTX_DIR_TX);
+	if (ret < 0)
+		goto free_flow;
+
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
 		struct mlx5e_tls_offload_context_tx *tx_ctx =
 		    mlx5e_get_tls_tx_context(tls_ctx);
-		u32 swid;
-
-		ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
-						 start_offload_tcp_sn, &swid);
-		if (ret < 0)
-			goto free_flow;
 
 		tx_ctx->swid = htonl(swid);
 		tx_ctx->expected_seq = start_offload_tcp_sn;
+	} else {
+		struct mlx5e_tls_offload_context_rx *rx_ctx =
+		    mlx5e_get_tls_rx_context(tls_ctx);
+
+		rx_ctx->handle = htonl(swid);
 	}
 
 	return 0;
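
The hunk above replaces the TX-only mlx5_accel_tls_add_tx_flow() with a
direction-agnostic accel call. The prototypes this file now relies on,
paraphrased from the accel layer of the same series
(drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h; parameter names
are approximate):

int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			    struct tls_crypto_info *crypto_info,
			    u32 start_offload_tcp_sn, u32 *p_swid,
			    bool direction_sx);
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
			     bool direction_sx);
int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
			     u32 seq, u64 rcd_sn);

Note that the returned identifier is stored pre-swapped with htonl() in
both context structs (tx_ctx->swid, rx_ctx->handle), so the control path
reads it back with ntohl(), as mlx5e_tls_del() below shows.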
@@ -147,19 +150,32 @@ static void mlx5e_tls_del(struct net_device *netdev,
 			  enum tls_offload_ctx_dir direction)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	unsigned int handle;
 
-	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-		u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid);
+	handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
+		       mlx5e_get_tls_tx_context(tls_ctx)->swid :
+		       mlx5e_get_tls_rx_context(tls_ctx)->handle);
 
-		mlx5_accel_tls_del_tx_flow(priv->mdev, swid);
-	} else {
-		netdev_err(netdev, "unsupported direction %d\n", direction);
-	}
+	mlx5_accel_tls_del_flow(priv->mdev, handle,
+				direction == TLS_OFFLOAD_CTX_DIR_TX);
+}
+
+static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
+				u32 seq, u64 rcd_sn)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_tls_offload_context_rx *rx_ctx;
+
+	rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
+
+	mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
 }
 
 static const struct tlsdev_ops mlx5e_tls_ops = {
 	.tls_dev_add = mlx5e_tls_add,
 	.tls_dev_del = mlx5e_tls_del,
+	.tls_dev_resync_rx = mlx5e_tls_resync_rx,
 };
 
 void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
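
The new mlx5e_tls_resync_rx() is driven from net/tls/tls_device.c when
the stack detects that the device lost record synchronization. A sketch
of the caller side from the same patch series (paraphrased from memory;
the TLS_RX_SYNC_RUNNING guard should be checked against the actual tree):

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u64 rcd_sn)
{
	struct net_device *netdev;

	/* Serialize resync requests for this context. */
	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
		return;
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq,
						      rcd_sn);
	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}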

--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h

@@ -65,6 +65,21 @@ mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
 			    base);
 }
 
+struct mlx5e_tls_offload_context_rx {
+	struct tls_offload_context_rx base;
+	__be32 handle;
+};
+
+static inline struct mlx5e_tls_offload_context_rx *
+mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
+{
+	BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
+		     TLS_OFFLOAD_CONTEXT_SIZE_RX);
+	return container_of(tls_offload_ctx_rx(tls_ctx),
+			    struct mlx5e_tls_offload_context_rx,
+			    base);
+}
+
 void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
 int mlx5e_tls_init(struct mlx5e_priv *priv);
 void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
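
The ops table only takes effect once mlx5e_tls_build_netdev() (declared
above, body outside this diff) advertises it on the netdev. A hedged
sketch of that registration as it stood at this point in the series; the
NETIF_F_HW_TLS_RX feature bit was wired up by a later patch, so only TX
is shown:

void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;

	if (!mlx5_accel_is_tls_device(priv->mdev))
		return;

	/* Advertise TX inline TLS offload and hook up the ops table. */
	netdev->features |= NETIF_F_HW_TLS_TX;
	netdev->hw_features |= NETIF_F_HW_TLS_TX;
	netdev->tlsdev_ops = &mlx5e_tls_ops;
}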