/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
#ifndef __MLX5E_KTLS_H__
#define __MLX5E_KTLS_H__

#ifdef CONFIG_MLX5_EN_TLS

#include "accel/tls.h"
/* Byte size of the UMR WQE that programs the TLS static params context:
 * everything up to and including the tls_static_params segment.
 */
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
	(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
	 MLX5_ST_SZ_BYTES(tls_static_params))
/* The same size expressed in send WQE basic blocks, rounded up. */
#define MLX5E_KTLS_STATIC_WQEBBS \
	(DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
/* Byte size of the WQE that posts the TLS progress params context:
 * everything up to and including the tls_progress_params segment.
 */
#define MLX5E_KTLS_PROGRESS_WQE_SZ \
	(offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
	 MLX5_ST_SZ_BYTES(tls_progress_params))
/* The same size expressed in send WQE basic blocks, rounded up. */
#define MLX5E_KTLS_PROGRESS_WQEBBS \
	(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
25 struct mlx5e_dump_wqe {
26 struct mlx5_wqe_ctrl_seg ctrl;
27 struct mlx5_wqe_data_seg data;
30 #define MLX5E_KTLS_DUMP_WQEBBS \
31 (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
/* Authentication state of the HW TLS progress params context. */
enum {
	MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD     = 0,
	MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD        = 1,
	MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION = 2,
};
/* Record-tracker state of the HW TLS progress params context. */
enum {
	MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START     = 0,
	MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1,
	MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING  = 2,
};
45 struct mlx5e_ktls_offload_context_tx {
46 struct tls_offload_context_tx *tx_ctx;
47 struct tls_crypto_info *crypto_info;
51 bool ctx_post_pending;
54 struct mlx5e_ktls_offload_context_tx_shadow {
55 struct tls_offload_context_tx tx_ctx;
56 struct mlx5e_ktls_offload_context_tx *priv_tx;
60 mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
61 struct mlx5e_ktls_offload_context_tx *priv_tx)
63 struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
64 struct mlx5e_ktls_offload_context_tx_shadow *shadow;
66 BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
68 shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
70 shadow->priv_tx = priv_tx;
71 priv_tx->tx_ctx = tx_ctx;
74 static inline struct mlx5e_ktls_offload_context_tx *
75 mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
77 struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
78 struct mlx5e_ktls_offload_context_tx_shadow *shadow;
80 BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
82 shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
84 return shadow->priv_tx;
87 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
88 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
90 struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
91 struct mlx5e_txqsq *sq,
93 struct mlx5e_tx_wqe **wqe, u16 *pi);
94 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
95 struct mlx5e_tx_wqe_info *wi,
98 mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
99 unsigned int sync_len)
101 /* Given the MTU and sync_len, calculates an upper bound for the
102 * number of WQEBBs needed for the TX resync DUMP WQEs of a record.
104 return MLX5E_KTLS_DUMP_WQEBBS *
105 (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
109 static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
114 mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
115 struct mlx5e_tx_wqe_info *wi,
120 #endif /* __MLX5E_TLS_H__ */