drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/tls.h>
#include "en.h"
#include "en/txrx.h"
#include "en_accel/ktls.h"

enum {
	MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
};

enum {
	MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
};

#define EXTRACT_INFO_FIELDS do { \
	salt    = info->salt;    \
	rec_seq = info->rec_seq; \
	salt_sz    = sizeof(info->salt);    \
	rec_seq_sz = sizeof(info->rec_seq); \
} while (0)

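/* Fill the TLS static params context with the connection's crypto material:
 * the AES-GCM salt is used as the GCM IV, the initial record number is taken
 * from the TLS record sequence, and dek_index points at the key registered
 * with the device (priv_tx->key_id).
 */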
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *info;
	char *initial_rn, *gcm_iv;
	u16 salt_sz, rec_seq_sz;
	char *salt, *rec_seq;
	u8 tls_version;

	if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
		return;

	info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	EXTRACT_INFO_FIELDS;

	gcm_iv      = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
	initial_rn  = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);

	memcpy(gcm_iv,      salt,    salt_sz);
	memcpy(initial_rn,  rec_seq, rec_seq_sz);

	tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;

	MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
	MLX5_SET(tls_static_params, ctx, const_1, 1);
	MLX5_SET(tls_static_params, ctx, const_2, 2);
	MLX5_SET(tls_static_params, ctx, encryption_standard,
		 MLX5E_ENCRYPTION_STANDARD_TLS);
	MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
}

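/* Build the UMR WQE that carries the static params inline for the TIS of this
 * connection (priv_tx->tisn). The fence bit is set only when requested by the
 * caller.
 */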
static void
build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
		    struct mlx5e_ktls_offload_context_tx *priv_tx,
		    bool fence)
{
	struct mlx5_wqe_ctrl_seg     *cseg  = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;

#define STATIC_PARAMS_DS_CNT \
	DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)

	cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
					     (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
	cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
					     STATIC_PARAMS_DS_CNT);
	cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
	cseg->tisn             = cpu_to_be32(priv_tx->tisn << 8);

	ucseg->flags = MLX5_UMR_INLINE;
	ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);

	fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
}

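/* Fill the TLS progress params context: bind it to the connection's TIS and
 * initialize the record tracker in the START state with auth_state set to
 * NO_OFFLOAD.
 */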
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
	MLX5_SET(tls_progress_params, ctx, record_tracker_state,
		 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
	MLX5_SET(tls_progress_params, ctx, auth_state,
		 MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
}

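/* Build the SET_PSV WQE that delivers the progress params for this
 * connection.
 */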
static void
build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      bool fence)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

#define PROGRESS_PARAMS_DS_CNT \
	DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)

	cseg->opmod_idx_opcode =
		cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
			    (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
	cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
					     PROGRESS_PARAMS_DS_CNT);
	cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
}

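/* Record bookkeeping for a posted WQE so the completion path knows how many
 * WQEBBs to release, how many bytes to account, and which page (if any) to
 * release for a resync dump.
 */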
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	memset(wi, 0, sizeof(*wi));
	wi->num_wqebbs = num_wqebbs;
	wi->num_bytes  = num_bytes;
	wi->resync_dump_frag_page = page;
}

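/* ctx_post_pending marks a connection whose static and progress params still
 * have to be posted on the SQ before its data is transmitted.
 */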
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	priv_tx->ctx_post_pending = true;
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

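/* The two posting helpers fetch room on the SQ, build the corresponding param
 * WQE, fill the wqe_info entry and advance the producer counter.
 */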
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_umr_wqe *umr_wqe;
	u16 pi;

	umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
	build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
	sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
	build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
	sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}

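/* Post the param WQEs for a connection. The static params post may be skipped
 * (e.g. on resync when the record sequence number is unchanged); the progress
 * params WQE is fenced whenever the static post is skipped or the first post
 * is not fenced.
 */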
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
}

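/* Data gathered for a resync: the record sequence number, the number of bytes
 * between the record start and the retransmitted sequence, and the frags that
 * hold those bytes.
 */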
struct tx_sync_info {
	u64 rcd_sn;
	s32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

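/* Look up the TLS record covering tcp_seq and collect the frags spanning the
 * bytes between the record start and tcp_seq, taking a page reference on
 * each. Returns false when no suitable record exists, e.g. a retransmission
 * that arrives after the record was already acked.
 */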
static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
			     u32 tcp_seq, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = false;
		goto out;
	}

	if (unlikely(tcp_seq < tls_record_start_seq(record))) {
		if (!tls_record_is_start_marker(record))
			ret = false;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

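/* Re-post the param WQEs for a resync, using the record sequence number of
 * the record being retransmitted. If the stored rec_seq already matches,
 * the static params post is skipped and only the progress params are sent.
 */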
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
		return;

	info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

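/* Post a DUMP WQE for one frag of the record being resynced; the first dump
 * sets the fence bit (presumably to order it after the param WQEs posted just
 * before). Returns -ENOMEM if the frag cannot be DMA-mapped.
 */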
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tisn             = cpu_to_be32(tisn << 8);
	cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

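/* Completion handler for resync DUMP WQEs: unmap the frag, drop the page
 * reference taken in tx_sync_info_get() and update the dump statistics.
 * Entries without a resync_dump_frag_page (param WQEs, fence NOPs) are
 * skipped.
 */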
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	if (!wi->resync_dump_frag_page)
		return;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

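/* Post a fenced NOP; used when a resync posts no DUMP WQEs, so that a fence
 * still precedes the actual data transmission.
 */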
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

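/* Handle a packet whose TCP sequence does not match the expected one
 * (typically a retransmission): look up the relevant TLS record, re-post the
 * params with its record number, and post DUMP WQEs for the bytes of that
 * record preceding the retransmitted data. Returns NULL (and frees the skb)
 * when the packet has to be dropped.
 */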
static struct sk_buff *
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 struct sk_buff *skb,
			 u32 seq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct tx_sync_info info = {};
	u16 contig_wqebbs_room, pi;
	u8 num_wqebbs;
	int i;

	if (!tx_sync_info_get(priv_tx, seq, &info)) {
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		stats->tls_drop_no_sync_data++;
		goto err_out;
	}

	if (unlikely(info.sync_len < 0)) {
		u32 payload;
		int headln;

		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload = skb->len - headln;
		if (likely(payload <= -info.sync_len))
			return skb;

		stats->tls_drop_bypass_req++;
		goto err_out;
	}

	stats->tls_ooo++;

	num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
		(info.nr_frags ? info.nr_frags * MLX5E_KTLS_DUMP_WQEBBS : 1);
	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room < num_wqebbs))
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	for (i = 0; i < info.nr_frags; i++)
		if (tx_post_resync_dump(sq, &info.frags[i], priv_tx->tisn, !i))
			goto err_out;

	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
	 * actual data xmit.
	 */
	if (!info.nr_frags)
		tx_post_fence_nop(sq);

	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}

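/* TX offload entry point for the datapath. Packets that do not belong to a
 * device-offloaded TLS socket pass through untouched; otherwise pending param
 * WQEs are posted, out-of-order sequences are resynced, and the connection's
 * TISN is set in the control segment of the data WQE.
 */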
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
					 struct mlx5e_txqsq *sq,
					 struct sk_buff *skb,
					 struct mlx5e_tx_wqe **wqe, u16 *pi)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
		goto out;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
		goto out;

	tls_ctx = tls_get_ctx(skb->sk);
	if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
		stats->tls_ctx++;
	}

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
		if (unlikely(!skb))
			goto out;
		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
	}

	priv_tx->expected_seq = seq + datalen;

	cseg = &(*wqe)->ctrl;
	cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}