/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx5/device.h>
#include "fpga/tls.h"
#include "fpga/cmd.h"
#include "fpga/sdk.h"
#include "fpga/core.h"
#include "accel/tls.h"

struct mlx5_fpga_tls_command_context;

typedef void (*mlx5_fpga_tls_command_complete)
        (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
         struct mlx5_fpga_tls_command_context *ctx,
         struct mlx5_fpga_dma_buf *resp);

struct mlx5_fpga_tls_command_context {
        struct list_head list;
        /* There is no guarantee on the order between the TX completion
         * and the command response.
         * The TX completion is going to touch cmd->buf even in
         * the case of successful transmission.
         * So instead of requiring separate allocations for cmd
         * and cmd->buf, we use a single allocation with a reference counter.
         */
        refcount_t ref;
        struct mlx5_fpga_dma_buf buf;
        mlx5_fpga_tls_command_complete complete;
};

static void
mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
{
        if (refcount_dec_and_test(&ctx->ref))
                kfree(ctx);
}

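/* Complete the oldest pending command. Commands are posted to the command
 * QP and linked on tls->pending_cmds in the same order, so the response at
 * hand (or a send failure, resp == NULL) always belongs to the list head.
 */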
static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
                                       struct mlx5_fpga_dma_buf *resp)
{
        struct mlx5_fpga_conn *conn = fdev->tls->conn;
        struct mlx5_fpga_tls_command_context *ctx;
        struct mlx5_fpga_tls *tls = fdev->tls;
        unsigned long flags;

        spin_lock_irqsave(&tls->pending_cmds_lock, flags);
        ctx = list_first_entry(&tls->pending_cmds,
                               struct mlx5_fpga_tls_command_context, list);
        list_del(&ctx->list);
        spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
        ctx->complete(conn, fdev, ctx, resp);
}

static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
                                        struct mlx5_fpga_device *fdev,
                                        struct mlx5_fpga_dma_buf *buf,
                                        u8 status)
{
        struct mlx5_fpga_tls_command_context *ctx =
            container_of(buf, struct mlx5_fpga_tls_command_context, buf);

        mlx5_fpga_tls_put_command_ctx(ctx);

        if (unlikely(status))
                mlx5_fpga_tls_cmd_complete(fdev, NULL);
}

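/* Post a command buffer on the SBU connection. The context starts with two
 * references: one dropped by the TX completion (mlx5_fpga_cmd_send_complete)
 * and one dropped by the response handler via mlx5_fpga_tls_put_command_ctx.
 */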
static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
                                   struct mlx5_fpga_tls_command_context *cmd,
                                   mlx5_fpga_tls_command_complete complete)
{
        struct mlx5_fpga_tls *tls = fdev->tls;
        unsigned long flags;
        int ret;

        refcount_set(&cmd->ref, 2);
        cmd->complete = complete;
        cmd->buf.complete = mlx5_fpga_cmd_send_complete;

        spin_lock_irqsave(&tls->pending_cmds_lock, flags);
        /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
         * to make sure commands are inserted to the tls->pending_cmds list
         * and the command QP in the same order.
         */
        ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
        if (likely(!ret))
                list_add_tail(&cmd->list, &tls->pending_cmds);
        else
                complete(tls->conn, fdev, cmd, NULL);
        spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
}

/* Start of context identifiers range (inclusive) */
#define SWID_START      0
/* End of context identifiers range (exclusive) */
#define SWID_END        BIT(24)

static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
                                    void *ptr)
{
        unsigned long flags;
        int ret;

        /* TLS metadata format is 1 byte for syndrome followed
         * by 3 bytes of swid (software ID), so the swid must
         * not exceed 3 bytes.
         * See tls_rxtx.c:insert_pet() for details.
         */
        BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);

        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(idr_spinlock, flags);
        ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
        spin_unlock_irqrestore(idr_spinlock, flags);
        idr_preload_end();

        return ret;
}

static void mlx5_fpga_tls_release_swid(struct idr *idr,
                                       spinlock_t *idr_spinlock, u32 swid)
{
        unsigned long flags;

        spin_lock_irqsave(idr_spinlock, flags);
        idr_remove(idr, swid);
        spin_unlock_irqrestore(idr_spinlock, flags);
}

static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
                                   struct mlx5_fpga_device *fdev,
                                   struct mlx5_fpga_dma_buf *buf, u8 status)
{
        kfree(buf);
}

struct mlx5_teardown_stream_context {
        struct mlx5_fpga_tls_command_context cmd;
        u32 swid;
};

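/* Teardown response handler: on success, return the swid to the TX or RX
 * idr (depending on the command's direction_sx bit) and drop the command
 * context reference.
 */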
static void
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
                                  struct mlx5_fpga_device *fdev,
                                  struct mlx5_fpga_tls_command_context *cmd,
                                  struct mlx5_fpga_dma_buf *resp)
{
        struct mlx5_teardown_stream_context *ctx =
                    container_of(cmd, struct mlx5_teardown_stream_context, cmd);

        if (resp) {
                u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);

                if (syndrome)
                        mlx5_fpga_err(fdev,
                                      "Teardown stream failed with syndrome = %d",
                                      syndrome);
                else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
                        mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
                                                   &fdev->tls->tx_idr_spinlock,
                                                   ctx->swid);
                else
                        mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
                                                   &fdev->tls->rx_idr_spinlock,
                                                   ctx->swid);
        }
        mlx5_fpga_tls_put_command_ctx(cmd);
}

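/* Copy the connection parameters (ports and addresses, starting at src_port)
 * plus the ipv6 and direction_sx flags from the driver's tls_flow layout
 * into a command buffer laid out as tls_cmd.
 */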
static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
{
        memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
               MLX5_BYTE_OFF(tls_flow, ipv6));

        MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
        MLX5_SET(tls_cmd, cmd, direction_sx,
                 MLX5_GET(tls_flow, flow, direction_sx));
}

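/* Send a CMD_RESYNC_RX command carrying the new TCP sequence number and TLS
 * record sequence number for an offloaded RX context. The command is not
 * tracked on pending_cmds, so no response is expected; the buffer is freed
 * by the TX completion (mlx_tls_kfree_complete).
 */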
int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
                            u64 rcd_sn)
{
        struct mlx5_fpga_dma_buf *buf;
        int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
        void *flow;
        void *cmd;
        int ret;

        rcu_read_lock();
        flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
        rcu_read_unlock();

        if (!flow) {
                WARN_ONCE(1, "Received NULL pointer for handle\n");
                return -EINVAL;
        }

        buf = kzalloc(size, GFP_ATOMIC);
        if (!buf)
                return -ENOMEM;

        cmd = (buf + 1);

        mlx5_fpga_tls_flow_to_cmd(flow, cmd);

        MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
        MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
        MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
        MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);

        buf->sg[0].data = cmd;
        buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
        buf->complete = mlx_tls_kfree_complete;

        ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
        if (ret < 0)
                kfree(buf);

        return ret;
}

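/* Build and send a CMD_TEARDOWN_STREAM command for the given swid. The flow
 * buffer is consumed (freed) here; the swid itself is only released by
 * mlx5_fpga_tls_teardown_completion once the device confirms the teardown.
 */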
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
                                            void *flow, u32 swid, gfp_t flags)
{
        struct mlx5_teardown_stream_context *ctx;
        struct mlx5_fpga_dma_buf *buf;
        void *cmd;

        ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
        if (!ctx)
                return;

        buf = &ctx->cmd.buf;
        cmd = (ctx + 1);
        MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
        MLX5_SET(tls_cmd, cmd, swid, swid);

        mlx5_fpga_tls_flow_to_cmd(flow, cmd);
        kfree(flow);

        buf->sg[0].data = cmd;
        buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;

        ctx->swid = swid;
        mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
                               mlx5_fpga_tls_teardown_completion);
}

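/* Remove an offloaded TLS context: look up the flow by swid in the TX or RX
 * idr and, if found, queue a teardown command towards the device.
 */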
void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
                            gfp_t flags, bool direction_sx)
{
        struct mlx5_fpga_tls *tls = mdev->fpga->tls;
        void *flow;

        rcu_read_lock();
        if (direction_sx)
                flow = idr_find(&tls->tx_idr, swid);
        else
                flow = idr_find(&tls->rx_idr, swid);

        rcu_read_unlock();

        if (!flow) {
                mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
                              swid);
                return;
        }

        mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
}

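/* Setup commands are waited for synchronously, but the waiting thread may be
 * killed before the response arrives. The atomic status below arbitrates
 * ownership of the context between the waiter and the completion handler:
 * whichever side observes the other's state is responsible for cleanup.
 */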
enum mlx5_fpga_setup_stream_status {
        MLX5_FPGA_CMD_PENDING,
        MLX5_FPGA_CMD_SEND_FAILED,
        MLX5_FPGA_CMD_RESPONSE_RECEIVED,
        MLX5_FPGA_CMD_ABANDONED,
};

struct mlx5_setup_stream_context {
        struct mlx5_fpga_tls_command_context cmd;
        atomic_t status;
        u32 syndrome;
        struct completion comp;
};

static void
mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
                               struct mlx5_fpga_device *fdev,
                               struct mlx5_fpga_tls_command_context *cmd,
                               struct mlx5_fpga_dma_buf *resp)
{
        struct mlx5_setup_stream_context *ctx =
            container_of(cmd, struct mlx5_setup_stream_context, cmd);
        int status = MLX5_FPGA_CMD_SEND_FAILED;
        void *tls_cmd = ctx + 1;

        /* If we failed to send the command, resp == NULL */
        if (resp) {
                ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
                status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
        }

        status = atomic_xchg_release(&ctx->status, status);
        if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
                complete(&ctx->comp);
                return;
        }

        mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
                      ctx->syndrome);

        if (!ctx->syndrome) {
                /* The process was killed while waiting for the context to be
                 * added, and the add completed successfully.
                 * We need to destroy the HW context, and we can't reuse
                 * the command context because we might not have received
                 * the tx completion yet.
                 */
                mlx5_fpga_tls_del_flow(fdev->mdev,
                                       MLX5_GET(tls_cmd, tls_cmd, swid),
                                       GFP_ATOMIC,
                                       MLX5_GET(tls_cmd, tls_cmd,
                                                direction_sx));
        }

        mlx5_fpga_tls_put_command_ctx(cmd);
}

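/* Send a CMD_SETUP_STREAM command and wait (killably) for the response.
 * Returns -EINTR if the wait was interrupted while the command was still
 * pending; in that case the context is released by the completion handler.
 */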
static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
                                          struct mlx5_setup_stream_context *ctx)
{
        struct mlx5_fpga_dma_buf *buf;
        void *cmd = ctx + 1;
        int status, ret = 0;

        buf = &ctx->cmd.buf;
        buf->sg[0].data = cmd;
        buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
        MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);

        init_completion(&ctx->comp);
        atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
        ctx->syndrome = -1;

        mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
                               mlx5_fpga_tls_setup_completion);
        wait_for_completion_killable(&ctx->comp);

        status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
        if (unlikely(status == MLX5_FPGA_CMD_PENDING))
                /* ctx is going to be released in mlx5_fpga_tls_setup_completion */
                return -EINTR;

        if (unlikely(ctx->syndrome))
                ret = -ENOMEM;

        mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
        return ret;
}

static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
                                        struct mlx5_fpga_dma_buf *buf)
{
        struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;

        mlx5_fpga_tls_cmd_complete(fdev, buf);
}

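/* Check whether the FPGA on this device is present and programmed with the
 * Mellanox TLS sandbox image (matching vendor, product and version).
 */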
bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
{
        if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
                return false;

        if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
            MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
                return false;

        if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
            MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
                return false;

        if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
                return false;

        return true;
}

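/* Query the SBU's extended TLS capabilities and translate them into the
 * driver's MLX5_ACCEL_TLS_* capability bitmask.
 */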
static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
                                  u32 *p_caps)
{
        int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
        u32 caps = 0;
        void *buf;

        buf = kzalloc(cap_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
        if (err)
                goto out;

        if (MLX5_GET(tls_extended_cap, buf, tx))
                caps |= MLX5_ACCEL_TLS_TX;
        if (MLX5_GET(tls_extended_cap, buf, rx))
                caps |= MLX5_ACCEL_TLS_RX;
        if (MLX5_GET(tls_extended_cap, buf, tls_v12))
                caps |= MLX5_ACCEL_TLS_V12;
        if (MLX5_GET(tls_extended_cap, buf, tls_v13))
                caps |= MLX5_ACCEL_TLS_V13;
        if (MLX5_GET(tls_extended_cap, buf, lro))
                caps |= MLX5_ACCEL_TLS_LRO;
        if (MLX5_GET(tls_extended_cap, buf, ipv6))
                caps |= MLX5_ACCEL_TLS_IPV6;

        if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
                caps |= MLX5_ACCEL_TLS_AES_GCM128;
        if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
                caps |= MLX5_ACCEL_TLS_AES_GCM256;

        *p_caps = caps;
        err = 0;
out:
        kfree(buf);
        return err;
}

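/* Initialize TLS offload for a device: verify the FPGA image and that it
 * advertises the required capabilities (TLS 1.2 / AES-GCM-128), create the
 * command connection to the SBU, and set up the swid idrs and the
 * pending-command list.
 */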
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
{
        struct mlx5_fpga_device *fdev = mdev->fpga;
        struct mlx5_fpga_conn_attr init_attr = {0};
        struct mlx5_fpga_conn *conn;
        struct mlx5_fpga_tls *tls;
        int err = 0;

        if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
                return 0;

        tls = kzalloc(sizeof(*tls), GFP_KERNEL);
        if (!tls)
                return -ENOMEM;

        err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
        if (err)
                goto error;

        if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
                err = -ENOTSUPP;
                goto error;
        }

        init_attr.rx_size = SBU_QP_QUEUE_SIZE;
        init_attr.tx_size = SBU_QP_QUEUE_SIZE;
        init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
        init_attr.cb_arg = fdev;
        conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
        if (IS_ERR(conn)) {
                err = PTR_ERR(conn);
                mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
                              err);
                goto error;
        }

        tls->conn = conn;
        spin_lock_init(&tls->pending_cmds_lock);
        INIT_LIST_HEAD(&tls->pending_cmds);

        idr_init(&tls->tx_idr);
        idr_init(&tls->rx_idr);
        spin_lock_init(&tls->tx_idr_spinlock);
        spin_lock_init(&tls->rx_idr_spinlock);
        fdev->tls = tls;
        return 0;

error:
        kfree(tls);
        return err;
}

void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
{
        struct mlx5_fpga_device *fdev = mdev->fpga;

        if (!fdev || !fdev->tls)
                return;

        mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
        kfree(fdev->tls);
        fdev->tls = NULL;
}

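/* Fill the key material fields of a tls_cmd for AES-GCM-128: the record
 * sequence number, the implicit IV (salt) and the key, which is written
 * twice as the device expects.
 */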
static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
                                             struct tls_crypto_info *info,
                                             __be64 *rcd_sn)
{
        struct tls12_crypto_info_aes_gcm_128 *crypto_info =
            (struct tls12_crypto_info_aes_gcm_128 *)info;

        memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
               TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

        memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
               crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
        memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
               crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

        /* in AES-GCM 128 we need to write the key twice */
        memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
                   TLS_CIPHER_AES_GCM_128_KEY_SIZE,
               crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

        MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
}

static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
                                          struct tls_crypto_info *crypto_info)
{
        __be64 rcd_sn;

        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
                        return -EINVAL;
                mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

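/* Build a setup-stream command from the flow and key material and send it,
 * waiting synchronously for the device's response.
 */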
static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
                                   struct tls_crypto_info *crypto_info,
                                   u32 swid, u32 tcp_sn)
{
        u32 caps = mlx5_fpga_tls_device_caps(mdev);
        struct mlx5_setup_stream_context *ctx;
        int ret = -ENOMEM;
        size_t cmd_size;
        void *cmd;

        cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
        ctx = kzalloc(cmd_size, GFP_KERNEL);
        if (!ctx)
                goto out;

        cmd = ctx + 1;
        ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
        if (ret)
                goto free_ctx;

        mlx5_fpga_tls_flow_to_cmd(flow, cmd);

        MLX5_SET(tls_cmd, cmd, swid, swid);
        MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);

        return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);

free_ctx:
        kfree(ctx);
out:
        return ret;
}

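/* Install a new offloaded TLS context: allocate a swid in the appropriate
 * idr, then set up the stream on the device. If the setup wait is
 * interrupted (-EINTR) the swid is not released here, since the
 * abandoned-command path in mlx5_fpga_tls_setup_completion is responsible
 * for tearing the context down.
 */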
int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
                           struct tls_crypto_info *crypto_info,
                           u32 start_offload_tcp_sn, u32 *p_swid,
                           bool direction_sx)
{
        struct mlx5_fpga_tls *tls = mdev->fpga->tls;
        int ret = -ENOMEM;
        u32 swid;

        if (direction_sx)
                ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
                                               &tls->tx_idr_spinlock, flow);
        else
                ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
                                               &tls->rx_idr_spinlock, flow);

        if (ret < 0)
                return ret;

        swid = ret;
        MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);

        ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
                                      start_offload_tcp_sn);
        if (ret && ret != -EINTR)
                goto free_swid;

        *p_swid = swid;
        return 0;
free_swid:
        if (direction_sx)
                mlx5_fpga_tls_release_swid(&tls->tx_idr,
                                           &tls->tx_idr_spinlock, swid);
        else
                mlx5_fpga_tls_release_swid(&tls->rx_idr,
                                           &tls->rx_idr_spinlock, swid);

        return ret;
}