asedeno.scripts.mit.edu Git - linux.git/blob - drivers/nvme/host/tcp.c
blob 088dac0d97c46129e454b66d0379686a17940f10
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics TCP host.
4  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
12 #include <net/sock.h>
13 #include <net/tcp.h>
14 #include <linux/blk-mq.h>
15 #include <crypto/hash.h>
16
17 #include "nvme.h"
18 #include "fabrics.h"
19
20 struct nvme_tcp_queue;
21
22 enum nvme_tcp_send_state {
23         NVME_TCP_SEND_CMD_PDU = 0,
24         NVME_TCP_SEND_H2C_PDU,
25         NVME_TCP_SEND_DATA,
26         NVME_TCP_SEND_DDGST,
27 };
28
29 struct nvme_tcp_request {
30         struct nvme_request     req;
31         void                    *pdu;
32         struct nvme_tcp_queue   *queue;
33         u32                     data_len;
34         u32                     pdu_len;
35         u32                     pdu_sent;
36         u16                     ttag;
37         struct list_head        entry;
38         __le32                  ddgst;
39
40         struct bio              *curr_bio;
41         struct iov_iter         iter;
42
43         /* send state */
44         size_t                  offset;
45         size_t                  data_sent;
46         enum nvme_tcp_send_state state;
47 };
48
49 enum nvme_tcp_queue_flags {
50         NVME_TCP_Q_ALLOCATED    = 0,
51         NVME_TCP_Q_LIVE         = 1,
52 };
53
54 enum nvme_tcp_recv_state {
55         NVME_TCP_RECV_PDU = 0,
56         NVME_TCP_RECV_DATA,
57         NVME_TCP_RECV_DDGST,
58 };
59
60 struct nvme_tcp_ctrl;
61 struct nvme_tcp_queue {
62         struct socket           *sock;
63         struct work_struct      io_work;
64         int                     io_cpu;
65
66         spinlock_t              lock;
67         struct list_head        send_list;
68
69         /* recv state */
70         void                    *pdu;
71         int                     pdu_remaining;
72         int                     pdu_offset;
73         size_t                  data_remaining;
74         size_t                  ddgst_remaining;
75
76         /* send state */
77         struct nvme_tcp_request *request;
78
79         int                     queue_size;
80         size_t                  cmnd_capsule_len;
81         struct nvme_tcp_ctrl    *ctrl;
82         unsigned long           flags;
83         bool                    rd_enabled;
84
85         bool                    hdr_digest;
86         bool                    data_digest;
87         struct ahash_request    *rcv_hash;
88         struct ahash_request    *snd_hash;
89         __le32                  exp_ddgst;
90         __le32                  recv_ddgst;
91
92         struct page_frag_cache  pf_cache;
93
94         void (*state_change)(struct sock *);
95         void (*data_ready)(struct sock *);
96         void (*write_space)(struct sock *);
97 };
98
99 struct nvme_tcp_ctrl {
100         /* read only in the hot path */
101         struct nvme_tcp_queue   *queues;
102         struct blk_mq_tag_set   tag_set;
103
104         /* other member variables */
105         struct list_head        list;
106         struct blk_mq_tag_set   admin_tag_set;
107         struct sockaddr_storage addr;
108         struct sockaddr_storage src_addr;
109         struct nvme_ctrl        ctrl;
110
111         struct work_struct      err_work;
112         struct delayed_work     connect_work;
113         struct nvme_tcp_request async_req;
114         u32                     io_queues[HCTX_MAX_TYPES];
115 };
116
117 static LIST_HEAD(nvme_tcp_ctrl_list);
118 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
119 static struct workqueue_struct *nvme_tcp_wq;
120 static struct blk_mq_ops nvme_tcp_mq_ops;
121 static struct blk_mq_ops nvme_tcp_admin_mq_ops;
122
123 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
124 {
125         return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
126 }
127
128 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
129 {
130         return queue - queue->ctrl->queues;
131 }
132
133 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
134 {
135         u32 queue_idx = nvme_tcp_queue_id(queue);
136
137         if (queue_idx == 0)
138                 return queue->ctrl->admin_tag_set.tags[queue_idx];
139         return queue->ctrl->tag_set.tags[queue_idx - 1];
140 }
141
142 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
143 {
144         return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
145 }
146
147 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
148 {
149         return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
150 }
151
152 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
153 {
154         return queue->cmnd_capsule_len - sizeof(struct nvme_command);
155 }
156
157 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
158 {
159         return req == &req->queue->ctrl->async_req;
160 }
161
162 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
163 {
164         struct request *rq;
165         unsigned int bytes;
166
167         if (unlikely(nvme_tcp_async_req(req)))
168                 return false; /* async events don't have a request */
169
170         rq = blk_mq_rq_from_pdu(req);
171         bytes = blk_rq_payload_bytes(rq);
172
173         return rq_data_dir(rq) == WRITE && bytes &&
174                 bytes <= nvme_tcp_inline_data_size(req->queue);
175 }
176
177 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
178 {
179         return req->iter.bvec->bv_page;
180 }
181
182 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
183 {
184         return req->iter.bvec->bv_offset + req->iter.iov_offset;
185 }
186
187 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
188 {
189         return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
190                         req->pdu_len - req->pdu_sent);
191 }
192
193 static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
194 {
195         return req->iter.iov_offset;
196 }
197
198 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
199 {
200         return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
201                         req->pdu_len - req->pdu_sent : 0;
202 }
203
204 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
205                 int len)
206 {
207         return nvme_tcp_pdu_data_left(req) <= len;
208 }
209
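/*
 * Point req->iter at the request's payload: either the single special
 * payload bvec (e.g. a discard payload) or the bvecs of the current bio,
 * starting at whatever offset the bio iterator has already consumed.
 */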
210 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
211                 unsigned int dir)
212 {
213         struct request *rq = blk_mq_rq_from_pdu(req);
214         struct bio_vec *vec;
215         unsigned int size;
216         int nsegs;
217         size_t offset;
218
219         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
220                 vec = &rq->special_vec;
221                 nsegs = 1;
222                 size = blk_rq_payload_bytes(rq);
223                 offset = 0;
224         } else {
225                 struct bio *bio = req->curr_bio;
226
227                 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
228                 nsegs = bio_segments(bio);
229                 size = bio->bi_iter.bi_size;
230                 offset = bio->bi_iter.bi_bvec_done;
231         }
232
233         iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
234         req->iter.iov_offset = offset;
235 }
236
237 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
238                 int len)
239 {
240         req->data_sent += len;
241         req->pdu_sent += len;
242         iov_iter_advance(&req->iter, len);
243         if (!iov_iter_count(&req->iter) &&
244             req->data_sent < req->data_len) {
245                 req->curr_bio = req->curr_bio->bi_next;
246                 nvme_tcp_init_iter(req, WRITE);
247         }
248 }
249
250 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
251 {
252         struct nvme_tcp_queue *queue = req->queue;
253
254         spin_lock(&queue->lock);
255         list_add_tail(&req->entry, &queue->send_list);
256         spin_unlock(&queue->lock);
257
258         queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
259 }
260
261 static inline struct nvme_tcp_request *
262 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
263 {
264         struct nvme_tcp_request *req;
265
266         spin_lock(&queue->lock);
267         req = list_first_entry_or_null(&queue->send_list,
268                         struct nvme_tcp_request, entry);
269         if (req)
270                 list_del(&req->entry);
271         spin_unlock(&queue->lock);
272
273         return req;
274 }
275
276 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
277                 __le32 *dgst)
278 {
279         ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
280         crypto_ahash_final(hash);
281 }
282
283 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
284                 struct page *page, off_t off, size_t len)
285 {
286         struct scatterlist sg;
287
288         sg_init_marker(&sg, 1);
289         sg_set_page(&sg, page, len, off);
290         ahash_request_set_crypt(hash, &sg, NULL, len);
291         crypto_ahash_update(hash);
292 }
293
294 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
295                 void *pdu, size_t len)
296 {
297         struct scatterlist sg;
298
299         sg_init_one(&sg, pdu, len);
300         ahash_request_set_crypt(hash, &sg, pdu + len, len);
301         crypto_ahash_digest(hash);
302 }
303
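/*
 * Verify the received header digest: after checking that the HDGST flag
 * is actually set, recompute the CRC32C over the PDU header in place and
 * compare it with the digest the controller placed after hdr->hlen bytes.
 */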
304 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
305                 void *pdu, size_t pdu_len)
306 {
307         struct nvme_tcp_hdr *hdr = pdu;
308         __le32 recv_digest;
309         __le32 exp_digest;
310
311         if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
312                 dev_err(queue->ctrl->ctrl.device,
313                         "queue %d: header digest flag is cleared\n",
314                         nvme_tcp_queue_id(queue));
315                 return -EPROTO;
316         }
317
318         recv_digest = *(__le32 *)(pdu + hdr->hlen);
319         nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
320         exp_digest = *(__le32 *)(pdu + hdr->hlen);
321         if (recv_digest != exp_digest) {
322                 dev_err(queue->ctrl->ctrl.device,
323                         "header digest error: recv %#x expected %#x\n",
324                         le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
325                 return -EIO;
326         }
327
328         return 0;
329 }
330
331 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
332 {
333         struct nvme_tcp_hdr *hdr = pdu;
334         u8 digest_len = nvme_tcp_hdgst_len(queue);
335         u32 len;
336
337         len = le32_to_cpu(hdr->plen) - hdr->hlen -
338                 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
339
340         if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
341                 dev_err(queue->ctrl->ctrl.device,
342                         "queue %d: data digest flag is cleared\n",
343                         nvme_tcp_queue_id(queue));
344                 return -EPROTO;
345         }
346         crypto_ahash_init(queue->rcv_hash);
347
348         return 0;
349 }
350
351 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
352                 struct request *rq, unsigned int hctx_idx)
353 {
354         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
355
356         page_frag_free(req->pdu);
357 }
358
359 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
360                 struct request *rq, unsigned int hctx_idx,
361                 unsigned int numa_node)
362 {
363         struct nvme_tcp_ctrl *ctrl = set->driver_data;
364         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
365         int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
366         struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
367         u8 hdgst = nvme_tcp_hdgst_len(queue);
368
369         req->pdu = page_frag_alloc(&queue->pf_cache,
370                 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
371                 GFP_KERNEL | __GFP_ZERO);
372         if (!req->pdu)
373                 return -ENOMEM;
374
375         req->queue = queue;
376         nvme_req(rq)->ctrl = &ctrl->ctrl;
377
378         return 0;
379 }
380
381 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
382                 unsigned int hctx_idx)
383 {
384         struct nvme_tcp_ctrl *ctrl = data;
385         struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
386
387         hctx->driver_data = queue;
388         return 0;
389 }
390
391 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
392                 unsigned int hctx_idx)
393 {
394         struct nvme_tcp_ctrl *ctrl = data;
395         struct nvme_tcp_queue *queue = &ctrl->queues[0];
396
397         hctx->driver_data = queue;
398         return 0;
399 }
400
401 static enum nvme_tcp_recv_state
402 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
403 {
404         return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
405                 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
406                 NVME_TCP_RECV_DATA;
407 }
408
409 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
410 {
411         queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
412                                 nvme_tcp_hdgst_len(queue);
413         queue->pdu_offset = 0;
414         queue->data_remaining = -1;
415         queue->ddgst_remaining = 0;
416 }
417
418 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
419 {
420         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
421                 return;
422
423         queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
424 }
425
426 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
427                 struct nvme_completion *cqe)
428 {
429         struct request *rq;
430
431         rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
432         if (!rq) {
433                 dev_err(queue->ctrl->ctrl.device,
434                         "queue %d tag 0x%x not found\n",
435                         nvme_tcp_queue_id(queue), cqe->command_id);
436                 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
437                 return -EINVAL;
438         }
439
440         nvme_end_request(rq, cqe->status, cqe->result);
441
442         return 0;
443 }
444
445 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
446                 struct nvme_tcp_data_pdu *pdu)
447 {
448         struct request *rq;
449
450         rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
451         if (!rq) {
452                 dev_err(queue->ctrl->ctrl.device,
453                         "queue %d tag %#x not found\n",
454                         nvme_tcp_queue_id(queue), pdu->command_id);
455                 return -ENOENT;
456         }
457
458         if (!blk_rq_payload_bytes(rq)) {
459                 dev_err(queue->ctrl->ctrl.device,
460                         "queue %d tag %#x unexpected data\n",
461                         nvme_tcp_queue_id(queue), rq->tag);
462                 return -EIO;
463         }
464
465         queue->data_remaining = le32_to_cpu(pdu->data_length);
466
467         if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
468             unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
469                 dev_err(queue->ctrl->ctrl.device,
470                         "queue %d tag %#x SUCCESS set but not last PDU\n",
471                         nvme_tcp_queue_id(queue), rq->tag);
472                 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
473                 return -EPROTO;
474         }
475
476         return 0;
477 }
478
479 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
480                 struct nvme_tcp_rsp_pdu *pdu)
481 {
482         struct nvme_completion *cqe = &pdu->cqe;
483         int ret = 0;
484
485         /*
486          * AEN requests are special as they don't time out and can
487          * survive any kind of queue freeze and often don't respond to
488          * aborts.  We don't even bother to allocate a struct request
489          * for them but rather special case them here.
490          */
491         if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
492             cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
493                 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
494                                 &cqe->result);
495         else
496                 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
497
498         return ret;
499 }
500
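/*
 * Prepare a host-to-controller data PDU in response to an R2T: validate
 * that the requested transfer stays within the command's data and does
 * not move backwards, then fill in the H2C data header, reusing the
 * request's PDU buffer.
 */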
501 static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
502                 struct nvme_tcp_r2t_pdu *pdu)
503 {
504         struct nvme_tcp_data_pdu *data = req->pdu;
505         struct nvme_tcp_queue *queue = req->queue;
506         struct request *rq = blk_mq_rq_from_pdu(req);
507         u8 hdgst = nvme_tcp_hdgst_len(queue);
508         u8 ddgst = nvme_tcp_ddgst_len(queue);
509
510         req->pdu_len = le32_to_cpu(pdu->r2t_length);
511         req->pdu_sent = 0;
512
513         if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
514                 dev_err(queue->ctrl->ctrl.device,
515                         "req %d r2t len %u exceeded data len %u (%zu sent)\n",
516                         rq->tag, req->pdu_len, req->data_len,
517                         req->data_sent);
518                 return -EPROTO;
519         }
520
521         if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
522                 dev_err(queue->ctrl->ctrl.device,
523                         "req %d unexpected r2t offset %u (expected %zu)\n",
524                         rq->tag, le32_to_cpu(pdu->r2t_offset),
525                         req->data_sent);
526                 return -EPROTO;
527         }
528
529         memset(data, 0, sizeof(*data));
530         data->hdr.type = nvme_tcp_h2c_data;
531         data->hdr.flags = NVME_TCP_F_DATA_LAST;
532         if (queue->hdr_digest)
533                 data->hdr.flags |= NVME_TCP_F_HDGST;
534         if (queue->data_digest)
535                 data->hdr.flags |= NVME_TCP_F_DDGST;
536         data->hdr.hlen = sizeof(*data);
537         data->hdr.pdo = data->hdr.hlen + hdgst;
538         data->hdr.plen =
539                 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
540         data->ttag = pdu->ttag;
541         data->command_id = rq->tag;
542         data->data_offset = cpu_to_le32(req->data_sent);
543         data->data_length = cpu_to_le32(req->pdu_len);
544         return 0;
545 }
546
547 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
548                 struct nvme_tcp_r2t_pdu *pdu)
549 {
550         struct nvme_tcp_request *req;
551         struct request *rq;
552         int ret;
553
554         rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
555         if (!rq) {
556                 dev_err(queue->ctrl->ctrl.device,
557                         "queue %d tag %#x not found\n",
558                         nvme_tcp_queue_id(queue), pdu->command_id);
559                 return -ENOENT;
560         }
561         req = blk_mq_rq_to_pdu(rq);
562
563         ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
564         if (unlikely(ret))
565                 return ret;
566
567         req->state = NVME_TCP_SEND_H2C_PDU;
568         req->offset = 0;
569
570         nvme_tcp_queue_request(req);
571
572         return 0;
573 }
574
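/*
 * Reassemble a PDU header from the socket stream: copy whatever part of
 * the header is available in this skb, and once it is complete verify
 * the header digest, prime the data digest if needed, and dispatch on
 * the PDU type (C2H data, response capsule or R2T).
 */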
575 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
576                 unsigned int *offset, size_t *len)
577 {
578         struct nvme_tcp_hdr *hdr;
579         char *pdu = queue->pdu;
580         size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
581         int ret;
582
583         ret = skb_copy_bits(skb, *offset,
584                 &pdu[queue->pdu_offset], rcv_len);
585         if (unlikely(ret))
586                 return ret;
587
588         queue->pdu_remaining -= rcv_len;
589         queue->pdu_offset += rcv_len;
590         *offset += rcv_len;
591         *len -= rcv_len;
592         if (queue->pdu_remaining)
593                 return 0;
594
595         hdr = queue->pdu;
596         if (queue->hdr_digest) {
597                 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
598                 if (unlikely(ret))
599                         return ret;
600         }
601
602
603         if (queue->data_digest) {
604                 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
605                 if (unlikely(ret))
606                         return ret;
607         }
608
609         switch (hdr->type) {
610         case nvme_tcp_c2h_data:
611                 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
612         case nvme_tcp_rsp:
613                 nvme_tcp_init_recv_ctx(queue);
614                 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
615         case nvme_tcp_r2t:
616                 nvme_tcp_init_recv_ctx(queue);
617                 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
618         default:
619                 dev_err(queue->ctrl->ctrl.device,
620                         "unsupported pdu type (%d)\n", hdr->type);
621                 return -EINVAL;
622         }
623 }
624
625 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
626 {
627         union nvme_result res = {};
628
629         nvme_end_request(rq, cpu_to_le16(status << 1), res);
630 }
631
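/*
 * Copy C2H data from the skb into the request's bio pages through
 * req->iter, advancing to the next bio when the current one is
 * exhausted and updating the receive-side data digest on the fly.
 * When all data has arrived, either switch to digest reception or
 * complete the request and reset the receive context.
 */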
632 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
633                               unsigned int *offset, size_t *len)
634 {
635         struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
636         struct nvme_tcp_request *req;
637         struct request *rq;
638
639         rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
640         if (!rq) {
641                 dev_err(queue->ctrl->ctrl.device,
642                         "queue %d tag %#x not found\n",
643                         nvme_tcp_queue_id(queue), pdu->command_id);
644                 return -ENOENT;
645         }
646         req = blk_mq_rq_to_pdu(rq);
647
648         while (true) {
649                 int recv_len, ret;
650
651                 recv_len = min_t(size_t, *len, queue->data_remaining);
652                 if (!recv_len)
653                         break;
654
655                 if (!iov_iter_count(&req->iter)) {
656                         req->curr_bio = req->curr_bio->bi_next;
657
658                         /*
659                          * If we don`t have any bios it means that controller
660                          * sent more data than we requested, hence error
661                          */
662                         if (!req->curr_bio) {
663                                 dev_err(queue->ctrl->ctrl.device,
664                                         "queue %d no space in request %#x",
665                                         nvme_tcp_queue_id(queue), rq->tag);
666                                 nvme_tcp_init_recv_ctx(queue);
667                                 return -EIO;
668                         }
669                         nvme_tcp_init_iter(req, READ);
670                 }
671
672                 /* we can read only from what is left in this bio */
673                 recv_len = min_t(size_t, recv_len,
674                                 iov_iter_count(&req->iter));
675
676                 if (queue->data_digest)
677                         ret = skb_copy_and_hash_datagram_iter(skb, *offset,
678                                 &req->iter, recv_len, queue->rcv_hash);
679                 else
680                         ret = skb_copy_datagram_iter(skb, *offset,
681                                         &req->iter, recv_len);
682                 if (ret) {
683                         dev_err(queue->ctrl->ctrl.device,
684                                 "queue %d failed to copy request %#x data",
685                                 nvme_tcp_queue_id(queue), rq->tag);
686                         return ret;
687                 }
688
689                 *len -= recv_len;
690                 *offset += recv_len;
691                 queue->data_remaining -= recv_len;
692         }
693
694         if (!queue->data_remaining) {
695                 if (queue->data_digest) {
696                         nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
697                         queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
698                 } else {
699                         if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
700                                 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
701                         nvme_tcp_init_recv_ctx(queue);
702                 }
703         }
704
705         return 0;
706 }
707
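/*
 * Collect the 4-byte data digest that trails the C2H data and compare
 * it against the digest computed while receiving; complete the request
 * here if the controller indicated success in the data PDU.
 */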
708 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
709                 struct sk_buff *skb, unsigned int *offset, size_t *len)
710 {
711         struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
712         char *ddgst = (char *)&queue->recv_ddgst;
713         size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
714         off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
715         int ret;
716
717         ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
718         if (unlikely(ret))
719                 return ret;
720
721         queue->ddgst_remaining -= recv_len;
722         *offset += recv_len;
723         *len -= recv_len;
724         if (queue->ddgst_remaining)
725                 return 0;
726
727         if (queue->recv_ddgst != queue->exp_ddgst) {
728                 dev_err(queue->ctrl->ctrl.device,
729                         "data digest error: recv %#x expected %#x\n",
730                         le32_to_cpu(queue->recv_ddgst),
731                         le32_to_cpu(queue->exp_ddgst));
732                 return -EIO;
733         }
734
735         if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
736                 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
737                                                 pdu->command_id);
738
739                 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
740         }
741
742         nvme_tcp_init_recv_ctx(queue);
743         return 0;
744 }
745
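/*
 * read_sock() callback: run the per-queue receive state machine
 * (PDU header -> data -> data digest) over the bytes available in
 * this skb, and trigger error recovery on any failure.
 */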
746 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
747                              unsigned int offset, size_t len)
748 {
749         struct nvme_tcp_queue *queue = desc->arg.data;
750         size_t consumed = len;
751         int result;
752
753         while (len) {
754                 switch (nvme_tcp_recv_state(queue)) {
755                 case NVME_TCP_RECV_PDU:
756                         result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
757                         break;
758                 case NVME_TCP_RECV_DATA:
759                         result = nvme_tcp_recv_data(queue, skb, &offset, &len);
760                         break;
761                 case NVME_TCP_RECV_DDGST:
762                         result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
763                         break;
764                 default:
765                         result = -EFAULT;
766                 }
767                 if (result) {
768                         dev_err(queue->ctrl->ctrl.device,
769                                 "receive failed: %d\n", result);
770                         queue->rd_enabled = false;
771                         nvme_tcp_error_recovery(&queue->ctrl->ctrl);
772                         return result;
773                 }
774         }
775
776         return consumed;
777 }
778
779 static void nvme_tcp_data_ready(struct sock *sk)
780 {
781         struct nvme_tcp_queue *queue;
782
783         read_lock(&sk->sk_callback_lock);
784         queue = sk->sk_user_data;
785         if (likely(queue && queue->rd_enabled))
786                 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
787         read_unlock(&sk->sk_callback_lock);
788 }
789
790 static void nvme_tcp_write_space(struct sock *sk)
791 {
792         struct nvme_tcp_queue *queue;
793
794         read_lock_bh(&sk->sk_callback_lock);
795         queue = sk->sk_user_data;
796         if (likely(queue && sk_stream_is_writeable(sk))) {
797                 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
798                 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
799         }
800         read_unlock_bh(&sk->sk_callback_lock);
801 }
802
803 static void nvme_tcp_state_change(struct sock *sk)
804 {
805         struct nvme_tcp_queue *queue;
806
807         read_lock(&sk->sk_callback_lock);
808         queue = sk->sk_user_data;
809         if (!queue)
810                 goto done;
811
812         switch (sk->sk_state) {
813         case TCP_CLOSE:
814         case TCP_CLOSE_WAIT:
815         case TCP_LAST_ACK:
816         case TCP_FIN_WAIT1:
817         case TCP_FIN_WAIT2:
819                 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
820                 break;
821         default:
822                 dev_info(queue->ctrl->ctrl.device,
823                         "queue %d socket state %d\n",
824                         nvme_tcp_queue_id(queue), sk->sk_state);
825         }
826
827         queue->state_change(sk);
828 done:
829         read_unlock(&sk->sk_callback_lock);
830 }
831
832 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
833 {
834         queue->request = NULL;
835 }
836
837 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
838 {
839         nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
840 }
841
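/*
 * Push the current request's data pages to the socket with
 * kernel_sendpage() (using sock_no_sendpage() for slab-backed pages),
 * updating the send-side data digest as we go, and switch to the DDGST
 * state (or complete the send) once the last byte of the PDU data is out.
 */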
842 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
843 {
844         struct nvme_tcp_queue *queue = req->queue;
845
846         while (true) {
847                 struct page *page = nvme_tcp_req_cur_page(req);
848                 size_t offset = nvme_tcp_req_cur_offset(req);
849                 size_t len = nvme_tcp_req_cur_length(req);
850                 bool last = nvme_tcp_pdu_last_send(req, len);
851                 int ret, flags = MSG_DONTWAIT;
852
853                 if (last && !queue->data_digest)
854                         flags |= MSG_EOR;
855                 else
856                         flags |= MSG_MORE;
857
858                 /* can't zcopy slab pages */
859                 if (unlikely(PageSlab(page))) {
860                         ret = sock_no_sendpage(queue->sock, page, offset, len,
861                                         flags);
862                 } else {
863                         ret = kernel_sendpage(queue->sock, page, offset, len,
864                                         flags);
865                 }
866                 if (ret <= 0)
867                         return ret;
868
869                 nvme_tcp_advance_req(req, ret);
870                 if (queue->data_digest)
871                         nvme_tcp_ddgst_update(queue->snd_hash, page,
872                                         offset, ret);
873
874                 /* fully successful last write */
875                 if (last && ret == len) {
876                         if (queue->data_digest) {
877                                 nvme_tcp_ddgst_final(queue->snd_hash,
878                                         &req->ddgst);
879                                 req->state = NVME_TCP_SEND_DDGST;
880                                 req->offset = 0;
881                         } else {
882                                 nvme_tcp_done_send_req(queue);
883                         }
884                         return 1;
885                 }
886         }
887         return -EAGAIN;
888 }
889
890 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
891 {
892         struct nvme_tcp_queue *queue = req->queue;
893         struct nvme_tcp_cmd_pdu *pdu = req->pdu;
894         bool inline_data = nvme_tcp_has_inline_data(req);
895         int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
896         u8 hdgst = nvme_tcp_hdgst_len(queue);
897         int len = sizeof(*pdu) + hdgst - req->offset;
898         int ret;
899
900         if (queue->hdr_digest && !req->offset)
901                 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
902
903         ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
904                         offset_in_page(pdu) + req->offset, len,  flags);
905         if (unlikely(ret <= 0))
906                 return ret;
907
908         len -= ret;
909         if (!len) {
910                 if (inline_data) {
911                         req->state = NVME_TCP_SEND_DATA;
912                         if (queue->data_digest)
913                                 crypto_ahash_init(queue->snd_hash);
914                         nvme_tcp_init_iter(req, WRITE);
915                 } else {
916                         nvme_tcp_done_send_req(queue);
917                 }
918                 return 1;
919         }
920         req->offset += ret;
921
922         return -EAGAIN;
923 }
924
925 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
926 {
927         struct nvme_tcp_queue *queue = req->queue;
928         struct nvme_tcp_data_pdu *pdu = req->pdu;
929         u8 hdgst = nvme_tcp_hdgst_len(queue);
930         int len = sizeof(*pdu) - req->offset + hdgst;
931         int ret;
932
933         if (queue->hdr_digest && !req->offset)
934                 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
935
936         ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
937                         offset_in_page(pdu) + req->offset, len,
938                         MSG_DONTWAIT | MSG_MORE);
939         if (unlikely(ret <= 0))
940                 return ret;
941
942         len -= ret;
943         if (!len) {
944                 req->state = NVME_TCP_SEND_DATA;
945                 if (queue->data_digest)
946                         crypto_ahash_init(queue->snd_hash);
947                 if (!req->data_sent)
948                         nvme_tcp_init_iter(req, WRITE);
949                 return 1;
950         }
951         req->offset += ret;
952
953         return -EAGAIN;
954 }
955
956 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
957 {
958         struct nvme_tcp_queue *queue = req->queue;
959         int ret;
960         struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
961         struct kvec iov = {
962                 .iov_base = &req->ddgst + req->offset,
963                 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
964         };
965
966         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
967         if (unlikely(ret <= 0))
968                 return ret;
969
970         if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
971                 nvme_tcp_done_send_req(queue);
972                 return 1;
973         }
974
975         req->offset += ret;
976         return -EAGAIN;
977 }
978
979 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
980 {
981         struct nvme_tcp_request *req;
982         int ret = 1;
983
984         if (!queue->request) {
985                 queue->request = nvme_tcp_fetch_request(queue);
986                 if (!queue->request)
987                         return 0;
988         }
989         req = queue->request;
990
991         if (req->state == NVME_TCP_SEND_CMD_PDU) {
992                 ret = nvme_tcp_try_send_cmd_pdu(req);
993                 if (ret <= 0)
994                         goto done;
995                 if (!nvme_tcp_has_inline_data(req))
996                         return ret;
997         }
998
999         if (req->state == NVME_TCP_SEND_H2C_PDU) {
1000                 ret = nvme_tcp_try_send_data_pdu(req);
1001                 if (ret <= 0)
1002                         goto done;
1003         }
1004
1005         if (req->state == NVME_TCP_SEND_DATA) {
1006                 ret = nvme_tcp_try_send_data(req);
1007                 if (ret <= 0)
1008                         goto done;
1009         }
1010
1011         if (req->state == NVME_TCP_SEND_DDGST)
1012                 ret = nvme_tcp_try_send_ddgst(req);
1013 done:
1014         if (ret == -EAGAIN)
1015                 ret = 0;
1016         return ret;
1017 }
1018
1019 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1020 {
1021         struct socket *sock = queue->sock;
1022         struct sock *sk = sock->sk;
1023         read_descriptor_t rd_desc;
1024         int consumed;
1025
1026         rd_desc.arg.data = queue;
1027         rd_desc.count = 1;
1028         lock_sock(sk);
1029         consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1030         release_sock(sk);
1031         return consumed;
1032 }
1033
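/*
 * Per-queue I/O context: alternate between sending queued requests and
 * reaping received data for up to about a millisecond, failing the
 * in-flight request on a send error, and reschedule ourselves if work
 * is still pending when the budget runs out.
 */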
1034 static void nvme_tcp_io_work(struct work_struct *w)
1035 {
1036         struct nvme_tcp_queue *queue =
1037                 container_of(w, struct nvme_tcp_queue, io_work);
1038         unsigned long start = jiffies + msecs_to_jiffies(1);
1039
1040         do {
1041                 bool pending = false;
1042                 int result;
1043
1044                 result = nvme_tcp_try_send(queue);
1045                 if (result > 0) {
1046                         pending = true;
1047                 } else if (unlikely(result < 0)) {
1048                         dev_err(queue->ctrl->ctrl.device,
1049                                 "failed to send request %d\n", result);
1050                         if (result != -EPIPE)
1051                                 nvme_tcp_fail_request(queue->request);
1052                         nvme_tcp_done_send_req(queue);
1053                         return;
1054                 }
1055
1056                 result = nvme_tcp_try_recv(queue);
1057                 if (result > 0)
1058                         pending = true;
1059
1060                 if (!pending)
1061                         return;
1062
1063         } while (!time_after(jiffies, start)); /* quota is exhausted */
1064
1065         queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1066 }
1067
1068 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1069 {
1070         struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1071
1072         ahash_request_free(queue->rcv_hash);
1073         ahash_request_free(queue->snd_hash);
1074         crypto_free_ahash(tfm);
1075 }
1076
1077 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1078 {
1079         struct crypto_ahash *tfm;
1080
1081         tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1082         if (IS_ERR(tfm))
1083                 return PTR_ERR(tfm);
1084
1085         queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1086         if (!queue->snd_hash)
1087                 goto free_tfm;
1088         ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1089
1090         queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1091         if (!queue->rcv_hash)
1092                 goto free_snd_hash;
1093         ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1094
1095         return 0;
1096 free_snd_hash:
1097         ahash_request_free(queue->snd_hash);
1098 free_tfm:
1099         crypto_free_ahash(tfm);
1100         return -ENOMEM;
1101 }
1102
1103 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1104 {
1105         struct nvme_tcp_request *async = &ctrl->async_req;
1106
1107         page_frag_free(async->pdu);
1108 }
1109
1110 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1111 {
1112         struct nvme_tcp_queue *queue = &ctrl->queues[0];
1113         struct nvme_tcp_request *async = &ctrl->async_req;
1114         u8 hdgst = nvme_tcp_hdgst_len(queue);
1115
1116         async->pdu = page_frag_alloc(&queue->pf_cache,
1117                 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1118                 GFP_KERNEL | __GFP_ZERO);
1119         if (!async->pdu)
1120                 return -ENOMEM;
1121
1122         async->queue = &ctrl->queues[0];
1123         return 0;
1124 }
1125
1126 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1127 {
1128         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1129         struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1130
1131         if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1132                 return;
1133
1134         if (queue->hdr_digest || queue->data_digest)
1135                 nvme_tcp_free_crypto(queue);
1136
1137         sock_release(queue->sock);
1138         kfree(queue->pdu);
1139 }
1140
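/*
 * NVMe/TCP initialize-connection handshake: send an ICReq on the freshly
 * connected socket and validate the controller's ICResp (PDU type,
 * length, PFV, digest settings and CPDA) before the queue is used for
 * command traffic.
 */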
1141 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1142 {
1143         struct nvme_tcp_icreq_pdu *icreq;
1144         struct nvme_tcp_icresp_pdu *icresp;
1145         struct msghdr msg = {};
1146         struct kvec iov;
1147         bool ctrl_hdgst, ctrl_ddgst;
1148         int ret;
1149
1150         icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1151         if (!icreq)
1152                 return -ENOMEM;
1153
1154         icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1155         if (!icresp) {
1156                 ret = -ENOMEM;
1157                 goto free_icreq;
1158         }
1159
1160         icreq->hdr.type = nvme_tcp_icreq;
1161         icreq->hdr.hlen = sizeof(*icreq);
1162         icreq->hdr.pdo = 0;
1163         icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1164         icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1165         icreq->maxr2t = 0; /* single inflight r2t supported */
1166         icreq->hpda = 0; /* no alignment constraint */
1167         if (queue->hdr_digest)
1168                 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1169         if (queue->data_digest)
1170                 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1171
1172         iov.iov_base = icreq;
1173         iov.iov_len = sizeof(*icreq);
1174         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1175         if (ret < 0)
1176                 goto free_icresp;
1177
1178         memset(&msg, 0, sizeof(msg));
1179         iov.iov_base = icresp;
1180         iov.iov_len = sizeof(*icresp);
1181         ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1182                         iov.iov_len, msg.msg_flags);
1183         if (ret < 0)
1184                 goto free_icresp;
1185
1186         ret = -EINVAL;
1187         if (icresp->hdr.type != nvme_tcp_icresp) {
1188                 pr_err("queue %d: bad type returned %d\n",
1189                         nvme_tcp_queue_id(queue), icresp->hdr.type);
1190                 goto free_icresp;
1191         }
1192
1193         if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1194                 pr_err("queue %d: bad pdu length returned %d\n",
1195                         nvme_tcp_queue_id(queue), icresp->hdr.plen);
1196                 goto free_icresp;
1197         }
1198
1199         if (icresp->pfv != NVME_TCP_PFV_1_0) {
1200                 pr_err("queue %d: bad pfv returned %d\n",
1201                         nvme_tcp_queue_id(queue), icresp->pfv);
1202                 goto free_icresp;
1203         }
1204
1205         ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1206         if ((queue->data_digest && !ctrl_ddgst) ||
1207             (!queue->data_digest && ctrl_ddgst)) {
1208                 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1209                         nvme_tcp_queue_id(queue),
1210                         queue->data_digest ? "enabled" : "disabled",
1211                         ctrl_ddgst ? "enabled" : "disabled");
1212                 goto free_icresp;
1213         }
1214
1215         ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1216         if ((queue->hdr_digest && !ctrl_hdgst) ||
1217             (!queue->hdr_digest && ctrl_hdgst)) {
1218                 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1219                         nvme_tcp_queue_id(queue),
1220                         queue->hdr_digest ? "enabled" : "disabled",
1221                         ctrl_hdgst ? "enabled" : "disabled");
1222                 goto free_icresp;
1223         }
1224
1225         if (icresp->cpda != 0) {
1226                 pr_err("queue %d: unsupported cpda returned %d\n",
1227                         nvme_tcp_queue_id(queue), icresp->cpda);
1228                 goto free_icresp;
1229         }
1230
1231         ret = 0;
1232 free_icresp:
1233         kfree(icresp);
1234 free_icreq:
1235         kfree(icreq);
1236         return ret;
1237 }
1238
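/*
 * Allocate and connect one queue: create the TCP socket, apply the
 * socket options we rely on (single SYN retry, TCP_NODELAY, zero
 * linger), optionally bind to the host traddr, set up digests and the
 * receive PDU buffer, connect to the target, run the ICReq/ICResp
 * exchange and finally install our socket callbacks.
 */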
1239 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1240                 int qid, size_t queue_size)
1241 {
1242         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1243         struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1244         struct linger sol = { .l_onoff = 1, .l_linger = 0 };
1245         int ret, opt, rcv_pdu_size, n;
1246
1247         queue->ctrl = ctrl;
1248         INIT_LIST_HEAD(&queue->send_list);
1249         spin_lock_init(&queue->lock);
1250         INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1251         queue->queue_size = queue_size;
1252
1253         if (qid > 0)
1254                 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1255         else
1256                 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1257                                                 NVME_TCP_ADMIN_CCSZ;
1258
1259         ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1260                         IPPROTO_TCP, &queue->sock);
1261         if (ret) {
1262                 dev_err(ctrl->ctrl.device,
1263                         "failed to create socket: %d\n", ret);
1264                 return ret;
1265         }
1266
1267         /* Single syn retry */
1268         opt = 1;
1269         ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
1270                         (char *)&opt, sizeof(opt));
1271         if (ret) {
1272                 dev_err(ctrl->ctrl.device,
1273                         "failed to set TCP_SYNCNT sock opt %d\n", ret);
1274                 goto err_sock;
1275         }
1276
1277         /* Set TCP no delay */
1278         opt = 1;
1279         ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
1280                         TCP_NODELAY, (char *)&opt, sizeof(opt));
1281         if (ret) {
1282                 dev_err(ctrl->ctrl.device,
1283                         "failed to set TCP_NODELAY sock opt %d\n", ret);
1284                 goto err_sock;
1285         }
1286
1287         /*
1288          * Cleanup whatever is sitting in the TCP transmit queue on socket
1289          * close. This is done to prevent stale data from being sent should
1290          * the network connection be restored before TCP times out.
1291          */
1292         ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
1293                         (char *)&sol, sizeof(sol));
1294         if (ret) {
1295                 dev_err(ctrl->ctrl.device,
1296                         "failed to set SO_LINGER sock opt %d\n", ret);
1297                 goto err_sock;
1298         }
1299
1300         queue->sock->sk->sk_allocation = GFP_ATOMIC;
1301         if (!qid)
1302                 n = 0;
1303         else
1304                 n = (qid - 1) % num_online_cpus();
1305         queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1306         queue->request = NULL;
1307         queue->data_remaining = 0;
1308         queue->ddgst_remaining = 0;
1309         queue->pdu_remaining = 0;
1310         queue->pdu_offset = 0;
1311         sk_set_memalloc(queue->sock->sk);
1312
1313         if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
1314                 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1315                         sizeof(ctrl->src_addr));
1316                 if (ret) {
1317                         dev_err(ctrl->ctrl.device,
1318                                 "failed to bind queue %d socket %d\n",
1319                                 qid, ret);
1320                         goto err_sock;
1321                 }
1322         }
1323
1324         queue->hdr_digest = nctrl->opts->hdr_digest;
1325         queue->data_digest = nctrl->opts->data_digest;
1326         if (queue->hdr_digest || queue->data_digest) {
1327                 ret = nvme_tcp_alloc_crypto(queue);
1328                 if (ret) {
1329                         dev_err(ctrl->ctrl.device,
1330                                 "failed to allocate queue %d crypto\n", qid);
1331                         goto err_sock;
1332                 }
1333         }
1334
1335         rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1336                         nvme_tcp_hdgst_len(queue);
1337         queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1338         if (!queue->pdu) {
1339                 ret = -ENOMEM;
1340                 goto err_crypto;
1341         }
1342
1343         dev_dbg(ctrl->ctrl.device, "connecting queue %d\n",
1344                         nvme_tcp_queue_id(queue));
1345
1346         ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1347                 sizeof(ctrl->addr), 0);
1348         if (ret) {
1349                 dev_err(ctrl->ctrl.device,
1350                         "failed to connect socket: %d\n", ret);
1351                 goto err_rcv_pdu;
1352         }
1353
1354         ret = nvme_tcp_init_connection(queue);
1355         if (ret)
1356                 goto err_init_connect;
1357
1358         queue->rd_enabled = true;
1359         set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1360         nvme_tcp_init_recv_ctx(queue);
1361
1362         write_lock_bh(&queue->sock->sk->sk_callback_lock);
1363         queue->sock->sk->sk_user_data = queue;
1364         queue->state_change = queue->sock->sk->sk_state_change;
1365         queue->data_ready = queue->sock->sk->sk_data_ready;
1366         queue->write_space = queue->sock->sk->sk_write_space;
1367         queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1368         queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1369         queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1370         write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1371
1372         return 0;
1373
1374 err_init_connect:
1375         kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1376 err_rcv_pdu:
1377         kfree(queue->pdu);
1378 err_crypto:
1379         if (queue->hdr_digest || queue->data_digest)
1380                 nvme_tcp_free_crypto(queue);
1381 err_sock:
1382         sock_release(queue->sock);
1383         queue->sock = NULL;
1384         return ret;
1385 }
1386
1387 static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1388 {
1389         struct socket *sock = queue->sock;
1390
1391         write_lock_bh(&sock->sk->sk_callback_lock);
1392         sock->sk->sk_user_data  = NULL;
1393         sock->sk->sk_data_ready = queue->data_ready;
1394         sock->sk->sk_state_change = queue->state_change;
1395         sock->sk->sk_write_space  = queue->write_space;
1396         write_unlock_bh(&sock->sk->sk_callback_lock);
1397 }
1398
1399 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1400 {
1401         kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1402         nvme_tcp_restore_sock_calls(queue);
1403         cancel_work_sync(&queue->io_work);
1404 }
1405
1406 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1407 {
1408         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1409         struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1410
1411         if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1412                 return;
1413
1414         __nvme_tcp_stop_queue(queue);
1415 }
1416
1417 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1418 {
1419         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1420         int ret;
1421
1422         if (idx)
1423                 ret = nvmf_connect_io_queue(nctrl, idx, false);
1424         else
1425                 ret = nvmf_connect_admin_queue(nctrl);
1426
1427         if (!ret) {
1428                 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1429         } else {
1430                 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1431                         __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1432                 dev_err(nctrl->device,
1433                         "failed to connect queue: %d ret=%d\n", idx, ret);
1434         }
1435         return ret;
1436 }
1437
1438 static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1439                 bool admin)
1440 {
1441         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1442         struct blk_mq_tag_set *set;
1443         int ret;
1444
1445         if (admin) {
1446                 set = &ctrl->admin_tag_set;
1447                 memset(set, 0, sizeof(*set));
1448                 set->ops = &nvme_tcp_admin_mq_ops;
1449                 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1450                 set->reserved_tags = 2; /* connect + keep-alive */
1451                 set->numa_node = NUMA_NO_NODE;
1452                 set->cmd_size = sizeof(struct nvme_tcp_request);
1453                 set->driver_data = ctrl;
1454                 set->nr_hw_queues = 1;
1455                 set->timeout = ADMIN_TIMEOUT;
1456         } else {
1457                 set = &ctrl->tag_set;
1458                 memset(set, 0, sizeof(*set));
1459                 set->ops = &nvme_tcp_mq_ops;
1460                 set->queue_depth = nctrl->sqsize + 1;
1461                 set->reserved_tags = 1; /* fabric connect */
1462                 set->numa_node = NUMA_NO_NODE;
1463                 set->flags = BLK_MQ_F_SHOULD_MERGE;
1464                 set->cmd_size = sizeof(struct nvme_tcp_request);
1465                 set->driver_data = ctrl;
1466                 set->nr_hw_queues = nctrl->queue_count - 1;
1467                 set->timeout = NVME_IO_TIMEOUT;
1468                 set->nr_maps = 2 /* default + read */;
1469         }
1470
1471         ret = blk_mq_alloc_tag_set(set);
1472         if (ret)
1473                 return ERR_PTR(ret);
1474
1475         return set;
1476 }
1477
1478 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1479 {
1480         if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1481                 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1482                 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1483         }
1484
1485         nvme_tcp_free_queue(ctrl, 0);
1486 }
1487
1488 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1489 {
1490         int i;
1491
1492         for (i = 1; i < ctrl->queue_count; i++)
1493                 nvme_tcp_free_queue(ctrl, i);
1494 }
1495
1496 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1497 {
1498         int i;
1499
1500         for (i = 1; i < ctrl->queue_count; i++)
1501                 nvme_tcp_stop_queue(ctrl, i);
1502 }
1503
1504 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1505 {
1506         int i, ret = 0;
1507
1508         for (i = 1; i < ctrl->queue_count; i++) {
1509                 ret = nvme_tcp_start_queue(ctrl, i);
1510                 if (ret)
1511                         goto out_stop_queues;
1512         }
1513
1514         return 0;
1515
1516 out_stop_queues:
1517         for (i--; i >= 1; i--)
1518                 nvme_tcp_stop_queue(ctrl, i);
1519         return ret;
1520 }
1521
1522 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1523 {
1524         int ret;
1525
1526         ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1527         if (ret)
1528                 return ret;
1529
1530         ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1531         if (ret)
1532                 goto out_free_queue;
1533
1534         return 0;
1535
1536 out_free_queue:
1537         nvme_tcp_free_queue(ctrl, 0);
1538         return ret;
1539 }
1540
1541 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1542 {
1543         int i, ret;
1544
1545         for (i = 1; i < ctrl->queue_count; i++) {
1546                 ret = nvme_tcp_alloc_queue(ctrl, i,
1547                                 ctrl->sqsize + 1);
1548                 if (ret)
1549                         goto out_free_queues;
1550         }
1551
1552         return 0;
1553
1554 out_free_queues:
1555         for (i--; i >= 1; i--)
1556                 nvme_tcp_free_queue(ctrl, i);
1557
1558         return ret;
1559 }
1560
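/*
 * Desired I/O queue count: the default/read budget (nr_io_queues) plus the
 * write budget (nr_write_queues), each capped at the number of online CPUs.
 * As an assumed example, nr_io_queues=8 and nr_write_queues=4 on a 6-CPU
 * host request min(8, 6) + min(4, 6) = 10 queues; the target may grant fewer.
 */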
1561 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1562 {
1563         unsigned int nr_io_queues;
1564
1565         nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1566         nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1567
1568         return nr_io_queues;
1569 }
1570
1571 static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1572                 unsigned int nr_io_queues)
1573 {
1574         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1575         struct nvmf_ctrl_options *opts = nctrl->opts;
1576
1577         if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1578                 /*
1579                  * separate read/write queues
1580                  * hand out dedicated default queues only after we have
1581                  * sufficient read queues.
1582                  */
1583                 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1584                 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1585                 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1586                         min(opts->nr_write_queues, nr_io_queues);
1587                 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1588         } else {
1589                 /*
1590                  * shared read/write queues
1591                  * either no write queues were requested, or we don't have
1592                  * sufficient queue count to have dedicated default queues.
1593                  */
1594                 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1595                         min(opts->nr_io_queues, nr_io_queues);
1596                 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1597         }
1598 }
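
/*
 * Worked example (assumed values): if the controller granted 8 I/O queues
 * for nr_io_queues=4 and nr_write_queues=4, the split above yields
 * HCTX_TYPE_READ = 4 and HCTX_TYPE_DEFAULT = min(4, 8 - 4) = 4.  With only
 * 6 granted queues the result is READ = 4, DEFAULT = 2.  When
 * nr_write_queues is 0, or the granted count does not exceed nr_io_queues,
 * everything lands in DEFAULT and nvme_tcp_map_queues() later points the
 * read map at the same queues.
 */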
1599
1600 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1601 {
1602         unsigned int nr_io_queues;
1603         int ret;
1604
1605         nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1606         ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1607         if (ret)
1608                 return ret;
1609
1610         ctrl->queue_count = nr_io_queues + 1;
1611         if (ctrl->queue_count < 2)
1612                 return 0;
1613
1614         dev_info(ctrl->device,
1615                 "creating %d I/O queues.\n", nr_io_queues);
1616
1617         nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1618
1619         return __nvme_tcp_alloc_io_queues(ctrl);
1620 }
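
/*
 * Queue 0 is the admin queue, so queue_count = granted I/O queues + 1.  If
 * the target grants no I/O queues, the controller still comes up with just
 * the admin queue (queue_count < 2 returns success above).
 */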
1621
1622 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1623 {
1624         nvme_tcp_stop_io_queues(ctrl);
1625         if (remove) {
1626                 blk_cleanup_queue(ctrl->connect_q);
1627                 blk_mq_free_tag_set(ctrl->tagset);
1628         }
1629         nvme_tcp_free_io_queues(ctrl);
1630 }
1631
1632 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1633 {
1634         int ret;
1635
1636         ret = nvme_tcp_alloc_io_queues(ctrl);
1637         if (ret)
1638                 return ret;
1639
1640         if (new) {
1641                 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1642                 if (IS_ERR(ctrl->tagset)) {
1643                         ret = PTR_ERR(ctrl->tagset);
1644                         goto out_free_io_queues;
1645                 }
1646
1647                 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1648                 if (IS_ERR(ctrl->connect_q)) {
1649                         ret = PTR_ERR(ctrl->connect_q);
1650                         goto out_free_tag_set;
1651                 }
1652         } else {
1653                 blk_mq_update_nr_hw_queues(ctrl->tagset,
1654                         ctrl->queue_count - 1);
1655         }
1656
1657         ret = nvme_tcp_start_io_queues(ctrl);
1658         if (ret)
1659                 goto out_cleanup_connect_q;
1660
1661         return 0;
1662
1663 out_cleanup_connect_q:
1664         if (new)
1665                 blk_cleanup_queue(ctrl->connect_q);
1666 out_free_tag_set:
1667         if (new)
1668                 blk_mq_free_tag_set(ctrl->tagset);
1669 out_free_io_queues:
1670         nvme_tcp_free_io_queues(ctrl);
1671         return ret;
1672 }
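
/*
 * On the initial connect (new == true) this allocates the I/O tag set and
 * the connect_q request queue used to issue the per-queue fabrics connect
 * commands; on a reset or reconnect it only refreshes nr_hw_queues on the
 * existing tag set before restarting the queues.
 */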
1673
1674 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1675 {
1676         nvme_tcp_stop_queue(ctrl, 0);
1677         if (remove) {
1678                 blk_cleanup_queue(ctrl->admin_q);
1679                 blk_mq_free_tag_set(ctrl->admin_tagset);
1680         }
1681         nvme_tcp_free_admin_queue(ctrl);
1682 }
1683
1684 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1685 {
1686         int error;
1687
1688         error = nvme_tcp_alloc_admin_queue(ctrl);
1689         if (error)
1690                 return error;
1691
1692         if (new) {
1693                 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1694                 if (IS_ERR(ctrl->admin_tagset)) {
1695                         error = PTR_ERR(ctrl->admin_tagset);
1696                         goto out_free_queue;
1697                 }
1698
1699                 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1700                 if (IS_ERR(ctrl->admin_q)) {
1701                         error = PTR_ERR(ctrl->admin_q);
1702                         goto out_free_tagset;
1703                 }
1704         }
1705
1706         error = nvme_tcp_start_queue(ctrl, 0);
1707         if (error)
1708                 goto out_cleanup_queue;
1709
1710         error = nvme_enable_ctrl(ctrl);
1711         if (error)
1712                 goto out_stop_queue;
1713
1714         error = nvme_init_identify(ctrl);
1715         if (error)
1716                 goto out_stop_queue;
1717
1718         return 0;
1719
1720 out_stop_queue:
1721         nvme_tcp_stop_queue(ctrl, 0);
1722 out_cleanup_queue:
1723         if (new)
1724                 blk_cleanup_queue(ctrl->admin_q);
1725 out_free_tagset:
1726         if (new)
1727                 blk_mq_free_tag_set(ctrl->admin_tagset);
1728 out_free_queue:
1729         nvme_tcp_free_admin_queue(ctrl);
1730         return error;
1731 }
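
/*
 * Admin bring-up order: allocate queue 0 (plus the AER request), create the
 * admin tag set and request queue for a fresh controller, start the queue,
 * enable the controller, then run identify.  The error labels unwind in
 * reverse order.
 */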
1732
1733 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1734                 bool remove)
1735 {
1736         blk_mq_quiesce_queue(ctrl->admin_q);
1737         nvme_tcp_stop_queue(ctrl, 0);
1738         if (ctrl->admin_tagset) {
1739                 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1740                         nvme_cancel_request, ctrl);
1741                 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1742         }
1743         blk_mq_unquiesce_queue(ctrl->admin_q);
1744         nvme_tcp_destroy_admin_queue(ctrl, remove);
1745 }
1746
1747 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1748                 bool remove)
1749 {
1750         if (ctrl->queue_count <= 1)
1751                 return;
1752         nvme_stop_queues(ctrl);
1753         nvme_tcp_stop_io_queues(ctrl);
1754         if (ctrl->tagset) {
1755                 blk_mq_tagset_busy_iter(ctrl->tagset,
1756                         nvme_cancel_request, ctrl);
1757                 blk_mq_tagset_wait_completed_request(ctrl->tagset);
1758         }
1759         if (remove)
1760                 nvme_start_queues(ctrl);
1761         nvme_tcp_destroy_io_queues(ctrl, remove);
1762 }
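
/*
 * Both teardown paths above follow the same pattern: quiesce the block
 * queues, stop the TCP queues so no further completions can arrive, then
 * cancel and wait for every outstanding tagged request.  The admin queue is
 * always unquiesced afterwards; the I/O queues are only restarted when the
 * controller is being removed.
 */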
1763
1764 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1765 {
1766         /* If we are resetting/deleting then do nothing */
1767         if (ctrl->state != NVME_CTRL_CONNECTING) {
1768                 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1769                         ctrl->state == NVME_CTRL_LIVE);
1770                 return;
1771         }
1772
1773         if (nvmf_should_reconnect(ctrl)) {
1774                 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1775                         ctrl->opts->reconnect_delay);
1776                 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1777                                 ctrl->opts->reconnect_delay * HZ);
1778         } else {
1779                 dev_info(ctrl->device, "Removing controller...\n");
1780                 nvme_delete_ctrl(ctrl);
1781         }
1782 }
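
/*
 * Example with assumed fabrics defaults (reconnect_delay=10s,
 * ctrl_loss_tmo=600s): nvmf_should_reconnect() permits roughly 60 attempts,
 * each scheduled reconnect_delay seconds apart on nvme_wq, before the
 * controller is deleted.
 */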
1783
1784 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1785 {
1786         struct nvmf_ctrl_options *opts = ctrl->opts;
1787         int ret = -EINVAL;
1788
1789         ret = nvme_tcp_configure_admin_queue(ctrl, new);
1790         if (ret)
1791                 return ret;
1792
1793         if (ctrl->icdoff) {
1794                 dev_err(ctrl->device, "icdoff is not supported!\n");
                     ret = -EINVAL;
1795                 goto destroy_admin;
1796         }
1797
1798         if (opts->queue_size > ctrl->sqsize + 1)
1799                 dev_warn(ctrl->device,
1800                         "queue_size %zu > ctrl sqsize %u, clamping down\n",
1801                         opts->queue_size, ctrl->sqsize + 1);
1802
1803         if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1804                 dev_warn(ctrl->device,
1805                         "sqsize %u > ctrl maxcmd %u, clamping down\n",
1806                         ctrl->sqsize + 1, ctrl->maxcmd);
1807                 ctrl->sqsize = ctrl->maxcmd - 1;
1808         }
1809
1810         if (ctrl->queue_count > 1) {
1811                 ret = nvme_tcp_configure_io_queues(ctrl, new);
1812                 if (ret)
1813                         goto destroy_admin;
1814         }
1815
1816         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1817                 /* state change failure is ok if we're in DELETING state */
1818                 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1819                 ret = -EINVAL;
1820                 goto destroy_io;
1821         }
1822
1823         nvme_start_ctrl(ctrl);
1824         return 0;
1825
1826 destroy_io:
1827         if (ctrl->queue_count > 1)
1828                 nvme_tcp_destroy_io_queues(ctrl, new);
1829 destroy_admin:
1830         nvme_tcp_stop_queue(ctrl, 0);
1831         nvme_tcp_destroy_admin_queue(ctrl, new);
1832         return ret;
1833 }
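
/*
 * Example of the sqsize clamp above (assumed values): queue_size=128 gives
 * sqsize=127; if the controller reports MAXCMD=64, sqsize is clamped to 63
 * so that no more than maxcmd commands are outstanding per queue.
 */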
1834
1835 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1836 {
1837         struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1838                         struct nvme_tcp_ctrl, connect_work);
1839         struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1840
1841         ++ctrl->nr_reconnects;
1842
1843         if (nvme_tcp_setup_ctrl(ctrl, false))
1844                 goto requeue;
1845
1846         dev_info(ctrl->device, "Successfully reconnected (%d attempts)\n",
1847                         ctrl->nr_reconnects);
1848
1849         ctrl->nr_reconnects = 0;
1850
1851         return;
1852
1853 requeue:
1854         dev_info(ctrl->device, "Failed reconnect attempt %d\n",
1855                         ctrl->nr_reconnects);
1856         nvme_tcp_reconnect_or_remove(ctrl);
1857 }
1858
1859 static void nvme_tcp_error_recovery_work(struct work_struct *work)
1860 {
1861         struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
1862                                 struct nvme_tcp_ctrl, err_work);
1863         struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1864
1865         nvme_stop_keep_alive(ctrl);
1866         nvme_tcp_teardown_io_queues(ctrl, false);
1867         /* unquiesce to fail fast pending requests */
1868         nvme_start_queues(ctrl);
1869         nvme_tcp_teardown_admin_queue(ctrl, false);
1870
1871         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1872                 /* state change failure is ok if we're in DELETING state */
1873                 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1874                 return;
1875         }
1876
1877         nvme_tcp_reconnect_or_remove(ctrl);
1878 }
1879
1880 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
1881 {
1882         cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
1883         cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
1884
1885         nvme_tcp_teardown_io_queues(ctrl, shutdown);
1886         if (shutdown)
1887                 nvme_shutdown_ctrl(ctrl);
1888         else
1889                 nvme_disable_ctrl(ctrl, ctrl->cap);
1890         nvme_tcp_teardown_admin_queue(ctrl, shutdown);
1891 }
1892
1893 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
1894 {
1895         nvme_tcp_teardown_ctrl(ctrl, true);
1896 }
1897
1898 static void nvme_reset_ctrl_work(struct work_struct *work)
1899 {
1900         struct nvme_ctrl *ctrl =
1901                 container_of(work, struct nvme_ctrl, reset_work);
1902
1903         nvme_stop_ctrl(ctrl);
1904         nvme_tcp_teardown_ctrl(ctrl, false);
1905
1906         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1907                 /* state change failure is ok if we're in DELETING state */
1908                 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1909                 return;
1910         }
1911
1912         if (nvme_tcp_setup_ctrl(ctrl, false))
1913                 goto out_fail;
1914
1915         return;
1916
1917 out_fail:
1918         ++ctrl->nr_reconnects;
1919         nvme_tcp_reconnect_or_remove(ctrl);
1920 }
1921
1922 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
1923 {
1924         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1925
1926         if (list_empty(&ctrl->list))
1927                 goto free_ctrl;
1928
1929         mutex_lock(&nvme_tcp_ctrl_mutex);
1930         list_del(&ctrl->list);
1931         mutex_unlock(&nvme_tcp_ctrl_mutex);
1932
1933         nvmf_free_options(nctrl->opts);
1934 free_ctrl:
1935         kfree(ctrl->queues);
1936         kfree(ctrl);
1937 }
1938
1939 static void nvme_tcp_set_sg_null(struct nvme_command *c)
1940 {
1941         struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1942
1943         sg->addr = 0;
1944         sg->length = 0;
1945         sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1946                         NVME_SGL_FMT_TRANSPORT_A;
1947 }
1948
1949 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
1950                 struct nvme_command *c, u32 data_len)
1951 {
1952         struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1953
1954         sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
1955         sg->length = cpu_to_le32(data_len);
1956         sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
1957 }
1958
1959 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
1960                 u32 data_len)
1961 {
1962         struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1963
1964         sg->addr = 0;
1965         sg->length = cpu_to_le32(data_len);
1966         sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1967                         NVME_SGL_FMT_TRANSPORT_A;
1968 }
1969
1970 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
1971 {
1972         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
1973         struct nvme_tcp_queue *queue = &ctrl->queues[0];
1974         struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
1975         struct nvme_command *cmd = &pdu->cmd;
1976         u8 hdgst = nvme_tcp_hdgst_len(queue);
1977
1978         memset(pdu, 0, sizeof(*pdu));
1979         pdu->hdr.type = nvme_tcp_cmd;
1980         if (queue->hdr_digest)
1981                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
1982         pdu->hdr.hlen = sizeof(*pdu);
1983         pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
1984
1985         cmd->common.opcode = nvme_admin_async_event;
1986         cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1987         cmd->common.flags |= NVME_CMD_SGL_METABUF;
1988         nvme_tcp_set_sg_null(cmd);
1989
1990         ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
1991         ctrl->async_req.offset = 0;
1992         ctrl->async_req.curr_bio = NULL;
1993         ctrl->async_req.data_len = 0;
1994
1995         nvme_tcp_queue_request(&ctrl->async_req);
1996 }
1997
1998 static enum blk_eh_timer_return
1999 nvme_tcp_timeout(struct request *rq, bool reserved)
2000 {
2001         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2002         struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
2003         struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2004
2005         dev_warn(ctrl->ctrl.device,
2006                 "queue %d: timeout request %#x type %d\n",
2007                 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2008
2009         if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
2010                 /*
2011                  * Tear down immediately if the controller times out while starting
2012                  * or if we have already started error recovery. All outstanding
2013                  * requests are completed on shutdown, so we return BLK_EH_DONE.
2014                  */
2015                 flush_work(&ctrl->err_work);
2016                 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
2017                 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
2018                 return BLK_EH_DONE;
2019         }
2020
2021         dev_warn(ctrl->ctrl.device, "starting error recovery\n");
2022         nvme_tcp_error_recovery(&ctrl->ctrl);
2023
2024         return BLK_EH_RESET_TIMER;
2025 }
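
/*
 * Two possible outcomes above: if the controller is not LIVE, the queues are
 * torn down and the timed-out request is completed by the cancellation
 * iterator, so BLK_EH_DONE is returned; otherwise error recovery is kicked
 * off and the block layer re-arms the timer via BLK_EH_RESET_TIMER.
 */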
2026
2027 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2028                         struct request *rq)
2029 {
2030         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2031         struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2032         struct nvme_command *c = &pdu->cmd;
2033
2034         c->common.flags |= NVME_CMD_SGL_METABUF;
2035
2036         if (rq_data_dir(rq) == WRITE && req->data_len &&
2037             req->data_len <= nvme_tcp_inline_data_size(queue))
2038                 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2039         else
2040                 nvme_tcp_set_sg_host_data(c, req->data_len);
2041
2042         return 0;
2043 }
2044
2045 static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2046                 struct request *rq)
2047 {
2048         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2049         struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2050         struct nvme_tcp_queue *queue = req->queue;
2051         u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2052         blk_status_t ret;
2053
2054         ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2055         if (ret)
2056                 return ret;
2057
2058         req->state = NVME_TCP_SEND_CMD_PDU;
2059         req->offset = 0;
2060         req->data_sent = 0;
2061         req->pdu_len = 0;
2062         req->pdu_sent = 0;
2063         req->data_len = blk_rq_payload_bytes(rq);
2064         req->curr_bio = rq->bio;
2065
2066         if (rq_data_dir(rq) == WRITE &&
2067             req->data_len <= nvme_tcp_inline_data_size(queue))
2068                 req->pdu_len = req->data_len;
2069         else if (req->curr_bio)
2070                 nvme_tcp_init_iter(req, READ);
2071
2072         pdu->hdr.type = nvme_tcp_cmd;
2073         pdu->hdr.flags = 0;
2074         if (queue->hdr_digest)
2075                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2076         if (queue->data_digest && req->pdu_len) {
2077                 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2078                 ddgst = nvme_tcp_ddgst_len(queue);
2079         }
2080         pdu->hdr.hlen = sizeof(*pdu);
2081         pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2082         pdu->hdr.plen =
2083                 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2084
2085         ret = nvme_tcp_map_data(queue, rq);
2086         if (unlikely(ret)) {
2087                 dev_err(queue->ctrl->ctrl.device,
2088                         "Failed to map data (%d)\n", ret);
2089                 return ret;
2090         }
2091
2092         return 0;
2093 }
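
/*
 * Illustrative PDU sizing (assumed: 4KiB inline write with header and data
 * digests enabled): hlen = sizeof(struct nvme_tcp_cmd_pdu) = 72, hdgst = 4,
 * so pdo = 76 and plen = 72 + 4 + 4096 + 4 = 4176.  For reads, or writes
 * larger than the inline data size, pdu_len stays 0 and plen covers only
 * the header plus its digest.
 */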
2094
2095 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2096                 const struct blk_mq_queue_data *bd)
2097 {
2098         struct nvme_ns *ns = hctx->queue->queuedata;
2099         struct nvme_tcp_queue *queue = hctx->driver_data;
2100         struct request *rq = bd->rq;
2101         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2102         bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2103         blk_status_t ret;
2104
2105         if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2106                 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2107
2108         ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2109         if (unlikely(ret))
2110                 return ret;
2111
2112         blk_mq_start_request(rq);
2113
2114         nvme_tcp_queue_request(req);
2115
2116         return BLK_STS_OK;
2117 }
2118
2119 static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2120 {
2121         struct nvme_tcp_ctrl *ctrl = set->driver_data;
2122         struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2123
2124         if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2125                 /* separate read/write queues */
2126                 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2127                         ctrl->io_queues[HCTX_TYPE_DEFAULT];
2128                 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2129                 set->map[HCTX_TYPE_READ].nr_queues =
2130                         ctrl->io_queues[HCTX_TYPE_READ];
2131                 set->map[HCTX_TYPE_READ].queue_offset =
2132                         ctrl->io_queues[HCTX_TYPE_DEFAULT];
2133         } else {
2134                 /* shared read/write queues */
2135                 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2136                         ctrl->io_queues[HCTX_TYPE_DEFAULT];
2137                 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2138                 set->map[HCTX_TYPE_READ].nr_queues =
2139                         ctrl->io_queues[HCTX_TYPE_DEFAULT];
2140                 set->map[HCTX_TYPE_READ].queue_offset = 0;
2141         }
2142         blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2143         blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2144
2145         dev_info(ctrl->ctrl.device,
2146                 "mapped %d/%d default/read queues.\n",
2147                 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2148                 ctrl->io_queues[HCTX_TYPE_READ]);
2149
2150         return 0;
2151 }
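
/*
 * Mapping example (assumed split of 4 default + 4 read queues): the DEFAULT
 * map covers hardware queues 0-3 (queue_offset 0) and the READ map covers
 * hardware queues 4-7 (queue_offset 4), so reads are serviced by a different
 * set of TCP connections than writes.  In the shared case both maps
 * reference the same queues at offset 0.
 */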
2152
2153 static struct blk_mq_ops nvme_tcp_mq_ops = {
2154         .queue_rq       = nvme_tcp_queue_rq,
2155         .complete       = nvme_complete_rq,
2156         .init_request   = nvme_tcp_init_request,
2157         .exit_request   = nvme_tcp_exit_request,
2158         .init_hctx      = nvme_tcp_init_hctx,
2159         .timeout        = nvme_tcp_timeout,
2160         .map_queues     = nvme_tcp_map_queues,
2161 };
2162
2163 static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2164         .queue_rq       = nvme_tcp_queue_rq,
2165         .complete       = nvme_complete_rq,
2166         .init_request   = nvme_tcp_init_request,
2167         .exit_request   = nvme_tcp_exit_request,
2168         .init_hctx      = nvme_tcp_init_admin_hctx,
2169         .timeout        = nvme_tcp_timeout,
2170 };
2171
2172 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2173         .name                   = "tcp",
2174         .module                 = THIS_MODULE,
2175         .flags                  = NVME_F_FABRICS,
2176         .reg_read32             = nvmf_reg_read32,
2177         .reg_read64             = nvmf_reg_read64,
2178         .reg_write32            = nvmf_reg_write32,
2179         .free_ctrl              = nvme_tcp_free_ctrl,
2180         .submit_async_event     = nvme_tcp_submit_async_event,
2181         .delete_ctrl            = nvme_tcp_delete_ctrl,
2182         .get_address            = nvmf_get_address,
2183 };
2184
2185 static bool
2186 nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2187 {
2188         struct nvme_tcp_ctrl *ctrl;
2189         bool found = false;
2190
2191         mutex_lock(&nvme_tcp_ctrl_mutex);
2192         list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2193                 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2194                 if (found)
2195                         break;
2196         }
2197         mutex_unlock(&nvme_tcp_ctrl_mutex);
2198
2199         return found;
2200 }
2201
2202 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2203                 struct nvmf_ctrl_options *opts)
2204 {
2205         struct nvme_tcp_ctrl *ctrl;
2206         int ret;
2207
2208         ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2209         if (!ctrl)
2210                 return ERR_PTR(-ENOMEM);
2211
2212         INIT_LIST_HEAD(&ctrl->list);
2213         ctrl->ctrl.opts = opts;
2214         ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
2215         ctrl->ctrl.sqsize = opts->queue_size - 1;
2216         ctrl->ctrl.kato = opts->kato;
2217
2218         INIT_DELAYED_WORK(&ctrl->connect_work,
2219                         nvme_tcp_reconnect_ctrl_work);
2220         INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2221         INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2222
2223         if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2224                 opts->trsvcid =
2225                         kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2226                 if (!opts->trsvcid) {
2227                         ret = -ENOMEM;
2228                         goto out_free_ctrl;
2229                 }
2230                 opts->mask |= NVMF_OPT_TRSVCID;
2231         }
2232
2233         ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2234                         opts->traddr, opts->trsvcid, &ctrl->addr);
2235         if (ret) {
2236                 pr_err("malformed address passed: %s:%s\n",
2237                         opts->traddr, opts->trsvcid);
2238                 goto out_free_ctrl;
2239         }
2240
2241         if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2242                 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2243                         opts->host_traddr, NULL, &ctrl->src_addr);
2244                 if (ret) {
2245                         pr_err("malformed src address passed: %s\n",
2246                                opts->host_traddr);
2247                         goto out_free_ctrl;
2248                 }
2249         }
2250
2251         if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2252                 ret = -EALREADY;
2253                 goto out_free_ctrl;
2254         }
2255
2256         ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2257                                 GFP_KERNEL);
2258         if (!ctrl->queues) {
2259                 ret = -ENOMEM;
2260                 goto out_free_ctrl;
2261         }
2262
2263         ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2264         if (ret)
2265                 goto out_kfree_queues;
2266
2267         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2268                 WARN_ON_ONCE(1);
2269                 ret = -EINTR;
2270                 goto out_uninit_ctrl;
2271         }
2272
2273         ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2274         if (ret)
2275                 goto out_uninit_ctrl;
2276
2277         dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2278                 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2279
2280         nvme_get_ctrl(&ctrl->ctrl);
2281
2282         mutex_lock(&nvme_tcp_ctrl_mutex);
2283         list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2284         mutex_unlock(&nvme_tcp_ctrl_mutex);
2285
2286         return &ctrl->ctrl;
2287
2288 out_uninit_ctrl:
2289         nvme_uninit_ctrl(&ctrl->ctrl);
2290         nvme_put_ctrl(&ctrl->ctrl);
2291         if (ret > 0)
2292                 ret = -EIO;
2293         return ERR_PTR(ret);
2294 out_kfree_queues:
2295         kfree(ctrl->queues);
2296 out_free_ctrl:
2297         kfree(ctrl);
2298         return ERR_PTR(ret);
2299 }
2300
2301 static struct nvmf_transport_ops nvme_tcp_transport = {
2302         .name           = "tcp",
2303         .module         = THIS_MODULE,
2304         .required_opts  = NVMF_OPT_TRADDR,
2305         .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2306                           NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2307                           NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2308                           NVMF_OPT_NR_WRITE_QUEUES,
2309         .create_ctrl    = nvme_tcp_create_ctrl,
2310 };
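
/*
 * Typical host-side usage once this transport is registered (option names
 * are assumed to match a reasonably recent nvme-cli; check nvme-connect(1)
 * for your version):
 *
 *	modprobe nvme-tcp
 *	nvme connect -t tcp -a 192.0.2.10 -s 4420 \
 *		-n nqn.2018-01.example:subsys1
 *
 * The allowed_opts above correspond to connect options such as
 * --hdr-digest, --data-digest and --nr-write-queues.
 */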
2311
2312 static int __init nvme_tcp_init_module(void)
2313 {
2314         nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2315                         WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2316         if (!nvme_tcp_wq)
2317                 return -ENOMEM;
2318
2319         nvmf_register_transport(&nvme_tcp_transport);
2320         return 0;
2321 }
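
/*
 * nvmf_register_transport() returns an int that is discarded above.  A more
 * defensive init would propagate the error and release the workqueue on
 * failure; a minimal sketch (not the code in this file):
 *
 *	int ret;
 *
 *	ret = nvmf_register_transport(&nvme_tcp_transport);
 *	if (ret) {
 *		destroy_workqueue(nvme_tcp_wq);
 *		return ret;
 *	}
 *	return 0;
 */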
2322
2323 static void __exit nvme_tcp_cleanup_module(void)
2324 {
2325         struct nvme_tcp_ctrl *ctrl;
2326
2327         nvmf_unregister_transport(&nvme_tcp_transport);
2328
2329         mutex_lock(&nvme_tcp_ctrl_mutex);
2330         list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2331                 nvme_delete_ctrl(&ctrl->ctrl);
2332         mutex_unlock(&nvme_tcp_ctrl_mutex);
2333         flush_workqueue(nvme_delete_wq);
2334
2335         destroy_workqueue(nvme_tcp_wq);
2336 }
2337
2338 module_init(nvme_tcp_init_module);
2339 module_exit(nvme_tcp_cleanup_module);
2340
2341 MODULE_LICENSE("GPL v2");