// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>

#include "nvme.h"
#include "fabrics.h"
struct nvme_tcp_queue;

enum nvme_tcp_send_state {
        NVME_TCP_SEND_CMD_PDU = 0,
        NVME_TCP_SEND_H2C_PDU,
        NVME_TCP_SEND_DATA,
        NVME_TCP_SEND_DDGST,
};
struct nvme_tcp_request {
        struct nvme_request     req;
        void                    *pdu;
        struct nvme_tcp_queue   *queue;
        u32                     data_len;
        u32                     pdu_len;
        u32                     pdu_sent;
        u16                     ttag;
        struct list_head        entry;
        __le32                  ddgst;

        struct bio              *curr_bio;
        struct iov_iter         iter;

        /* send state */
        size_t                  offset;
        size_t                  data_sent;
        enum nvme_tcp_send_state state;
};
enum nvme_tcp_queue_flags {
        NVME_TCP_Q_ALLOCATED    = 0,
        NVME_TCP_Q_LIVE         = 1,
};

enum nvme_tcp_recv_state {
        NVME_TCP_RECV_PDU       = 0,
        NVME_TCP_RECV_DATA,
        NVME_TCP_RECV_DDGST,
};
struct nvme_tcp_queue {
        struct socket           *sock;
        struct work_struct      io_work;
        int                     io_cpu;

        spinlock_t              lock;
        struct list_head        send_list;

        /* recv state */
        void                    *pdu;
        int                     pdu_remaining;
        int                     pdu_offset;
        size_t                  data_remaining;
        size_t                  ddgst_remaining;

        /* send state */
        struct nvme_tcp_request *request;

        int                     queue_size;
        size_t                  cmnd_capsule_len;
        struct nvme_tcp_ctrl    *ctrl;
        unsigned long           flags;
        bool                    rd_enabled;

        bool                    hdr_digest;
        bool                    data_digest;
        struct ahash_request    *rcv_hash;
        struct ahash_request    *snd_hash;
        __le32                  exp_ddgst;
        __le32                  recv_ddgst;

        struct page_frag_cache  pf_cache;

        void (*state_change)(struct sock *);
        void (*data_ready)(struct sock *);
        void (*write_space)(struct sock *);
};
struct nvme_tcp_ctrl {
        /* read only in the hot path */
        struct nvme_tcp_queue   *queues;
        struct blk_mq_tag_set   tag_set;

        /* other member variables */
        struct list_head        list;
        struct blk_mq_tag_set   admin_tag_set;
        struct sockaddr_storage addr;
        struct sockaddr_storage src_addr;
        struct nvme_ctrl        ctrl;

        struct work_struct      err_work;
        struct delayed_work     connect_work;
        struct nvme_tcp_request async_req;
        u32                     io_queues[HCTX_MAX_TYPES];
};
static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static struct blk_mq_ops nvme_tcp_mq_ops;
static struct blk_mq_ops nvme_tcp_admin_mq_ops;
static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
        return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
        u32 queue_idx = nvme_tcp_queue_id(queue);

        if (queue_idx == 0)
                return queue->ctrl->admin_tag_set.tags[queue_idx];
        return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
        return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
        return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
        return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
        return req == &req->queue->ctrl->async_req;
}
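
/*
 * A write may be carried inline in the command capsule when its payload
 * fits in the in-capsule data area (cmnd_capsule_len comes from the
 * controller's ioccsz for I/O queues); larger writes are instead solicited
 * by the controller with R2T PDUs.
 */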
static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
        struct request *rq;
        unsigned int bytes;

        if (unlikely(nvme_tcp_async_req(req)))
                return false; /* async events don't have a request */

        rq = blk_mq_rq_from_pdu(req);
        bytes = blk_rq_payload_bytes(rq);

        return rq_data_dir(rq) == WRITE && bytes &&
                bytes <= nvme_tcp_inline_data_size(req->queue);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
        return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
        return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
        return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
                        req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
{
        return req->iter.iov_offset;
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
        return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
                        req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
                int len)
{
        return nvme_tcp_pdu_data_left(req) <= len;
}
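
/*
 * The request's bvec iterator is (re)initialized either from the request's
 * special payload (e.g. discard) or from the current bio, resuming at the
 * point the bio iterator already reached (bi_bvec_done).
 */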
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
                unsigned int dir)
{
        struct request *rq = blk_mq_rq_from_pdu(req);
        struct bio_vec *vec;
        unsigned int size;
        int nsegs;
        size_t offset;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
                vec = &rq->special_vec;
                nsegs = 1;
                size = blk_rq_payload_bytes(rq);
                offset = 0;
        } else {
                struct bio *bio = req->curr_bio;

                vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
                nsegs = bio_segments(bio);
                size = bio->bi_iter.bi_size;
                offset = bio->bi_iter.bi_bvec_done;
        }

        iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
        req->iter.iov_offset = offset;
}
static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
                int len)
{
        req->data_sent += len;
        req->pdu_sent += len;
        iov_iter_advance(&req->iter, len);
        if (!iov_iter_count(&req->iter) &&
            req->data_sent < req->data_len) {
                req->curr_bio = req->curr_bio->bi_next;
                nvme_tcp_init_iter(req, WRITE);
        }
}
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
{
        struct nvme_tcp_queue *queue = req->queue;

        spin_lock(&queue->lock);
        list_add_tail(&req->entry, &queue->send_list);
        spin_unlock(&queue->lock);

        queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
        struct nvme_tcp_request *req;

        spin_lock(&queue->lock);
        req = list_first_entry_or_null(&queue->send_list,
                        struct nvme_tcp_request, entry);
        if (req)
                list_del(&req->entry);
        spin_unlock(&queue->lock);

        return req;
}
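
/*
 * Header and data digests are both CRC32C.  The queue keeps separate
 * ahash requests for the send and receive directions so that tx and rx
 * digest state never interleave.
 */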
static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
                __le32 *dgst)
{
        ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
        crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
                struct page *page, off_t off, size_t len)
{
        struct scatterlist sg;

        sg_init_marker(&sg, 1);
        sg_set_page(&sg, page, len, off);
        ahash_request_set_crypt(hash, &sg, NULL, len);
        crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
                void *pdu, size_t len)
{
        struct scatterlist sg;

        sg_init_one(&sg, pdu, len);
        ahash_request_set_crypt(hash, &sg, pdu + len, len);
        crypto_ahash_digest(hash);
}
static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
                void *pdu, size_t pdu_len)
{
        struct nvme_tcp_hdr *hdr = pdu;
        __le32 recv_digest;
        __le32 exp_digest;

        if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
                dev_err(queue->ctrl->ctrl.device,
                        "queue %d: header digest flag is cleared\n",
                        nvme_tcp_queue_id(queue));
                return -EPROTO;
        }

        recv_digest = *(__le32 *)(pdu + hdr->hlen);
        nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
        exp_digest = *(__le32 *)(pdu + hdr->hlen);
        if (recv_digest != exp_digest) {
                dev_err(queue->ctrl->ctrl.device,
                        "header digest error: recv %#x expected %#x\n",
                        le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
                return -EIO;
        }

        return 0;
}
static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
        struct nvme_tcp_hdr *hdr = pdu;
        u8 digest_len = nvme_tcp_hdgst_len(queue);
        u32 len;

        len = le32_to_cpu(hdr->plen) - hdr->hlen -
                ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

        if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
                dev_err(queue->ctrl->ctrl.device,
                        "queue %d: data digest flag is cleared\n",
                        nvme_tcp_queue_id(queue));
                return -EPROTO;
        }
        crypto_ahash_init(queue->rcv_hash);

        return 0;
}
static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
                struct request *rq, unsigned int hctx_idx)
{
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

        page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
                struct request *rq, unsigned int hctx_idx,
                unsigned int numa_node)
{
        struct nvme_tcp_ctrl *ctrl = set->driver_data;
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
        struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
        u8 hdgst = nvme_tcp_hdgst_len(queue);

        req->pdu = page_frag_alloc(&queue->pf_cache,
                        sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
                        GFP_KERNEL | __GFP_ZERO);
        if (!req->pdu)
                return -ENOMEM;

        req->queue = queue;
        nvme_req(rq)->ctrl = &ctrl->ctrl;

        return 0;
}
static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_tcp_ctrl *ctrl = data;
        struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

        hctx->driver_data = queue;
        return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_tcp_ctrl *ctrl = data;
        struct nvme_tcp_queue *queue = &ctrl->queues[0];

        hctx->driver_data = queue;
        return 0;
}
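
/*
 * Receive state machine: pdu_remaining drives collection of the next PDU
 * header, data_remaining tracks an in-flight C2H payload and
 * ddgst_remaining the trailing data digest; nvme_tcp_recv_state() derives
 * the current state from these counters.
 */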
static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
        return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
                (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
                NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
        queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
                                nvme_tcp_hdgst_len(queue);
        queue->pdu_offset = 0;
        queue->data_remaining = -1;
        queue->ddgst_remaining = 0;
}
static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
                return;

        queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
}
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
                struct nvme_completion *cqe)
{
        struct request *rq;

        rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
                        "queue %d tag 0x%x not found\n",
                        nvme_tcp_queue_id(queue), cqe->command_id);
                nvme_tcp_error_recovery(&queue->ctrl->ctrl);
                return -EINVAL;
        }

        nvme_end_request(rq, cqe->status, cqe->result);

        return 0;
}
static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
                struct nvme_tcp_data_pdu *pdu)
{
        struct request *rq;

        rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
                        "queue %d tag %#x not found\n",
                        nvme_tcp_queue_id(queue), pdu->command_id);
                return -ENOENT;
        }

        if (!blk_rq_payload_bytes(rq)) {
                dev_err(queue->ctrl->ctrl.device,
                        "queue %d tag %#x unexpected data\n",
                        nvme_tcp_queue_id(queue), rq->tag);
                return -EIO;
        }

        queue->data_remaining = le32_to_cpu(pdu->data_length);

        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
            unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
                dev_err(queue->ctrl->ctrl.device,
                        "queue %d tag %#x SUCCESS set but not last PDU\n",
                        nvme_tcp_queue_id(queue), rq->tag);
                nvme_tcp_error_recovery(&queue->ctrl->ctrl);
                return -EPROTO;
        }

        return 0;
}
static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
                struct nvme_tcp_rsp_pdu *pdu)
{
        struct nvme_completion *cqe = &pdu->cqe;
        int ret = 0;

        /*
         * AEN requests are special as they don't time out and can
         * survive any kind of queue freeze and often don't respond to
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
        if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
            cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        else
                ret = nvme_tcp_process_nvme_cqe(queue, cqe);

        return ret;
}
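
/*
 * An R2T PDU from the controller solicits a host-to-controller data
 * transfer; the matching H2C data PDU is built in the request's
 * pre-allocated PDU buffer and covers exactly the byte window the
 * controller asked for.
 */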
static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
                struct nvme_tcp_r2t_pdu *pdu)
{
        struct nvme_tcp_data_pdu *data = req->pdu;
        struct nvme_tcp_queue *queue = req->queue;
        struct request *rq = blk_mq_rq_from_pdu(req);
        u8 hdgst = nvme_tcp_hdgst_len(queue);
        u8 ddgst = nvme_tcp_ddgst_len(queue);

        req->pdu_len = le32_to_cpu(pdu->r2t_length);
        req->pdu_sent = 0;

        if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
                dev_err(queue->ctrl->ctrl.device,
                        "req %d r2t len %u exceeded data len %u (%zu sent)\n",
                        rq->tag, req->pdu_len, req->data_len,
                        req->data_sent);
                return -EPROTO;
        }

        if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
                dev_err(queue->ctrl->ctrl.device,
                        "req %d unexpected r2t offset %u (expected %zu)\n",
                        rq->tag, le32_to_cpu(pdu->r2t_offset),
                        req->data_sent);
                return -EPROTO;
        }

        memset(data, 0, sizeof(*data));
        data->hdr.type = nvme_tcp_h2c_data;
        data->hdr.flags = NVME_TCP_F_DATA_LAST;
        if (queue->hdr_digest)
                data->hdr.flags |= NVME_TCP_F_HDGST;
        if (queue->data_digest)
                data->hdr.flags |= NVME_TCP_F_DDGST;
        data->hdr.hlen = sizeof(*data);
        data->hdr.pdo = data->hdr.hlen + hdgst;
        data->hdr.plen =
                cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
        data->ttag = pdu->ttag;
        data->command_id = rq->tag;
        data->data_offset = cpu_to_le32(req->data_sent);
        data->data_length = cpu_to_le32(req->pdu_len);
        return 0;
}
static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
                struct nvme_tcp_r2t_pdu *pdu)
{
        struct nvme_tcp_request *req;
        struct request *rq;
        int ret;

        rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
                        "queue %d tag %#x not found\n",
                        nvme_tcp_queue_id(queue), pdu->command_id);
                return -ENOENT;
        }
        req = blk_mq_rq_to_pdu(rq);

        ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
        if (unlikely(ret))
                return ret;

        req->state = NVME_TCP_SEND_H2C_PDU;
        req->offset = 0;

        nvme_tcp_queue_request(req);

        return 0;
}
static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
                unsigned int *offset, size_t *len)
{
        struct nvme_tcp_hdr *hdr;
        char *pdu = queue->pdu;
        size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
        int ret;

        ret = skb_copy_bits(skb, *offset,
                &pdu[queue->pdu_offset], rcv_len);
        if (unlikely(ret))
                return ret;

        queue->pdu_remaining -= rcv_len;
        queue->pdu_offset += rcv_len;
        *offset += rcv_len;
        *len -= rcv_len;
        if (queue->pdu_remaining)
                return 0;

        hdr = queue->pdu;
        if (queue->hdr_digest) {
                ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
                if (unlikely(ret))
                        return ret;
        }

        if (queue->data_digest) {
                ret = nvme_tcp_check_ddgst(queue, queue->pdu);
                if (unlikely(ret))
                        return ret;
        }

        switch (hdr->type) {
        case nvme_tcp_c2h_data:
                return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
        case nvme_tcp_rsp:
                nvme_tcp_init_recv_ctx(queue);
                return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
        case nvme_tcp_r2t:
                nvme_tcp_init_recv_ctx(queue);
                return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
        default:
                dev_err(queue->ctrl->ctrl.device,
                        "unsupported pdu type (%d)\n", hdr->type);
                return -EINVAL;
        }
}
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
        union nvme_result res = {};

        nvme_end_request(rq, cpu_to_le16(status << 1), res);
}
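
/*
 * C2H data is copied straight from the skb into the request's bio vectors;
 * when a data digest was negotiated the copy also feeds the receive CRC32C
 * ahash, so the payload is only traversed once.
 */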
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
                unsigned int *offset, size_t *len)
{
        struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
        struct nvme_tcp_request *req;
        struct request *rq;

        rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
        if (!rq) {
                dev_err(queue->ctrl->ctrl.device,
                        "queue %d tag %#x not found\n",
                        nvme_tcp_queue_id(queue), pdu->command_id);
                return -ENOENT;
        }
        req = blk_mq_rq_to_pdu(rq);

        while (true) {
                int recv_len, ret;

                recv_len = min_t(size_t, *len, queue->data_remaining);
                if (!recv_len)
                        break;

                if (!iov_iter_count(&req->iter)) {
                        req->curr_bio = req->curr_bio->bi_next;

                        /*
                         * If we don't have any bios it means that controller
                         * sent more data than we requested, hence error
                         */
                        if (!req->curr_bio) {
                                dev_err(queue->ctrl->ctrl.device,
                                        "queue %d no space in request %#x",
                                        nvme_tcp_queue_id(queue), rq->tag);
                                nvme_tcp_init_recv_ctx(queue);
                                return -EIO;
                        }
                        nvme_tcp_init_iter(req, READ);
                }

                /* we can read only from what is left in this bio */
                recv_len = min_t(size_t, recv_len,
                                iov_iter_count(&req->iter));

                if (queue->data_digest)
                        ret = skb_copy_and_hash_datagram_iter(skb, *offset,
                                &req->iter, recv_len, queue->rcv_hash);
                else
                        ret = skb_copy_datagram_iter(skb, *offset,
                                        &req->iter, recv_len);
                if (ret) {
                        dev_err(queue->ctrl->ctrl.device,
                                "queue %d failed to copy request %#x data",
                                nvme_tcp_queue_id(queue), rq->tag);
                        return ret;
                }

                *len -= recv_len;
                *offset += recv_len;
                queue->data_remaining -= recv_len;
        }

        if (!queue->data_remaining) {
                if (queue->data_digest) {
                        nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
                        queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
                } else {
                        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
                                nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
                        nvme_tcp_init_recv_ctx(queue);
                }
        }

        return 0;
}
static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
                struct sk_buff *skb, unsigned int *offset, size_t *len)
{
        struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
        char *ddgst = (char *)&queue->recv_ddgst;
        size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
        off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
        int ret;

        ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
        if (unlikely(ret))
                return ret;

        queue->ddgst_remaining -= recv_len;
        *offset += recv_len;
        *len -= recv_len;
        if (queue->ddgst_remaining)
                return 0;

        if (queue->recv_ddgst != queue->exp_ddgst) {
                dev_err(queue->ctrl->ctrl.device,
                        "data digest error: recv %#x expected %#x\n",
                        le32_to_cpu(queue->recv_ddgst),
                        le32_to_cpu(queue->exp_ddgst));
                return -EIO;
        }

        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
                struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
                                                pdu->command_id);

                nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
        }

        nvme_tcp_init_recv_ctx(queue);
        return 0;
}
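
/*
 * ->read_sock() callback: consumes as much of the skb as the current
 * receive state allows, advancing between PDU header, data and data-digest
 * processing until the skb is drained or an error is hit.
 */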
static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
                             unsigned int offset, size_t len)
{
        struct nvme_tcp_queue *queue = desc->arg.data;
        size_t consumed = len;
        int result;

        while (len) {
                switch (nvme_tcp_recv_state(queue)) {
                case NVME_TCP_RECV_PDU:
                        result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
                        break;
                case NVME_TCP_RECV_DATA:
                        result = nvme_tcp_recv_data(queue, skb, &offset, &len);
                        break;
                case NVME_TCP_RECV_DDGST:
                        result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
                        break;
                default:
                        result = -EFAULT;
                }
                if (result) {
                        dev_err(queue->ctrl->ctrl.device,
                                "receive failed: %d\n", result);
                        queue->rd_enabled = false;
                        nvme_tcp_error_recovery(&queue->ctrl->ctrl);
                        return result;
                }
        }

        return consumed;
}
static void nvme_tcp_data_ready(struct sock *sk)
{
        struct nvme_tcp_queue *queue;

        read_lock(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (likely(queue && queue->rd_enabled))
                queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
        read_unlock(&sk->sk_callback_lock);
}
static void nvme_tcp_write_space(struct sock *sk)
{
        struct nvme_tcp_queue *queue;

        read_lock_bh(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (likely(queue && sk_stream_is_writeable(sk))) {
                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
        }
        read_unlock_bh(&sk->sk_callback_lock);
}
static void nvme_tcp_state_change(struct sock *sk)
{
        struct nvme_tcp_queue *queue;

        read_lock(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (!queue)
                goto done;

        switch (sk->sk_state) {
        case TCP_CLOSE:
        case TCP_CLOSE_WAIT:
        case TCP_LAST_ACK:
        case TCP_FIN_WAIT1:
        case TCP_FIN_WAIT2:
                nvme_tcp_error_recovery(&queue->ctrl->ctrl);
                break;
        default:
                dev_info(queue->ctrl->ctrl.device,
                        "queue %d socket state %d\n",
                        nvme_tcp_queue_id(queue), sk->sk_state);
        }

        queue->state_change(sk);
done:
        read_unlock(&sk->sk_callback_lock);
}
static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
        queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
        nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
}
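
/*
 * Payload pages are transmitted with kernel_sendpage() so bio pages can go
 * out zero-copy; slab-backed pages cannot be zero-copied and fall back to
 * sock_no_sendpage().
 */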
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
        struct nvme_tcp_queue *queue = req->queue;

        while (true) {
                struct page *page = nvme_tcp_req_cur_page(req);
                size_t offset = nvme_tcp_req_cur_offset(req);
                size_t len = nvme_tcp_req_cur_length(req);
                bool last = nvme_tcp_pdu_last_send(req, len);
                int ret, flags = MSG_DONTWAIT;

                if (last && !queue->data_digest)
                        flags |= MSG_EOR;
                else
                        flags |= MSG_MORE;

                /* can't zcopy slab pages */
                if (unlikely(PageSlab(page)))
                        ret = sock_no_sendpage(queue->sock, page, offset, len,
                                        flags);
                else
                        ret = kernel_sendpage(queue->sock, page, offset, len,
                                        flags);
                if (ret <= 0)
                        return ret;

                nvme_tcp_advance_req(req, ret);
                if (queue->data_digest)
                        nvme_tcp_ddgst_update(queue->snd_hash, page,
                                        offset, ret);

                /* fully successful last write */
                if (last && ret == len) {
                        if (queue->data_digest) {
                                nvme_tcp_ddgst_final(queue->snd_hash,
                                        &req->ddgst);
                                req->state = NVME_TCP_SEND_DDGST;
                                req->offset = 0;
                        } else {
                                nvme_tcp_done_send_req(queue);
                        }
                        return 1;
                }
        }
        return -EAGAIN;
}
static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
        struct nvme_tcp_queue *queue = req->queue;
        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
        bool inline_data = nvme_tcp_has_inline_data(req);
        int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
        u8 hdgst = nvme_tcp_hdgst_len(queue);
        int len = sizeof(*pdu) + hdgst - req->offset;
        int ret;

        if (queue->hdr_digest && !req->offset)
                nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

        ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
                        offset_in_page(pdu) + req->offset, len, flags);
        if (unlikely(ret <= 0))
                return ret;

        len -= ret;
        if (!len) {
                if (inline_data) {
                        req->state = NVME_TCP_SEND_DATA;
                        if (queue->data_digest)
                                crypto_ahash_init(queue->snd_hash);
                        nvme_tcp_init_iter(req, WRITE);
                } else {
                        nvme_tcp_done_send_req(queue);
                }
                return 1;
        }
        req->offset += ret;

        return -EAGAIN;
}
static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
        struct nvme_tcp_queue *queue = req->queue;
        struct nvme_tcp_data_pdu *pdu = req->pdu;
        u8 hdgst = nvme_tcp_hdgst_len(queue);
        int len = sizeof(*pdu) - req->offset + hdgst;
        int ret;

        if (queue->hdr_digest && !req->offset)
                nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

        ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
                        offset_in_page(pdu) + req->offset, len,
                        MSG_DONTWAIT | MSG_MORE);
        if (unlikely(ret <= 0))
                return ret;

        len -= ret;
        if (!len) {
                req->state = NVME_TCP_SEND_DATA;
                if (queue->data_digest)
                        crypto_ahash_init(queue->snd_hash);
                if (!req->data_sent)
                        nvme_tcp_init_iter(req, WRITE);
                return 1;
        }
        req->offset += ret;

        return -EAGAIN;
}
static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
        struct nvme_tcp_queue *queue = req->queue;
        int ret;
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
        struct kvec iov = {
                .iov_base = &req->ddgst + req->offset,
                .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
        };

        ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
        if (unlikely(ret <= 0))
                return ret;

        if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
                nvme_tcp_done_send_req(queue);
                return 1;
        }

        req->offset += ret;
        return -EAGAIN;
}
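
/*
 * Send state machine: the command PDU goes out first, then (for writes)
 * either inline data or an H2C data PDU followed by the payload pages, and
 * finally the data digest when enabled.
 */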
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
        struct nvme_tcp_request *req;
        int ret = 1;

        if (!queue->request) {
                queue->request = nvme_tcp_fetch_request(queue);
                if (!queue->request)
                        return 0;
        }
        req = queue->request;

        if (req->state == NVME_TCP_SEND_CMD_PDU) {
                ret = nvme_tcp_try_send_cmd_pdu(req);
                if (ret <= 0)
                        goto done;
                if (!nvme_tcp_has_inline_data(req))
                        return ret;
        }

        if (req->state == NVME_TCP_SEND_H2C_PDU) {
                ret = nvme_tcp_try_send_data_pdu(req);
                if (ret <= 0)
                        goto done;
        }

        if (req->state == NVME_TCP_SEND_DATA) {
                ret = nvme_tcp_try_send_data(req);
                if (ret <= 0)
                        goto done;
        }

        if (req->state == NVME_TCP_SEND_DDGST)
                ret = nvme_tcp_try_send_ddgst(req);
done:
        return ret;
}
static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
        struct socket *sock = queue->sock;
        struct sock *sk = sock->sk;
        read_descriptor_t rd_desc;
        int consumed;

        rd_desc.arg.data = queue;
        rd_desc.count = 1;
        lock_sock(sk);
        consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
        release_sock(sk);
        return consumed;
}
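
/*
 * Both send and receive are driven from a single per-queue work item that
 * is always queued on queue->io_cpu, keeping socket processing for a queue
 * on one CPU at a time.
 */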
static void nvme_tcp_io_work(struct work_struct *w)
{
        struct nvme_tcp_queue *queue =
                container_of(w, struct nvme_tcp_queue, io_work);
        unsigned long start = jiffies + msecs_to_jiffies(1);

        do {
                bool pending = false;
                int result;

                result = nvme_tcp_try_send(queue);
                if (result > 0) {
                        pending = true;
                } else if (unlikely(result < 0)) {
                        dev_err(queue->ctrl->ctrl.device,
                                "failed to send request %d\n", result);
                        if (result != -EPIPE)
                                nvme_tcp_fail_request(queue->request);
                        nvme_tcp_done_send_req(queue);
                        return;
                }

                result = nvme_tcp_try_recv(queue);
                if (result > 0)
                        pending = true;

                if (!pending)
                        return;

        } while (time_after(jiffies, start)); /* quota is exhausted */

        queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

        ahash_request_free(queue->rcv_hash);
        ahash_request_free(queue->snd_hash);
        crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
        struct crypto_ahash *tfm;

        tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!queue->snd_hash)
                goto free_tfm;
        ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

        queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!queue->rcv_hash)
                goto free_snd_hash;
        ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

        return 0;
free_snd_hash:
        ahash_request_free(queue->snd_hash);
free_tfm:
        crypto_free_ahash(tfm);
        return -ENOMEM;
}
static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
        struct nvme_tcp_request *async = &ctrl->async_req;

        page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
        struct nvme_tcp_queue *queue = &ctrl->queues[0];
        struct nvme_tcp_request *async = &ctrl->async_req;
        u8 hdgst = nvme_tcp_hdgst_len(queue);

        async->pdu = page_frag_alloc(&queue->pf_cache,
                sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
                GFP_KERNEL | __GFP_ZERO);
        if (!async->pdu)
                return -ENOMEM;

        async->queue = &ctrl->queues[0];
        return 0;
}
static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];

        if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
                return;

        if (queue->hdr_digest || queue->data_digest)
                nvme_tcp_free_crypto(queue);

        sock_release(queue->sock);
        kfree(queue->pdu);
}
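
/*
 * ICReq/ICResp exchange: the host advertises PFV 1.0, no PDU alignment
 * (hpda = 0), a single in-flight R2T and the requested digests; the
 * controller's reply must agree on the digests and must not request any
 * controller PDU alignment (cpda).
 */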
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
        struct nvme_tcp_icreq_pdu *icreq;
        struct nvme_tcp_icresp_pdu *icresp;
        struct msghdr msg = {};
        struct kvec iov;
        bool ctrl_hdgst, ctrl_ddgst;
        int ret;

        icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
        if (!icreq)
                return -ENOMEM;

        icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
        if (!icresp) {
                ret = -ENOMEM;
                goto free_icreq;
        }

        icreq->hdr.type = nvme_tcp_icreq;
        icreq->hdr.hlen = sizeof(*icreq);
        icreq->hdr.pdo = 0;
        icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
        icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
        icreq->maxr2t = 0; /* single inflight r2t supported */
        icreq->hpda = 0; /* no alignment constraint */
        if (queue->hdr_digest)
                icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
        if (queue->data_digest)
                icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

        iov.iov_base = icreq;
        iov.iov_len = sizeof(*icreq);
        ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
        if (ret < 0)
                goto free_icresp;

        memset(&msg, 0, sizeof(msg));
        iov.iov_base = icresp;
        iov.iov_len = sizeof(*icresp);
        ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
                        iov.iov_len, msg.msg_flags);
        if (ret < 0)
                goto free_icresp;

        ret = -EINVAL;
        if (icresp->hdr.type != nvme_tcp_icresp) {
                pr_err("queue %d: bad type returned %d\n",
                        nvme_tcp_queue_id(queue), icresp->hdr.type);
                goto free_icresp;
        }

        if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
                pr_err("queue %d: bad pdu length returned %d\n",
                        nvme_tcp_queue_id(queue), icresp->hdr.plen);
                goto free_icresp;
        }

        if (icresp->pfv != NVME_TCP_PFV_1_0) {
                pr_err("queue %d: bad pfv returned %d\n",
                        nvme_tcp_queue_id(queue), icresp->pfv);
                goto free_icresp;
        }

        ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
        if ((queue->data_digest && !ctrl_ddgst) ||
            (!queue->data_digest && ctrl_ddgst)) {
                pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
                        nvme_tcp_queue_id(queue),
                        queue->data_digest ? "enabled" : "disabled",
                        ctrl_ddgst ? "enabled" : "disabled");
                goto free_icresp;
        }

        ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
        if ((queue->hdr_digest && !ctrl_hdgst) ||
            (!queue->hdr_digest && ctrl_hdgst)) {
                pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
                        nvme_tcp_queue_id(queue),
                        queue->hdr_digest ? "enabled" : "disabled",
                        ctrl_hdgst ? "enabled" : "disabled");
                goto free_icresp;
        }

        if (icresp->cpda != 0) {
                pr_err("queue %d: unsupported cpda returned %d\n",
                        nvme_tcp_queue_id(queue), icresp->cpda);
                goto free_icresp;
        }

        ret = 0;
free_icresp:
        kfree(icresp);
free_icreq:
        kfree(icreq);
        return ret;
}
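
/*
 * Queue allocation creates the TCP socket, applies the socket options
 * (single SYN retry, TCP_NODELAY, abortive SO_LINGER), connects to the
 * target, runs the ICReq/ICResp handshake and only then installs the sk
 * callbacks that drive io_work.
 */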
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
                int qid, size_t queue_size)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
        struct linger sol = { .l_onoff = 1, .l_linger = 0 };
        int ret, opt, rcv_pdu_size, n;

        queue->ctrl = ctrl;
        INIT_LIST_HEAD(&queue->send_list);
        spin_lock_init(&queue->lock);
        INIT_WORK(&queue->io_work, nvme_tcp_io_work);
        queue->queue_size = queue_size;

        if (qid > 0)
                queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
        else
                queue->cmnd_capsule_len = sizeof(struct nvme_command) +
                                                NVME_TCP_ADMIN_CCSZ;

        ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
                        IPPROTO_TCP, &queue->sock);
        if (ret) {
                dev_err(ctrl->ctrl.device,
                        "failed to create socket: %d\n", ret);
                return ret;
        }

        /* Single syn retry */
        opt = 1;
        ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
                        (char *)&opt, sizeof(opt));
        if (ret) {
                dev_err(ctrl->ctrl.device,
                        "failed to set TCP_SYNCNT sock opt %d\n", ret);
                goto err_sock;
        }

        /* Set TCP no delay */
        opt = 1;
        ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
                        TCP_NODELAY, (char *)&opt, sizeof(opt));
        if (ret) {
                dev_err(ctrl->ctrl.device,
                        "failed to set TCP_NODELAY sock opt %d\n", ret);
                goto err_sock;
        }

        /*
         * Cleanup whatever is sitting in the TCP transmit queue on socket
         * close. This is done to prevent stale data from being sent should
         * the network connection be restored before TCP times out.
         */
        ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
                        (char *)&sol, sizeof(sol));
        if (ret) {
                dev_err(ctrl->ctrl.device,
                        "failed to set SO_LINGER sock opt %d\n", ret);
                goto err_sock;
        }

        queue->sock->sk->sk_allocation = GFP_ATOMIC;
        if (!qid)
                n = 0;
        else
                n = (qid - 1) % num_online_cpus();
        queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
        queue->request = NULL;
        queue->data_remaining = 0;
        queue->ddgst_remaining = 0;
        queue->pdu_remaining = 0;
        queue->pdu_offset = 0;
        sk_set_memalloc(queue->sock->sk);

        if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
                ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
                        sizeof(ctrl->src_addr));
                if (ret) {
                        dev_err(ctrl->ctrl.device,
                                "failed to bind queue %d socket %d\n",
                                qid, ret);
                        goto err_sock;
                }
        }

        queue->hdr_digest = nctrl->opts->hdr_digest;
        queue->data_digest = nctrl->opts->data_digest;
        if (queue->hdr_digest || queue->data_digest) {
                ret = nvme_tcp_alloc_crypto(queue);
                if (ret) {
                        dev_err(ctrl->ctrl.device,
                                "failed to allocate queue %d crypto\n", qid);
                        goto err_sock;
                }
        }

        rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
                        nvme_tcp_hdgst_len(queue);
        queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
        if (!queue->pdu) {
                ret = -ENOMEM;
                goto err_crypto;
        }

        dev_dbg(ctrl->ctrl.device, "connecting queue %d\n",
                        nvme_tcp_queue_id(queue));

        ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
                sizeof(ctrl->addr), 0);
        if (ret) {
                dev_err(ctrl->ctrl.device,
                        "failed to connect socket: %d\n", ret);
                goto err_rcv_pdu;
        }

        ret = nvme_tcp_init_connection(queue);
        if (ret)
                goto err_init_connect;

        queue->rd_enabled = true;
        set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
        nvme_tcp_init_recv_ctx(queue);

        write_lock_bh(&queue->sock->sk->sk_callback_lock);
        queue->sock->sk->sk_user_data = queue;
        queue->state_change = queue->sock->sk->sk_state_change;
        queue->data_ready = queue->sock->sk->sk_data_ready;
        queue->write_space = queue->sock->sk->sk_write_space;
        queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
        queue->sock->sk->sk_state_change = nvme_tcp_state_change;
        queue->sock->sk->sk_write_space = nvme_tcp_write_space;
        write_unlock_bh(&queue->sock->sk->sk_callback_lock);

        return 0;

err_init_connect:
        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
err_rcv_pdu:
        kfree(queue->pdu);
err_crypto:
        if (queue->hdr_digest || queue->data_digest)
                nvme_tcp_free_crypto(queue);
err_sock:
        sock_release(queue->sock);
        return ret;
}
static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
{
        struct socket *sock = queue->sock;

        write_lock_bh(&sock->sk->sk_callback_lock);
        sock->sk->sk_user_data = NULL;
        sock->sk->sk_data_ready = queue->data_ready;
        sock->sk->sk_state_change = queue->state_change;
        sock->sk->sk_write_space = queue->write_space;
        write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        nvme_tcp_restore_sock_calls(queue);
        cancel_work_sync(&queue->io_work);
}

static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];

        if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
                return;

        __nvme_tcp_stop_queue(queue);
}
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        int ret;

        if (idx)
                ret = nvmf_connect_io_queue(nctrl, idx, false);
        else
                ret = nvmf_connect_admin_queue(nctrl);

        if (!ret) {
                set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
        } else {
                if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
                        __nvme_tcp_stop_queue(&ctrl->queues[idx]);
                dev_err(nctrl->device,
                        "failed to connect queue: %d ret=%d\n", idx, ret);
        }
        return ret;
}
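
/*
 * Two tag sets are used: a small admin set with two reserved tags
 * (connect + keep-alive) and an I/O set sized from sqsize, with two blk-mq
 * maps so reads and writes can be steered to separate hardware contexts.
 */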
static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
                bool admin)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct blk_mq_tag_set *set;
        int ret;

        if (admin) {
                set = &ctrl->admin_tag_set;
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_tcp_admin_mq_ops;
                set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
                set->reserved_tags = 2; /* connect + keep-alive */
                set->numa_node = NUMA_NO_NODE;
                set->cmd_size = sizeof(struct nvme_tcp_request);
                set->driver_data = ctrl;
                set->nr_hw_queues = 1;
                set->timeout = ADMIN_TIMEOUT;
        } else {
                set = &ctrl->tag_set;
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_tcp_mq_ops;
                set->queue_depth = nctrl->sqsize + 1;
                set->reserved_tags = 1; /* fabric connect */
                set->numa_node = NUMA_NO_NODE;
                set->flags = BLK_MQ_F_SHOULD_MERGE;
                set->cmd_size = sizeof(struct nvme_tcp_request);
                set->driver_data = ctrl;
                set->nr_hw_queues = nctrl->queue_count - 1;
                set->timeout = NVME_IO_TIMEOUT;
                set->nr_maps = 2 /* default + read */;
        }

        ret = blk_mq_alloc_tag_set(set);
        if (ret)
                return ERR_PTR(ret);

        return set;
}
static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
        if (to_tcp_ctrl(ctrl)->async_req.pdu) {
                nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
                to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
        }

        nvme_tcp_free_queue(ctrl, 0);
}

static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->queue_count; i++)
                nvme_tcp_free_queue(ctrl, i);
}
static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->queue_count; i++)
                nvme_tcp_stop_queue(ctrl, i);
}

static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
        int i, ret = 0;

        for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvme_tcp_start_queue(ctrl, i);
                if (ret)
                        goto out_stop_queues;
        }

        return 0;

out_stop_queues:
        for (i--; i >= 1; i--)
                nvme_tcp_stop_queue(ctrl, i);
        return ret;
}
static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
        int ret;

        ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
        if (ret)
                return ret;

        ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
        if (ret)
                goto out_free_queue;

        return 0;

out_free_queue:
        nvme_tcp_free_queue(ctrl, 0);
        return ret;
}

static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
        int i, ret;

        for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvme_tcp_alloc_queue(ctrl, i,
                                ctrl->sqsize + 1);
                if (ret)
                        goto out_free_queues;
        }

        return 0;

out_free_queues:
        for (i--; i >= 1; i--)
                nvme_tcp_free_queue(ctrl, i);

        return ret;
}
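
/*
 * The requested I/O queue count is the sum of the default and write queue
 * options, each capped at the number of online CPUs; nvme_set_queue_count()
 * may reduce the total further when the controller supports fewer queues.
 */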
static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
{
        unsigned int nr_io_queues;

        nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
        nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());

        return nr_io_queues;
}
static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
                unsigned int nr_io_queues)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvmf_ctrl_options *opts = nctrl->opts;

        if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
                /*
                 * separate read/write queues
                 * hand out dedicated default queues only after we have
                 * sufficient read queues.
                 */
                ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
                nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
                ctrl->io_queues[HCTX_TYPE_DEFAULT] =
                        min(opts->nr_write_queues, nr_io_queues);
                nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
        } else {
                /*
                 * shared read/write queues
                 * either no write queues were requested, or we don't have
                 * sufficient queue count to have dedicated default queues.
                 */
                ctrl->io_queues[HCTX_TYPE_DEFAULT] =
                        min(opts->nr_io_queues, nr_io_queues);
                nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
        }
}
static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
        unsigned int nr_io_queues;
        int ret;

        nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
        ret = nvme_set_queue_count(ctrl, &nr_io_queues);
        if (ret)
                return ret;

        ctrl->queue_count = nr_io_queues + 1;
        if (ctrl->queue_count < 2)
                return 0;

        dev_info(ctrl->device,
                "creating %d I/O queues.\n", nr_io_queues);

        nvme_tcp_set_io_queues(ctrl, nr_io_queues);

        return __nvme_tcp_alloc_io_queues(ctrl);
}
static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
        nvme_tcp_stop_io_queues(ctrl);
        if (remove) {
                blk_cleanup_queue(ctrl->connect_q);
                blk_mq_free_tag_set(ctrl->tagset);
        }
        nvme_tcp_free_io_queues(ctrl);
}
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
        int ret;

        ret = nvme_tcp_alloc_io_queues(ctrl);
        if (ret)
                return ret;

        if (new) {
                ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
                if (IS_ERR(ctrl->tagset)) {
                        ret = PTR_ERR(ctrl->tagset);
                        goto out_free_io_queues;
                }

                ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
                if (IS_ERR(ctrl->connect_q)) {
                        ret = PTR_ERR(ctrl->connect_q);
                        goto out_free_tag_set;
                }
        } else {
                blk_mq_update_nr_hw_queues(ctrl->tagset,
                        ctrl->queue_count - 1);
        }

        ret = nvme_tcp_start_io_queues(ctrl);
        if (ret)
                goto out_cleanup_connect_q;

        return 0;

out_cleanup_connect_q:
        if (new)
                blk_cleanup_queue(ctrl->connect_q);
out_free_tag_set:
        if (new)
                blk_mq_free_tag_set(ctrl->tagset);
out_free_io_queues:
        nvme_tcp_free_io_queues(ctrl);
        return ret;
}
static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
        nvme_tcp_stop_queue(ctrl, 0);
        if (remove) {
                blk_cleanup_queue(ctrl->admin_q);
                blk_mq_free_tag_set(ctrl->admin_tagset);
        }
        nvme_tcp_free_admin_queue(ctrl);
}
static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
{
        int error;

        error = nvme_tcp_alloc_admin_queue(ctrl);
        if (error)
                return error;

        if (new) {
                ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
                if (IS_ERR(ctrl->admin_tagset)) {
                        error = PTR_ERR(ctrl->admin_tagset);
                        goto out_free_queue;
                }

                ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
                if (IS_ERR(ctrl->admin_q)) {
                        error = PTR_ERR(ctrl->admin_q);
                        goto out_free_tagset;
                }
        }

        error = nvme_tcp_start_queue(ctrl, 0);
        if (error)
                goto out_cleanup_queue;

        error = nvme_enable_ctrl(ctrl);
        if (error)
                goto out_stop_queue;

        error = nvme_init_identify(ctrl);
        if (error)
                goto out_stop_queue;

        return 0;

out_stop_queue:
        nvme_tcp_stop_queue(ctrl, 0);
out_cleanup_queue:
        if (new)
                blk_cleanup_queue(ctrl->admin_q);
out_free_tagset:
        if (new)
                blk_mq_free_tag_set(ctrl->admin_tagset);
out_free_queue:
        nvme_tcp_free_admin_queue(ctrl);
        return error;
}
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
                bool remove)
{
        blk_mq_quiesce_queue(ctrl->admin_q);
        nvme_tcp_stop_queue(ctrl, 0);
        if (ctrl->admin_tagset) {
                blk_mq_tagset_busy_iter(ctrl->admin_tagset,
                        nvme_cancel_request, ctrl);
                blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
        }
        blk_mq_unquiesce_queue(ctrl->admin_q);
        nvme_tcp_destroy_admin_queue(ctrl, remove);
}
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
                bool remove)
{
        if (ctrl->queue_count <= 1)
                return;
        nvme_stop_queues(ctrl);
        nvme_tcp_stop_io_queues(ctrl);
        if (ctrl->tagset) {
                blk_mq_tagset_busy_iter(ctrl->tagset,
                        nvme_cancel_request, ctrl);
                blk_mq_tagset_wait_completed_request(ctrl->tagset);
        }
        if (remove)
                nvme_start_queues(ctrl);
        nvme_tcp_destroy_io_queues(ctrl, remove);
}
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
{
        /* If we are resetting/deleting then do nothing */
        if (ctrl->state != NVME_CTRL_CONNECTING) {
                WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
                        ctrl->state == NVME_CTRL_LIVE);
                return;
        }

        if (nvmf_should_reconnect(ctrl)) {
                dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
                        ctrl->opts->reconnect_delay);
                queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
                                ctrl->opts->reconnect_delay * HZ);
        } else {
                dev_info(ctrl->device, "Removing controller...\n");
                nvme_delete_ctrl(ctrl);
        }
}
static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
        struct nvmf_ctrl_options *opts = ctrl->opts;
        int ret;

        ret = nvme_tcp_configure_admin_queue(ctrl, new);
        if (ret)
                return ret;

        if (ctrl->icdoff) {
                dev_err(ctrl->device, "icdoff is not supported!\n");
                goto destroy_admin;
        }

        if (opts->queue_size > ctrl->sqsize + 1)
                dev_warn(ctrl->device,
                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
                        opts->queue_size, ctrl->sqsize + 1);

        if (ctrl->sqsize + 1 > ctrl->maxcmd) {
                dev_warn(ctrl->device,
                        "sqsize %u > ctrl maxcmd %u, clamping down\n",
                        ctrl->sqsize + 1, ctrl->maxcmd);
                ctrl->sqsize = ctrl->maxcmd - 1;
        }

        if (ctrl->queue_count > 1) {
                ret = nvme_tcp_configure_io_queues(ctrl, new);
                if (ret)
                        goto destroy_admin;
        }

        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
                /* state change failure is ok if we're in DELETING state */
                WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
                ret = -EINVAL;
                goto destroy_io;
        }

        nvme_start_ctrl(ctrl);
        return 0;

destroy_io:
        if (ctrl->queue_count > 1)
                nvme_tcp_destroy_io_queues(ctrl, new);
destroy_admin:
        nvme_tcp_stop_queue(ctrl, 0);
        nvme_tcp_destroy_admin_queue(ctrl, new);
        return ret;
}
static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{
        struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
                        struct nvme_tcp_ctrl, connect_work);
        struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

        ++ctrl->nr_reconnects;

        if (nvme_tcp_setup_ctrl(ctrl, false))
                goto requeue;

        dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
                        ctrl->nr_reconnects);

        ctrl->nr_reconnects = 0;

        return;

requeue:
        dev_info(ctrl->device, "Failed reconnect attempt %d\n",
                        ctrl->nr_reconnects);
        nvme_tcp_reconnect_or_remove(ctrl);
}
static void nvme_tcp_error_recovery_work(struct work_struct *work)
{
        struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
                                struct nvme_tcp_ctrl, err_work);
        struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

        nvme_stop_keep_alive(ctrl);
        nvme_tcp_teardown_io_queues(ctrl, false);
        /* unquiesce to fail fast pending requests */
        nvme_start_queues(ctrl);
        nvme_tcp_teardown_admin_queue(ctrl, false);

        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we're in DELETING state */
                WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
                return;
        }

        nvme_tcp_reconnect_or_remove(ctrl);
}
static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
        cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
        cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);

        nvme_tcp_teardown_io_queues(ctrl, shutdown);
        if (shutdown)
                nvme_shutdown_ctrl(ctrl);
        else
                nvme_disable_ctrl(ctrl, ctrl->cap);
        nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{
        nvme_tcp_teardown_ctrl(ctrl, true);
}
static void nvme_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_ctrl *ctrl =
                container_of(work, struct nvme_ctrl, reset_work);

        nvme_stop_ctrl(ctrl);
        nvme_tcp_teardown_ctrl(ctrl, false);

        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we're in DELETING state */
                WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
                return;
        }

        if (nvme_tcp_setup_ctrl(ctrl, false))
                goto out_fail;

        return;

out_fail:
        ++ctrl->nr_reconnects;
        nvme_tcp_reconnect_or_remove(ctrl);
}
static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

        if (list_empty(&ctrl->list))
                goto free_ctrl;

        mutex_lock(&nvme_tcp_ctrl_mutex);
        list_del(&ctrl->list);
        mutex_unlock(&nvme_tcp_ctrl_mutex);

        nvmf_free_options(nctrl->opts);
free_ctrl:
        kfree(ctrl->queues);
        kfree(ctrl);
}
static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

        sg->addr = 0;
        sg->length = 0;
        sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
                        NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
                struct nvme_command *c, u32 data_len)
{
        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

        sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
        sg->length = cpu_to_le32(data_len);
        sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
                u32 data_len)
{
        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

        sg->addr = 0;
        sg->length = cpu_to_le32(data_len);
        sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
                        NVME_SGL_FMT_TRANSPORT_A;
}
static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
        struct nvme_tcp_queue *queue = &ctrl->queues[0];
        struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
        struct nvme_command *cmd = &pdu->cmd;
        u8 hdgst = nvme_tcp_hdgst_len(queue);

        memset(pdu, 0, sizeof(*pdu));
        pdu->hdr.type = nvme_tcp_cmd;
        if (queue->hdr_digest)
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

        cmd->common.opcode = nvme_admin_async_event;
        cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
        cmd->common.flags |= NVME_CMD_SGL_METABUF;
        nvme_tcp_set_sg_null(cmd);

        ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
        ctrl->async_req.offset = 0;
        ctrl->async_req.curr_bio = NULL;
        ctrl->async_req.data_len = 0;

        nvme_tcp_queue_request(&ctrl->async_req);
}
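
/*
 * Request timeouts during controller setup or recovery tear the queues
 * down directly and complete the request; on a live controller they kick
 * error recovery and re-arm the block layer timer instead.
 */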
static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
        struct nvme_tcp_cmd_pdu *pdu = req->pdu;

        dev_warn(ctrl->ctrl.device,
                "queue %d: timeout request %#x type %d\n",
                nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);

        if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
                /*
                 * Teardown immediately if controller times out while starting
                 * or we are already started error recovery. all outstanding
                 * requests are completed on shutdown, so we return BLK_EH_DONE.
                 */
                flush_work(&ctrl->err_work);
                nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
                nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
                return BLK_EH_DONE;
        }

        dev_warn(ctrl->ctrl.device, "starting error recovery\n");
        nvme_tcp_error_recovery(&ctrl->ctrl);

        return BLK_EH_RESET_TIMER;
}
static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
                        struct request *rq)
{
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
        struct nvme_command *c = &pdu->cmd;

        c->common.flags |= NVME_CMD_SGL_METABUF;

        if (rq_data_dir(rq) == WRITE && req->data_len &&
            req->data_len <= nvme_tcp_inline_data_size(queue))
                nvme_tcp_set_sg_inline(queue, c, req->data_len);
        else
                nvme_tcp_set_sg_host_data(c, req->data_len);

        return 0;
}
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
                struct request *rq)
{
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
        struct nvme_tcp_queue *queue = req->queue;
        u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
        blk_status_t ret;

        ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
        if (ret)
                return ret;

        req->state = NVME_TCP_SEND_CMD_PDU;
        req->offset = 0;
        req->data_sent = 0;
        req->pdu_len = 0;
        req->pdu_sent = 0;
        req->data_len = blk_rq_payload_bytes(rq);
        req->curr_bio = rq->bio;

        if (rq_data_dir(rq) == WRITE &&
            req->data_len <= nvme_tcp_inline_data_size(queue))
                req->pdu_len = req->data_len;
        else if (req->curr_bio)
                nvme_tcp_init_iter(req, READ);

        pdu->hdr.type = nvme_tcp_cmd;
        pdu->hdr.flags = 0;
        if (queue->hdr_digest)
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
        if (queue->data_digest && req->pdu_len) {
                pdu->hdr.flags |= NVME_TCP_F_DDGST;
                ddgst = nvme_tcp_ddgst_len(queue);
        }
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
        pdu->hdr.plen =
                cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

        ret = nvme_tcp_map_data(queue, rq);
        if (unlikely(ret)) {
                dev_err(queue->ctrl->ctrl.device,
                        "Failed to map data (%d)\n", ret);
                return ret;
        }

        return 0;
}
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_tcp_queue *queue = hctx->driver_data;
        struct request *rq = bd->rq;
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
        blk_status_t ret;

        if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
                return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

        ret = nvme_tcp_setup_cmd_pdu(ns, rq);
        if (unlikely(ret))
                return ret;

        blk_mq_start_request(rq);

        nvme_tcp_queue_request(req);

        return BLK_STS_OK;
}
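
/*
 * blk-mq map: with dedicated write queues the default map gets the write
 * queues and the read map gets its own range; otherwise both maps share
 * the same set of queues starting at offset 0.
 */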
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
        struct nvme_tcp_ctrl *ctrl = set->driver_data;
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

        if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
                /* separate read/write queues */
                set->map[HCTX_TYPE_DEFAULT].nr_queues =
                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
                set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
                set->map[HCTX_TYPE_READ].nr_queues =
                        ctrl->io_queues[HCTX_TYPE_READ];
                set->map[HCTX_TYPE_READ].queue_offset =
                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
        } else {
                /* shared read/write queues */
                set->map[HCTX_TYPE_DEFAULT].nr_queues =
                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
                set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
                set->map[HCTX_TYPE_READ].nr_queues =
                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
                set->map[HCTX_TYPE_READ].queue_offset = 0;
        }
        blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
        blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

        dev_info(ctrl->ctrl.device,
                "mapped %d/%d default/read queues.\n",
                ctrl->io_queues[HCTX_TYPE_DEFAULT],
                ctrl->io_queues[HCTX_TYPE_READ]);

        return 0;
}
static struct blk_mq_ops nvme_tcp_mq_ops = {
        .queue_rq       = nvme_tcp_queue_rq,
        .complete       = nvme_complete_rq,
        .init_request   = nvme_tcp_init_request,
        .exit_request   = nvme_tcp_exit_request,
        .init_hctx      = nvme_tcp_init_hctx,
        .timeout        = nvme_tcp_timeout,
        .map_queues     = nvme_tcp_map_queues,
};

static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
        .queue_rq       = nvme_tcp_queue_rq,
        .complete       = nvme_complete_rq,
        .init_request   = nvme_tcp_init_request,
        .exit_request   = nvme_tcp_exit_request,
        .init_hctx      = nvme_tcp_init_admin_hctx,
        .timeout        = nvme_tcp_timeout,
};
static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
        .name                   = "tcp",
        .module                 = THIS_MODULE,
        .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
        .free_ctrl              = nvme_tcp_free_ctrl,
        .submit_async_event     = nvme_tcp_submit_async_event,
        .delete_ctrl            = nvme_tcp_delete_ctrl,
        .get_address            = nvmf_get_address,
};
static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
        struct nvme_tcp_ctrl *ctrl;
        bool found = false;

        mutex_lock(&nvme_tcp_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
                found = nvmf_ip_options_match(&ctrl->ctrl, opts);
                if (found)
                        break;
        }
        mutex_unlock(&nvme_tcp_ctrl_mutex);

        return found;
}
static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
                struct nvmf_ctrl_options *opts)
{
        struct nvme_tcp_ctrl *ctrl;
        int ret;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ctrl->list);
        ctrl->ctrl.opts = opts;
        ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;

        INIT_DELAYED_WORK(&ctrl->connect_work,
                        nvme_tcp_reconnect_ctrl_work);
        INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
        INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

        if (!(opts->mask & NVMF_OPT_TRSVCID)) {
                opts->trsvcid =
                        kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
                if (!opts->trsvcid) {
                        ret = -ENOMEM;
                        goto out_free_ctrl;
                }
                opts->mask |= NVMF_OPT_TRSVCID;
        }

        ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
                        opts->traddr, opts->trsvcid, &ctrl->addr);
        if (ret) {
                pr_err("malformed address passed: %s:%s\n",
                        opts->traddr, opts->trsvcid);
                goto out_free_ctrl;
        }

        if (opts->mask & NVMF_OPT_HOST_TRADDR) {
                ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
                        opts->host_traddr, NULL, &ctrl->src_addr);
                if (ret) {
                        pr_err("malformed src address passed: %s\n",
                                opts->host_traddr);
                        goto out_free_ctrl;
                }
        }

        if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
                ret = -EALREADY;
                goto out_free_ctrl;
        }

        ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
                                GFP_KERNEL);
        if (!ctrl->queues) {
                ret = -ENOMEM;
                goto out_free_ctrl;
        }

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
        if (ret)
                goto out_kfree_queues;

        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                WARN_ON_ONCE(1);
                ret = -EINTR;
                goto out_uninit_ctrl;
        }

        ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
        if (ret)
                goto out_uninit_ctrl;

        dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
                ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

        nvme_get_ctrl(&ctrl->ctrl);

        mutex_lock(&nvme_tcp_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
        mutex_unlock(&nvme_tcp_ctrl_mutex);

        return &ctrl->ctrl;

out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
out_kfree_queues:
        kfree(ctrl->queues);
out_free_ctrl:
        kfree(ctrl);
        return ERR_PTR(ret);
}
static struct nvmf_transport_ops nvme_tcp_transport = {
        .name           = "tcp",
        .module         = THIS_MODULE,
        .required_opts  = NVMF_OPT_TRADDR,
        .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
                          NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
                          NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
                          NVMF_OPT_NR_WRITE_QUEUES,
        .create_ctrl    = nvme_tcp_create_ctrl,
};
static int __init nvme_tcp_init_module(void)
{
        nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
                        WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!nvme_tcp_wq)
                return -ENOMEM;

        nvmf_register_transport(&nvme_tcp_transport);
        return 0;
}
static void __exit nvme_tcp_cleanup_module(void)
{
        struct nvme_tcp_ctrl *ctrl;

        nvmf_unregister_transport(&nvme_tcp_transport);

        mutex_lock(&nvme_tcp_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
                nvme_delete_ctrl(&ctrl->ctrl);
        mutex_unlock(&nvme_tcp_ctrl_mutex);
        flush_workqueue(nvme_delete_wq);

        destroy_workqueue(nvme_tcp_wq);
}
module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");