// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	struct list_head	entry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	spinlock_t		lock;
	struct list_head	send_list;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static struct blk_mq_ops nvme_tcp_mq_ops;
static struct blk_mq_ops nvme_tcp_admin_mq_ops;

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;
	unsigned int bytes;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);
	bytes = blk_rq_payload_bytes(rq);

	return rq_data_dir(rq) == WRITE && bytes &&
		bytes <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
{
	return req->iter.iov_offset;
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

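/*
 * Set up req->iter to walk the request's data: either the single
 * special payload vector (RQF_SPECIAL_PAYLOAD, e.g. discard) or the
 * bvecs of the current bio, picking up where the bio iterator left
 * off.  "dir" is the direction the iterator will be consumed in.
 */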
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nsegs;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nsegs = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nsegs = bio_segments(bio);
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

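/*
 * Hand a request to the send path: link it on the queue's send_list
 * and kick io_work on the queue's designated CPU.  All wire I/O is
 * done from io_work context, never from the submitter directly.
 */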
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	spin_lock(&queue->lock);
	list_add_tail(&req->entry, &queue->send_list);
	spin_unlock(&queue->lock);

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	spin_lock(&queue->lock);
	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (req)
		list_del(&req->entry);
	spin_unlock(&queue->lock);

	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

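/*
 * Compute the header digest over "len" bytes of the PDU header and
 * write it directly behind the header (at pdu + len), which is where
 * it goes out on the wire.
 */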
static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
			GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

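/*
 * Move the controller to RESETTING and schedule err_work; if the state
 * change fails we are already resetting or deleting and there is
 * nothing more to do here.
 */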
static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag 0x%x not found\n",
			nvme_tcp_queue_id(queue), cqe->command_id);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	nvme_end_request(rq, cqe->status, cqe->result);

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
	    cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

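/*
 * Build the H2C data PDU answering a controller R2T: validate the
 * requested length/offset against what was already sent, then fill in
 * the header so the send path can stream the payload behind it.
 */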
static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = rq->tag;
	data->data_offset = cpu_to_le32(req->data_sent);
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req);

	return 0;
}

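/*
 * Receive state machine, stage 1: accumulate bytes from the skb until
 * a complete PDU header (plus header digest, if negotiated) has
 * arrived, verify digests, then dispatch on the PDU type.
 */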
static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		ret = nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
		break;
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		ret = nvme_tcp_handle_comp(queue, (void *)queue->pdu);
		break;
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		ret = nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
		break;
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}

	return ret;
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	nvme_end_request(rq, cpu_to_le16(status << 1), res);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x\n",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
				&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data\n",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
		return -EIO;
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
						pdu->command_id);

		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

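/*
 * Socket callbacks, installed over the TCP defaults at queue setup.
 * They run in atomic context under sk_callback_lock, so they only
 * schedule io_work (or trigger error recovery on terminal socket
 * states) and return.
 */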
static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
}

static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest)
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE;

		ret = kernel_sendpage(queue->sock, page, offset, len, flags);
		if (ret <= 0)
			return ret;

		nvme_tcp_advance_req(req, ret);
		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/* fully successful last write */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
			nvme_tcp_init_iter(req, WRITE);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		if (!req->data_sent)
			nvme_tcp_init_iter(req, WRITE);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
	struct kvec iov = {
		.iov_base = &req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN)
		ret = 0;
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct sock *sk = queue->sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	consumed = tcp_read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}

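/*
 * Main per-queue I/O loop: alternate between sending queued requests
 * and reaping received data, bounded by a ~1ms budget so a single
 * queue cannot monopolize the workqueue CPU; requeue ourselves while
 * work is still pending.
 */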
static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long start = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		result = nvme_tcp_try_send(queue);
		if (result > 0) {
			pending = true;
		} else if (unlikely(result < 0)) {
			dev_err(queue->ctrl->ctrl.device,
				"failed to send request %d\n", result);
			if (result != -EPIPE)
				nvme_tcp_fail_request(queue->request);
			nvme_tcp_done_send_req(queue);
			return;
		}

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;

		if (!pending)
			return;

	} while (time_after(jiffies, start)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	sock_release(queue->sock);
	kfree(queue->pdu);
}

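/*
 * NVMe/TCP connection establishment: send an ICReq PDU and validate
 * every field of the controller's ICResp (type, PDU length, PFV, data
 * alignment, and that the digest settings match what we asked for).
 */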
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (ret < 0)
		goto free_icresp;

	ret = -EINVAL;
	if (icresp->hdr.type != nvme_tcp_icresp) {
		pr_err("queue %d: bad type returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.type);
		goto free_icresp;
	}

	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
		pr_err("queue %d: bad pdu length returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.plen);
		goto free_icresp;
	}

	if (icresp->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv returned %d\n",
			nvme_tcp_queue_id(queue), icresp->pfv);
		goto free_icresp;
	}

	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if ((queue->data_digest && !ctrl_ddgst) ||
	    (!queue->data_digest && ctrl_ddgst)) {
		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->data_digest ? "enabled" : "disabled",
			ctrl_ddgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	if ((queue->hdr_digest && !ctrl_hdgst) ||
	    (!queue->hdr_digest && ctrl_hdgst)) {
		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->hdr_digest ? "enabled" : "disabled",
			ctrl_hdgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	if (icresp->cpda != 0) {
		pr_err("queue %d: unsupported cpda returned %d\n",
			nvme_tcp_queue_id(queue), icresp->cpda);
		goto free_icresp;
	}

	ret = 0;
free_icresp:
	kfree(icresp);
free_icreq:
	kfree(icreq);
	return ret;
}

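/*
 * Allocate and connect one host queue: create the TCP socket, apply
 * socket options (single SYN retry, TCP_NODELAY, zero-linger), bind an
 * optional source address, connect to the target, run the ICReq/ICResp
 * handshake and only then install our socket callbacks.
 */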
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
		int qid, size_t queue_size)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
	struct linger sol = { .l_onoff = 1, .l_linger = 0 };
	int ret, opt, rcv_pdu_size, n;

	queue->ctrl = ctrl;
	INIT_LIST_HEAD(&queue->send_list);
	spin_lock_init(&queue->lock);
	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
	queue->queue_size = queue_size;

	if (qid > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
						NVME_TCP_ADMIN_CCSZ;

	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
			IPPROTO_TCP, &queue->sock);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"failed to create socket: %d\n", ret);
		return ret;
	}

	/* Single syn retry */
	opt = 1;
	ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
			(char *)&opt, sizeof(opt));
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"failed to set TCP_SYNCNT sock opt %d\n", ret);
		goto err_sock;
	}

	/* Set TCP no delay */
	opt = 1;
	ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
			TCP_NODELAY, (char *)&opt, sizeof(opt));
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"failed to set TCP_NODELAY sock opt %d\n", ret);
		goto err_sock;
	}

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
			(char *)&sol, sizeof(sol));
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"failed to set SO_LINGER sock opt %d\n", ret);
		goto err_sock;
	}

	queue->sock->sk->sk_allocation = GFP_ATOMIC;
	if (!qid)
		n = 0;
	else
		n = (qid - 1) % num_online_cpus();
	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
	queue->request = NULL;
	queue->data_remaining = 0;
	queue->ddgst_remaining = 0;
	queue->pdu_remaining = 0;
	queue->pdu_offset = 0;
	sk_set_memalloc(queue->sock->sk);

	if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
			sizeof(ctrl->src_addr));
		if (ret) {
			dev_err(ctrl->ctrl.device,
				"failed to bind queue %d socket %d\n",
				qid, ret);
			goto err_sock;
		}
	}

	queue->hdr_digest = nctrl->opts->hdr_digest;
	queue->data_digest = nctrl->opts->data_digest;
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvme_tcp_alloc_crypto(queue);
		if (ret) {
			dev_err(ctrl->ctrl.device,
				"failed to allocate queue %d crypto\n", qid);
			goto err_sock;
		}
	}

	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
			nvme_tcp_hdgst_len(queue);
	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
	if (!queue->pdu) {
		ret = -ENOMEM;
		goto err_crypto;
	}

	dev_dbg(ctrl->ctrl.device, "connecting queue %d\n",
			nvme_tcp_queue_id(queue));

	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
		sizeof(ctrl->addr), 0);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"failed to connect socket: %d\n", ret);
		goto err_rcv_pdu;
	}

	ret = nvme_tcp_init_connection(queue);
	if (ret)
		goto err_init_connect;

	queue->rd_enabled = true;
	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
	nvme_tcp_init_recv_ctx(queue);

	write_lock_bh(&queue->sock->sk->sk_callback_lock);
	queue->sock->sk->sk_user_data = queue;
	queue->state_change = queue->sock->sk->sk_state_change;
	queue->data_ready = queue->sock->sk->sk_data_ready;
	queue->write_space = queue->sock->sk->sk_write_space;
	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
	write_unlock_bh(&queue->sock->sk->sk_callback_lock);

	return 0;

err_init_connect:
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
err_rcv_pdu:
	kfree(queue->pdu);
err_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);
err_sock:
	sock_release(queue->sock);
	return ret;
}

static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = NULL;
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	nvme_tcp_restore_sock_calls(queue);
	cancel_work_sync(&queue->io_work);
}

static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return;

	__nvme_tcp_stop_queue(queue);
}

static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	int ret;

	if (idx)
		ret = nvmf_connect_io_queue(nctrl, idx, false);
	else
		ret = nvmf_connect_admin_queue(nctrl);

	if (!ret) {
		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
	} else {
		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
		dev_err(nctrl->device,
			"failed to connect queue: %d ret=%d\n", idx, ret);
	}
	return ret;
}

static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
		bool admin)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct blk_mq_tag_set *set;
	int ret;

	if (admin) {
		set = &ctrl->admin_tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_tcp_admin_mq_ops;
		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		set->reserved_tags = 2; /* connect + keep-alive */
		set->numa_node = NUMA_NO_NODE;
		set->cmd_size = sizeof(struct nvme_tcp_request);
		set->driver_data = ctrl;
		set->nr_hw_queues = 1;
		set->timeout = ADMIN_TIMEOUT;
	} else {
		set = &ctrl->tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_tcp_mq_ops;
		set->queue_depth = nctrl->sqsize + 1;
		set->reserved_tags = 1; /* fabric connect */
		set->numa_node = NUMA_NO_NODE;
		set->flags = BLK_MQ_F_SHOULD_MERGE;
		set->cmd_size = sizeof(struct nvme_tcp_request);
		set->driver_data = ctrl;
		set->nr_hw_queues = nctrl->queue_count - 1;
		set->timeout = NVME_IO_TIMEOUT;
		set->nr_maps = 2 /* default + read */;
	}

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ERR_PTR(ret);

	return set;
}

static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
	}

	nvme_tcp_free_queue(ctrl, 0);
}

static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_free_queue(ctrl, i);
}

static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_stop_queue(ctrl, i);
}

static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_start_queue(ctrl, i);
		if (ret)
			goto out_stop_queues;
	}

	return 0;

out_stop_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_stop_queue(ctrl, i);
	return ret;
}

static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (ret)
		return ret;

	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
	if (ret)
		goto out_free_queue;

	return 0;

out_free_queue:
	nvme_tcp_free_queue(ctrl, 0);
	return ret;
}

static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_alloc_queue(ctrl, i,
				ctrl->sqsize + 1);
		if (ret)
			goto out_free_queues;
	}

	return 0;

out_free_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_free_queue(ctrl, i);

	return ret;
}

static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
{
	unsigned int nr_io_queues;

	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());

	return nr_io_queues;
}

static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
		unsigned int nr_io_queues)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvmf_ctrl_options *opts = nctrl->opts;

	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
		/*
		 * separate read/write queues
		 * hand out dedicated default queues only after we have
		 * sufficient read queues.
		 */
		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_write_queues, nr_io_queues);
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/*
		 * shared read/write queues
		 * either no write queues were requested, or we don't have
		 * sufficient queue count to have dedicated default queues.
		 */
		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_io_queues, nr_io_queues);
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	}
}

static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
	if (ret)
		return ret;

	ctrl->queue_count = nr_io_queues + 1;
	if (ctrl->queue_count < 2)
		return 0;

	dev_info(ctrl->device,
		"creating %d I/O queues.\n", nr_io_queues);

	nvme_tcp_set_io_queues(ctrl, nr_io_queues);

	return __nvme_tcp_alloc_io_queues(ctrl);
}

static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_tcp_stop_io_queues(ctrl);
	if (remove) {
		blk_cleanup_queue(ctrl->connect_q);
		blk_mq_free_tag_set(ctrl->tagset);
	}
	nvme_tcp_free_io_queues(ctrl);
}

static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
	int ret;

	ret = nvme_tcp_alloc_io_queues(ctrl);
	if (ret)
		return ret;

	if (new) {
		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
		if (IS_ERR(ctrl->tagset)) {
			ret = PTR_ERR(ctrl->tagset);
			goto out_free_io_queues;
		}

		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
		if (IS_ERR(ctrl->connect_q)) {
			ret = PTR_ERR(ctrl->connect_q);
			goto out_free_tag_set;
		}
	} else {
		blk_mq_update_nr_hw_queues(ctrl->tagset,
			ctrl->queue_count - 1);
	}

	ret = nvme_tcp_start_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	if (new)
		blk_cleanup_queue(ctrl->connect_q);
out_free_tag_set:
	if (new)
		blk_mq_free_tag_set(ctrl->tagset);
out_free_io_queues:
	nvme_tcp_free_io_queues(ctrl);
	return ret;
}

static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_tcp_stop_queue(ctrl, 0);
	if (remove) {
		blk_cleanup_queue(ctrl->admin_q);
		blk_mq_free_tag_set(ctrl->admin_tagset);
	}
	nvme_tcp_free_admin_queue(ctrl);
}

static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
{
	int error;

	error = nvme_tcp_alloc_admin_queue(ctrl);
	if (error)
		return error;

	if (new) {
		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
		if (IS_ERR(ctrl->admin_tagset)) {
			error = PTR_ERR(ctrl->admin_tagset);
			goto out_free_queue;
		}

		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
		if (IS_ERR(ctrl->admin_q)) {
			error = PTR_ERR(ctrl->admin_q);
			goto out_free_tagset;
		}
	}

	error = nvme_tcp_start_queue(ctrl, 0);
	if (error)
		goto out_cleanup_queue;

	error = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_stop_queue;
	}

	ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);

	error = nvme_enable_ctrl(ctrl, ctrl->cap);
	if (error)
		goto out_stop_queue;

	error = nvme_init_identify(ctrl);
	if (error)
		goto out_stop_queue;

	return 0;

out_stop_queue:
	nvme_tcp_stop_queue(ctrl, 0);
out_cleanup_queue:
	if (new)
		blk_cleanup_queue(ctrl->admin_q);
out_free_tagset:
	if (new)
		blk_mq_free_tag_set(ctrl->admin_tagset);
out_free_queue:
	nvme_tcp_free_admin_queue(ctrl);
	return error;
}

static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
		bool remove)
{
	blk_mq_quiesce_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	if (ctrl->admin_tagset)
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
			nvme_cancel_request, ctrl);
	blk_mq_unquiesce_queue(ctrl->admin_q);
	nvme_tcp_destroy_admin_queue(ctrl, remove);
}

static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
		bool remove)
{
	if (ctrl->queue_count <= 1)
		return;
	nvme_stop_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
	if (ctrl->tagset)
		blk_mq_tagset_busy_iter(ctrl->tagset,
			nvme_cancel_request, ctrl);
	if (remove)
		nvme_start_queues(ctrl);
	nvme_tcp_destroy_io_queues(ctrl, remove);
}

static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->state != NVME_CTRL_CONNECTING) {
		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
			ctrl->state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(ctrl)) {
		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
			ctrl->opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
				ctrl->opts->reconnect_delay * HZ);
	} else {
		dev_info(ctrl->device, "Removing controller...\n");
		nvme_delete_ctrl(ctrl);
	}
}

static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = nvme_tcp_configure_admin_queue(ctrl, new);
	if (ret)
		return ret;

	if (ctrl->icdoff) {
		dev_err(ctrl->device, "icdoff is not supported!\n");
		ret = -EOPNOTSUPP;
		goto destroy_admin;
	}

	if (opts->queue_size > ctrl->sqsize + 1)
		dev_warn(ctrl->device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->sqsize + 1);

	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
		dev_warn(ctrl->device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->sqsize + 1, ctrl->maxcmd);
		ctrl->sqsize = ctrl->maxcmd - 1;
	}

	if (ctrl->queue_count > 1) {
		ret = nvme_tcp_configure_io_queues(ctrl, new);
		if (ret)
			goto destroy_admin;
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
		/* state change failure is ok if we're in DELETING state */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
		ret = -EINVAL;
		goto destroy_io;
	}

	nvme_start_ctrl(ctrl);
	return 0;

destroy_io:
	if (ctrl->queue_count > 1)
		nvme_tcp_destroy_io_queues(ctrl, new);
destroy_admin:
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_tcp_destroy_admin_queue(ctrl, new);
	return ret;
}

static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
			struct nvme_tcp_ctrl, connect_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	++ctrl->nr_reconnects;

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto requeue;

	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
			ctrl->nr_reconnects);

	ctrl->nr_reconnects = 0;

	return;

requeue:
	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
			ctrl->nr_reconnects);
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_error_recovery_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
				struct nvme_tcp_ctrl, err_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	nvme_stop_keep_alive(ctrl);
	nvme_tcp_teardown_io_queues(ctrl, false);
	/* unquiesce to fail fast pending requests */
	nvme_start_queues(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, false);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we're in DELETING state */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
		return;
	}

	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);

	nvme_tcp_teardown_io_queues(ctrl, shutdown);
	if (shutdown)
		nvme_shutdown_ctrl(ctrl);
	else
		nvme_disable_ctrl(ctrl, ctrl->cap);
	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_tcp_teardown_ctrl(ctrl, true);
}

static void nvme_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, reset_work);

	nvme_stop_ctrl(ctrl);
	nvme_tcp_teardown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we're in DELETING state */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
		return;
	}

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto out_fail;

	return;

out_fail:
	++ctrl->nr_reconnects;
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl->queues);
	kfree(ctrl);
}

static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = 0;
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
		u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

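/*
 * Submit the Async Event Request.  AERs have no struct request behind
 * them; the PDU lives in the pre-allocated ctrl->async_req and is
 * queued on the admin queue like any other command.
 */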
static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
	struct nvme_command *cmd = &pdu->cmd;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	memset(pdu, 0, sizeof(*pdu));
	pdu->hdr.type = nvme_tcp_cmd;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_tcp_set_sg_null(cmd);

	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
	ctrl->async_req.offset = 0;
	ctrl->async_req.curr_bio = NULL;
	ctrl->async_req.data_len = 0;

	nvme_tcp_queue_request(&ctrl->async_req);
}

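/*
 * Request timeout handler: if the controller is not live (startup or
 * error recovery already in progress) tear everything down inline and
 * complete the request; otherwise fire error recovery and give the
 * request another timer period.
 */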
static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;

	dev_warn(ctrl->ctrl.device,
		"queue %d: timeout request %#x type %d\n",
		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);

	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
		/*
		 * Teardown immediately if the controller times out while
		 * starting or if we have already started error recovery.
		 * All outstanding requests are completed on shutdown, so
		 * we return BLK_EH_DONE.
		 */
		flush_work(&ctrl->err_work);
		nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
		nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
		return BLK_EH_DONE;
	}

	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
	nvme_tcp_error_recovery(&ctrl->ctrl);

	return BLK_EH_RESET_TIMER;
}

static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (rq_data_dir(rq) == WRITE && req->data_len &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}

static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_payload_bytes(rq);
	req->curr_bio = rq->bio;

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;
	else if (req->curr_bio)
		nvme_tcp_init_iter(req, READ);

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

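/*
 * blk-mq .queue_rq entry point: fail fast if the queue is not live,
 * otherwise build the command PDU and hand the request to the send
 * path.
 */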
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req);

	return BLK_STS_OK;
}

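/*
 * Map blk-mq hardware contexts onto our I/O queues: with dedicated
 * write queues the read set is offset past the default set, otherwise
 * both types share the same range starting at queue 0.
 */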
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d default/read queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ]);

	return 0;
}

static struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
};

static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvmf_get_address,
};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
				opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	nvme_get_ctrl(&ctrl->ctrl);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

static int __init nvme_tcp_init_module(void)
{
	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");