// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

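/*
 * Open the backing block device for a namespace and cache its capacity
 * and logical block size (as a power-of-two shift).  -ENOTBLK is not
 * logged because it only means the configured path is not a block
 * device; the caller presumably falls back to the file-backed backend
 * in that case.
 */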
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
	return 0;
}

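/* Drop the device reference taken by nvmet_bdev_ns_enable(), if any. */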
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
		ns->bdev = NULL;
	}
}

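/*
 * Reverse-map a block layer status to an NVMe status code, and record
 * the byte offset of the offending command field (error_loc) and the
 * offending LBA (error_slba) so they can be reported in the error log.
 */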
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
	u16 status = NVME_SC_SUCCESS;

	if (likely(blk_sts == BLK_STS_OK))
		return status;
	/*
	 * Right now there exists an M : 1 mapping between block layer errors
	 * and NVMe status codes (see nvme_error_status()). For consistency,
	 * when we reverse-map we use the most appropriate NVMe status code
	 * from the group of NVMe status codes used in nvme_error_status().
	 */
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, length);
		break;
	case BLK_STS_TARGET:
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		break;
	case BLK_STS_NOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case BLK_STS_MEDIUM:
		status = NVME_SC_ACCESS_DENIED;
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		break;
	case BLK_STS_IOERR:
		/* fallthru */
	default:
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	}

	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
		break;
	case nvme_cmd_write_zeroes:
		req->error_slba =
			le64_to_cpu(req->cmd->write_zeroes.slba);
		break;
	default:
		req->error_slba = 0;
	}
	return status;
}

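/*
 * Shared bio completion handler: translate the bio status into an NVMe
 * status and complete the request.  The bio is only put if it was
 * allocated, i.e. it is not the inline bio embedded in the request.
 */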
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

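/*
 * Read/Write handler.  NVMe LBAs are converted to 512-byte linux sectors
 * by shifting by (blksize_shift - 9): e.g. with 4096-byte logical blocks
 * blksize_shift is 12, so sector = slba << 3.  Transfers that fit in
 * NVMET_MAX_INLINE_DATA_LEN reuse the bio and bvecs embedded in the
 * request to avoid an allocation; anything larger allocates bios and
 * chains them, so nvmet_bio_done() runs exactly once, after the whole
 * chain has completed.  REQ_NOMERGE on PCI P2P DMA pages presumably
 * keeps the block layer from merging peer-to-peer segments with others.
 */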
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	sector_t sector;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		op_flags = REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

	if (is_pci_p2pdma_page(sg_page(req->sg)))
		op_flags |= REQ_NOMERGE;

	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	}
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	submit_bio(bio);
}

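/*
 * FLUSH is implemented as an empty write bio with REQ_PREFLUSH set,
 * which asks the device to write out its volatile write cache.
 */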
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}

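/* Synchronous flush variant; blocks until the flush has completed. */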
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

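/*
 * Queue a discard for a single DSM range, converting slba/nlb from
 * logical blocks to 512-byte sectors.  __blkdev_issue_discard() chains
 * the work onto *bio so a multi-range command completes as one unit.
 * -EOPNOTSUPP is treated as success, presumably because deallocate is
 * advisory and lack of device support is not a command error.
 */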
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
		struct nvme_dsm_range *range, struct bio **bio)
{
	struct nvmet_ns *ns = req->ns;
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio);
	if (ret && ret != -EOPNOTSUPP) {
		req->error_slba = le64_to_cpu(range->slba);
		return errno_to_nvme_status(req, ret);
	}
	return NVME_SC_SUCCESS;
}

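/*
 * Walk the DSM range list (dsm.nr is 0's based, hence the <=), copying
 * each range out of the SGL and queueing a discard for it.  If any bios
 * were built they are submitted, or failed via bio_endio() when an error
 * occurred part way through, and the request then completes through
 * nvmet_bio_done(); otherwise the request is completed directly.
 */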
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}

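/*
 * Only the Deallocate attribute is implemented.  Integral Dataset for
 * Read/Write are hints, so completing them successfully without doing
 * any work is acceptable behavior.
 */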
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

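/*
 * Write Zeroes: the NVMe length field is 0's based, hence the +1 when
 * computing nr_sector.  __blkdev_issue_zeroout() may return without
 * having built a bio (e.g. on early failure), in which case the request
 * is completed here with the mapped errno instead.
 */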
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	sector_t nr_sector;
	sector_t sector;
	int ret;

	sector = le64_to_cpu(write_zeroes->slba) <<
		(req->ns->blksize_shift - 9);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0);
	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
	}
}

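/*
 * Parse an I/O command and set up the bdev backend execute handler and
 * the expected data length, so the core can validate the transfer length
 * before ->execute() is invoked.
 */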
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}