nvmet: don't split large I/Os unconditionally
author	Sagi Grimberg <sagi@grimberg.me>
	Fri, 28 Sep 2018 22:40:43 +0000 (15:40 -0700)
committer	Christoph Hellwig <hch@lst.de>
	Mon, 1 Oct 2018 21:16:13 +0000 (14:16 -0700)
If we know that the I/O size exceeds our inline bio vec, there is
no point in using it and splitting the rest off it; allocate a
properly sized bio to begin with. We could in theory reuse the
inline bio and only allocate the bio_vec, but it's really not
worth optimizing for.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
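
For scale: NVMET_MAX_INLINE_BIOVEC is 8 (see the nvmet.h hunk below), so
with the common 4 KiB PAGE_SIZE the inline path covers I/Os up to 32 KiB.
A minimal standalone check of that arithmetic, assuming 4 KiB pages
(PAGE_SIZE is arch-dependent in the kernel):

#include <stdio.h>

#define PAGE_SIZE 4096	/* assumption: 4 KiB pages */
#define NVMET_MAX_INLINE_BIOVEC 8
/* parenthesized here for standalone use; the patch's define is not */
#define NVMET_MAX_INLINE_DATA_LEN (NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

int main(void)
{
	/* I/Os at or below this size keep using the inline bio/bvec */
	printf("inline ceiling: %d bytes\n", NVMET_MAX_INLINE_DATA_LEN);
	return 0;
}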
drivers/nvme/target/io-cmd-bdev.c
drivers/nvme/target/nvmet.h

diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 7bc9f624043296c2bd71d625b6a7ec36d9319015..f93fb571114280b3b0408e678e187adb561a3f22 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -58,7 +58,7 @@ static void nvmet_bio_done(struct bio *bio)
 static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 {
        int sg_cnt = req->sg_cnt;
-       struct bio *bio = &req->b.inline_bio;
+       struct bio *bio;
        struct scatterlist *sg;
        sector_t sector;
        blk_qc_t cookie;
@@ -81,7 +81,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
        sector = le64_to_cpu(req->cmd->rw.slba);
        sector <<= (req->ns->blksize_shift - 9);
 
-       bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+       if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+               bio = &req->b.inline_bio;
+               bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+       } else {
+               bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+       }
        bio_set_dev(bio, req->ns->bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_private = req;
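
The allocated-bio branch needs no matching change in the completion path:
nvmet_bio_done already puts any bio that is not the inline one. Roughly,
from the surrounding code (context, not part of this diff):

static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, bio->bi_status ? NVME_SC_INTERNAL : 0);
	/* bios from bio_alloc() hold a reference to drop; the inline
	 * bio is embedded in struct nvmet_req and must not be put */
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}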
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index ec9af4ee03b603cb2e4e68c23e78d9b59a1a331a..08f7b57a1203fd8511f9fe3fca68fec521873667 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -264,6 +264,7 @@ struct nvmet_fabrics_ops {
 };
 
 #define NVMET_MAX_INLINE_BIOVEC        8
+#define NVMET_MAX_INLINE_DATA_LEN NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE
 
 struct nvmet_req {
        struct nvme_command     *cmd;
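
One caveat worth noting: the new macro body is unparenthesized, which is
fine for the simple comparison in this patch but can bind unexpectedly in
larger expressions. A hypothetical illustration (not from this patch):

/* hypothetical use: the unparenthesized body changes the grouping */
size_t nbufs = total_len / NVMET_MAX_INLINE_DATA_LEN;
/* expands to: total_len / 8 * PAGE_SIZE -- divides by 8, then
 * multiplies by PAGE_SIZE, instead of dividing by 32 KiB */

Defining it as (NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE) would avoid this.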