block: make bdev_ops->rw_page() take a REQ_OP instead of bool
author    Tejun Heo <tj@kernel.org>      Wed, 18 Jul 2018 11:47:36 +0000 (04:47 -0700)
committer Jens Axboe <axboe@kernel.dk>   Wed, 18 Jul 2018 14:44:14 +0000 (08:44 -0600)
c11f0c0b5bb9 ("block/mm: make bdev_ops->rw_page() take a bool for
read/write") replaced @op with boolean @is_write, which limited the
amount of information going into ->rw_page() and more importantly
page_endio(), which removed the need to expose block internals to mm.

Unfortunately, we want to track discards separately and @is_write
isn't enough information.  This patch updates bdev_ops->rw_page() to
take REQ_OP instead but leaves page_endio() to take bool @is_write.
This allows the block part of operations to have enough information
while not leaking it to mm.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Mike Christie <mchristi@redhat.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/brd.c
drivers/block/zram/zram_drv.c
drivers/nvdimm/btt.c
drivers/nvdimm/pmem.c
fs/block_dev.c
fs/mpage.c
include/linux/blkdev.h
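
For orientation before the per-file diffs, here is a minimal sketch of the driver-side pattern that results from this change, modeled on brd_rw_page() in the diff below. example_rw_page(), example_do_io() and example_fops are hypothetical names used only for illustration; the sketch assumes the 4.18-era op_is_write() and page_endio() helpers that the patch itself uses.

/*
 * Illustrative sketch only, not part of the patch.
 * Assumes <linux/blkdev.h> and <linux/pagemap.h>.
 *
 * ->rw_page() now receives the REQ_OP as an unsigned int instead of a
 * bool, while page_endio() still takes a bool, so the driver converts
 * with op_is_write() at that boundary.  example_do_io() stands in for a
 * driver's internal per-page I/O helper (cf. brd_do_bvec()).
 */
static int example_rw_page(struct block_device *bdev, sector_t sector,
			   struct page *page, unsigned int op)
{
	int err;

	/* Pass the full op down so the driver can account ops precisely. */
	err = example_do_io(bdev, page, op, sector);

	/* mm still only cares whether this was a read or a write. */
	page_endio(page, op_is_write(op), err);
	return err;
}

static const struct block_device_operations example_fops = {
	.owner   = THIS_MODULE,
	.rw_page = example_rw_page,
};

The only in-tree callers, bdev_read_page() and bdev_write_page() in fs/block_dev.c, now pass REQ_OP_READ and REQ_OP_WRITE explicitly instead of false/true, as the diff below shows.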

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bb976598ee4340d6a71f86df61c414352fc6a706..df8103dd40ac2d1f6e5c29ef7790160a3335b35b 100644
@@ -254,20 +254,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
  * Process a single bvec of a bio.
  */
 static int brd_do_bvec(struct brd_device *brd, struct page *page,
-                       unsigned int len, unsigned int off, bool is_write,
+                       unsigned int len, unsigned int off, unsigned int op,
                        sector_t sector)
 {
        void *mem;
        int err = 0;
 
-       if (is_write) {
+       if (op_is_write(op)) {
                err = copy_to_brd_setup(brd, sector, len);
                if (err)
                        goto out;
        }
 
        mem = kmap_atomic(page);
-       if (!is_write) {
+       if (!op_is_write(op)) {
                copy_from_brd(mem + off, brd, sector, len);
                flush_dcache_page(page);
        } else {
@@ -296,7 +296,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
                int err;
 
                err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
-                                       op_is_write(bio_op(bio)), sector);
+                                 bio_op(bio), sector);
                if (err)
                        goto io_error;
                sector += len >> SECTOR_SHIFT;
@@ -310,15 +310,15 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int brd_rw_page(struct block_device *bdev, sector_t sector,
-                      struct page *page, bool is_write)
+                      struct page *page, unsigned int op)
 {
        struct brd_device *brd = bdev->bd_disk->private_data;
        int err;
 
        if (PageTransHuge(page))
                return -ENOTSUPP;
-       err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
-       page_endio(page, is_write, err);
+       err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
+       page_endio(page, op_is_write(op), err);
        return err;
 }
 
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7436b2d27fa38513602207c6b4bc9213c97af436..78c29044684a678878d25b35a5b512bf457c2a58 100644
@@ -1274,17 +1274,17 @@ static void zram_bio_discard(struct zram *zram, u32 index,
  * Returns 1 if IO request was successfully submitted.
  */
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-                       int offset, bool is_write, struct bio *bio)
+                       int offset, unsigned int op, struct bio *bio)
 {
        unsigned long start_time = jiffies;
-       int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
+       int rw_acct = op_is_write(op) ? REQ_OP_WRITE : REQ_OP_READ;
        struct request_queue *q = zram->disk->queue;
        int ret;
 
        generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
                        &zram->disk->part0);
 
-       if (!is_write) {
+       if (!op_is_write(op)) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                flush_dcache_page(bvec->bv_page);
@@ -1300,7 +1300,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
        zram_slot_unlock(zram, index);
 
        if (unlikely(ret < 0)) {
-               if (!is_write)
+               if (!op_is_write(op))
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
@@ -1338,7 +1338,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
                        bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
                                                        unwritten);
                        if (zram_bvec_rw(zram, &bv, index, offset,
-                                       op_is_write(bio_op(bio)), bio) < 0)
+                                        bio_op(bio), bio) < 0)
                                goto out;
 
                        bv.bv_offset += bv.bv_len;
@@ -1390,7 +1390,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
 }
 
 static int zram_rw_page(struct block_device *bdev, sector_t sector,
-                      struct page *page, bool is_write)
+                      struct page *page, unsigned int op)
 {
        int offset, ret;
        u32 index;
@@ -1414,7 +1414,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
        bv.bv_len = PAGE_SIZE;
        bv.bv_offset = 0;
 
-       ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
+       ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
 out:
        /*
         * If I/O fails, just return error(ie, non-zero) without
@@ -1429,7 +1429,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 
        switch (ret) {
        case 0:
-               page_endio(page, is_write, 0);
+               page_endio(page, op_is_write(op), 0);
                break;
        case 1:
                ret = 0;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 85de8053aa344510a9f071db342404a424c16c73..0360c015f6580b1cb5fef2667b2844381f0c8696 100644
@@ -1423,11 +1423,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 
 static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
                        struct page *page, unsigned int len, unsigned int off,
-                       bool is_write, sector_t sector)
+                       unsigned int op, sector_t sector)
 {
        int ret;
 
-       if (!is_write) {
+       if (!op_is_write(op)) {
                ret = btt_read_pg(btt, bip, page, off, sector, len);
                flush_dcache_page(page);
        } else {
@@ -1464,7 +1464,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
                }
 
                err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
-                                 op_is_write(bio_op(bio)), iter.bi_sector);
+                                 bio_op(bio), iter.bi_sector);
                if (err) {
                        dev_err(&btt->nd_btt->dev,
                                        "io error in %s sector %lld, len %d,\n",
@@ -1483,16 +1483,16 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int btt_rw_page(struct block_device *bdev, sector_t sector,
-               struct page *page, bool is_write)
+               struct page *page, unsigned int op)
 {
        struct btt *btt = bdev->bd_disk->private_data;
        int rc;
        unsigned int len;
 
        len = hpage_nr_pages(page) * PAGE_SIZE;
-       rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector);
+       rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
        if (rc == 0)
-               page_endio(page, is_write, 0);
+               page_endio(page, op_is_write(op), 0);
 
        return rc;
 }
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 8b1fd7f1a224eedebf08cddfe2258949c50a6bcf..dd17acd8fe6810e5d1a04c05bd8f23241ceec409 100644
@@ -120,7 +120,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
 }
 
 static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
-                       unsigned int len, unsigned int off, bool is_write,
+                       unsigned int len, unsigned int off, unsigned int op,
                        sector_t sector)
 {
        blk_status_t rc = BLK_STS_OK;
@@ -131,7 +131,7 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
        if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
                bad_pmem = true;
 
-       if (!is_write) {
+       if (!op_is_write(op)) {
                if (unlikely(bad_pmem))
                        rc = BLK_STS_IOERR;
                else {
@@ -180,8 +180,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
        do_acct = nd_iostat_start(bio, &start);
        bio_for_each_segment(bvec, bio, iter) {
                rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
-                               bvec.bv_offset, op_is_write(bio_op(bio)),
-                               iter.bi_sector);
+                               bvec.bv_offset, bio_op(bio), iter.bi_sector);
                if (rc) {
                        bio->bi_status = rc;
                        break;
@@ -198,13 +197,13 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
-                      struct page *page, bool is_write)
+                      struct page *page, unsigned int op)
 {
        struct pmem_device *pmem = bdev->bd_queue->queuedata;
        blk_status_t rc;
 
        rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
-                         0, is_write, sector);
+                         0, op, sector);
 
        /*
         * The ->rw_page interface is subtle and tricky.  The core
@@ -213,7 +212,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
         * caused by double completion.
         */
        if (rc == 0)
-               page_endio(page, is_write, 0);
+               page_endio(page, op_is_write(op), 0);
 
        return blk_status_to_errno(rc);
 }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0dd87aaeb39a7d05bbec28ce01536b106c4f76c2..496fb51a1e1a1058e5c4d0c5d14501da039c47cd 100644
@@ -665,7 +665,8 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
        result = blk_queue_enter(bdev->bd_queue, 0);
        if (result)
                return result;
-       result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
+       result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
+                             REQ_OP_READ);
        blk_queue_exit(bdev->bd_queue);
        return result;
 }
@@ -703,7 +704,8 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
                return result;
 
        set_page_writeback(page);
-       result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
+       result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
+                             REQ_OP_WRITE);
        if (result) {
                end_page_writeback(page);
        } else {
diff --git a/fs/mpage.c b/fs/mpage.c
index b7e7f570733ad0766afe5d7f116e787c7ebf21f4..b73638db9866cd31f516cc5b0f11850825e5824b 100644
@@ -51,8 +51,8 @@ static void mpage_end_io(struct bio *bio)
 
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
-               page_endio(page, op_is_write(bio_op(bio)),
-                               blk_status_to_errno(bio->bi_status));
+               page_endio(page, bio_op(bio),
+                          blk_status_to_errno(bio->bi_status));
        }
 
        bio_put(bio);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1939ed95f9361a12e3ce717623235cf48bc15e24..331a6cb8805f00f358ecc2b918726822edc1cdeb 100644
@@ -1943,7 +1943,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
 struct block_device_operations {
        int (*open) (struct block_device *, fmode_t);
        void (*release) (struct gendisk *, fmode_t);
-       int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
+       int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        unsigned int (*check_events) (struct gendisk *disk,