block: Introduce request_queue.initialize_rq_fn()
block/blk-core.c
index 8592409db2720a324fb256c05a84071188167912..09989028616ffd5a790a6aaf47d2b6c81dc58e3f 100644
@@ -143,6 +143,7 @@ static const struct {
        [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
        [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
        [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
+       [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },
 
        /* device mapper special case, should not leak out: */
        [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
@@ -790,7 +791,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        if (q->id < 0)
                goto fail_q;
 
-       q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+       q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (!q->bio_split)
                goto fail_id;
 
@@ -1314,6 +1315,11 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
        if (!IS_ERR(rq))
                return rq;
 
+       if (op & REQ_NOWAIT) {
+               blk_put_rl(rl);
+               return ERR_PTR(-EAGAIN);
+       }
+
        if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
                blk_put_rl(rl);
                return rq;
@@ -1341,8 +1347,8 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
        goto retry;
 }
 
-static struct request *blk_old_get_request(struct request_queue *q, int rw,
-               gfp_t gfp_mask)
+static struct request *blk_old_get_request(struct request_queue *q,
+                                          unsigned int op, gfp_t gfp_mask)
 {
        struct request *rq;
 
@@ -1350,7 +1356,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
        create_io_context(gfp_mask, q->node);
 
        spin_lock_irq(q->queue_lock);
-       rq = get_request(q, rw, NULL, gfp_mask);
+       rq = get_request(q, op, NULL, gfp_mask);
        if (IS_ERR(rq)) {
                spin_unlock_irq(q->queue_lock);
                return rq;
@@ -1363,14 +1369,24 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
        return rq;
 }
 
-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, unsigned int op,
+                               gfp_t gfp_mask)
 {
-       if (q->mq_ops)
-               return blk_mq_alloc_request(q, rw,
+       struct request *req;
+
+       if (q->mq_ops) {
+               req = blk_mq_alloc_request(q, op,
                        (gfp_mask & __GFP_DIRECT_RECLAIM) ?
                                0 : BLK_MQ_REQ_NOWAIT);
-       else
-               return blk_old_get_request(q, rw, gfp_mask);
+               if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
+                       q->mq_ops->initialize_rq_fn(req);
+       } else {
+               req = blk_old_get_request(q, op, gfp_mask);
+               if (!IS_ERR(req) && q->initialize_rq_fn)
+                       q->initialize_rq_fn(req);
+       }
+
+       return req;
 }
 EXPORT_SYMBOL(blk_get_request);
 
@@ -1723,7 +1739,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
         */
        blk_queue_bounce(q, &bio);
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio->bi_status = BLK_STS_IOERR;
@@ -1961,6 +1977,14 @@ generic_make_request_checks(struct bio *bio)
                goto end_io;
        }
 
+       /*
+        * For a REQ_NOWAIT based request, return -EOPNOTSUPP
+        * if queue is not a request based queue.
+        */
+
+       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+               goto not_supported;
+
        part = bio->bi_bdev->bd_part;
        if (should_fail_request(part, bio->bi_iter.bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
@@ -2118,7 +2142,7 @@ blk_qc_t generic_make_request(struct bio *bio)
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-               if (likely(blk_queue_enter(q, false) == 0)) {
+               if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
                        struct bio_list lower, same;
 
                        /* Create a fresh bio_list for all subordinate requests */
@@ -2143,7 +2167,11 @@ blk_qc_t generic_make_request(struct bio *bio)
                        bio_list_merge(&bio_list_on_stack[0], &same);
                        bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
                } else {
-                       bio_io_error(bio);
+                       if (unlikely(!blk_queue_dying(q) &&
+                                       (bio->bi_opf & REQ_NOWAIT)))
+                               bio_wouldblock_error(bio);
+                       else
+                               bio_io_error(bio);
                }
                bio = bio_list_pop(&bio_list_on_stack[0]);
        } while (bio);
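
For context, two hedged sketches of how the interfaces touched above might be used; they are illustrative only and not part of the diff itself.

First, the initialize_rq_fn hook: after this change, blk_get_request() calls q->initialize_rq_fn (or q->mq_ops->initialize_rq_fn on blk-mq queues) on every request it successfully allocates, so a driver can pre-initialize its per-request data once at allocation time. All mydrv_* names and the payload layout below are hypothetical.

/*
 * Hypothetical driver callback: invoked by blk_get_request() for each
 * request allocated on this queue.  Assumes the driver's per-request
 * payload sits directly after struct request, which is the address
 * blk_mq_rq_to_pdu() returns.
 */
static void mydrv_initialize_rq(struct request *rq)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	memset(cmd, 0, sizeof(*cmd));
	cmd->retries = 0;
}

/* Legacy queue: set the hook directly on the request_queue ...      */
q->initialize_rq_fn = mydrv_initialize_rq;
/* ... blk-mq queue: set it in the driver's blk_mq_ops instead, e.g. */
/* static const struct blk_mq_ops mydrv_mq_ops = {                   */
/*	.initialize_rq_fn	= mydrv_initialize_rq,                */
/*	...                                                           */
/* };                                                                 */

Second, REQ_NOWAIT: a submitter that cannot afford to block can set the flag on its bio. If the queue would have to sleep for a free request, the bio now completes with BLK_STS_AGAIN via bio_wouldblock_error() instead of blocking (bios carrying REQ_NOWAIT aimed at queues that are not request based fail with BLK_STS_NOTSUPP instead), so the caller's ->bi_end_io() should treat BLK_STS_AGAIN as "retry later or fall back to a blocking submission", not as a hard I/O error.

bio->bi_opf |= REQ_NOWAIT;	/* opt in to nonblocking submission */
submit_bio(bio);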