blk-mq: move debugfs declarations to a separate header file
diff --git a/block/blk-core.c b/block/blk-core.c
index 728299323f65b27ac782174a486d6fa878edd45a..acdca6536562f5d8a99cc49f1e97f781312edb8c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -40,6 +40,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-sched.h"
 #include "blk-wbt.h"
 
@@ -268,10 +269,8 @@ void blk_sync_queue(struct request_queue *q)
                struct blk_mq_hw_ctx *hctx;
                int i;
 
-               queue_for_each_hw_ctx(q, hctx, i) {
-                       cancel_work_sync(&hctx->run_work);
-                       cancel_delayed_work_sync(&hctx->delay_work);
-               }
+               queue_for_each_hw_ctx(q, hctx, i)
+                       cancel_delayed_work_sync(&hctx->run_work);
        } else {
                cancel_delayed_work_sync(&q->delay_work);
        }
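In this hunk the per-hctx run_work has become a struct delayed_work (cancel_delayed_work_sync() takes a struct delayed_work *) and the separate delay_work goes away, so blk_sync_queue() only has one work item per hardware context to cancel. A minimal sketch of the delayed_work lifecycle this relies on; the names my_hctx, my_run_work_fn, my_hctx_init and my_hctx_sync are invented stand-ins for the real blk-mq structures, not kernel code:

#include <linux/workqueue.h>

struct my_hctx {
	struct delayed_work run_work;	/* one item covers "run now" and "run later" */
};

static void my_run_work_fn(struct work_struct *work)
{
	struct my_hctx *hctx =
		container_of(to_delayed_work(work), struct my_hctx, run_work);

	/* ... run the hardware queue for hctx ... */
}

static void my_hctx_init(struct my_hctx *hctx)
{
	INIT_DELAYED_WORK(&hctx->run_work, my_run_work_fn);
}

static void my_hctx_sync(struct my_hctx *hctx)
{
	/* a single cancel replaces the old run_work/delay_work pair */
	cancel_delayed_work_sync(&hctx->run_work);
}
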
@@ -563,9 +562,13 @@ void blk_cleanup_queue(struct request_queue *q)
         * prevent that q->request_fn() gets invoked after draining finished.
         */
        blk_freeze_queue(q);
-       spin_lock_irq(lock);
-       if (!q->mq_ops)
+       if (!q->mq_ops) {
+               spin_lock_irq(lock);
                __blk_drain_queue(q, true);
+       } else {
+               blk_mq_debugfs_unregister_mq(q);
+               spin_lock_irq(lock);
+       }
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
@@ -1628,18 +1631,23 @@ unsigned int blk_plug_queued_count(struct request_queue *q)
        return ret;
 }
 
-void init_request_from_bio(struct request *req, struct bio *bio)
+void blk_init_request_from_bio(struct request *req, struct bio *bio)
 {
+       struct io_context *ioc = rq_ioc(bio);
+
        if (bio->bi_opf & REQ_RAHEAD)
                req->cmd_flags |= REQ_FAILFAST_MASK;
 
-       req->errors = 0;
        req->__sector = bio->bi_iter.bi_sector;
-       blk_rq_set_prio(req, rq_ioc(bio));
        if (ioprio_valid(bio_prio(bio)))
                req->ioprio = bio_prio(bio);
+       else if (ioc)
+               req->ioprio = ioc->ioprio;
+       else
+               req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
        blk_rq_bio_prep(req->q, req, bio);
 }
+EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
@@ -1730,7 +1738,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
         * We don't worry about that case for efficiency. It won't happen
         * often, and the elevators are able to handle it.
         */
-       init_request_from_bio(req, bio);
+       blk_init_request_from_bio(req, bio);
 
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
                req->cpu = raw_smp_processor_id();
@@ -2567,22 +2575,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 {
        int total_bytes;
 
-       trace_block_rq_complete(req->q, req, nr_bytes);
+       trace_block_rq_complete(req, error, nr_bytes);
 
        if (!req->bio)
                return false;
 
-       /*
-        * For fs requests, rq is just carrier of independent bio's
-        * and each partial completion should be handled separately.
-        * Reset per-request error on each partial completion.
-        *
-        * TODO: tj: This is too subtle.  It would be better to let
-        * low level drivers do what they see fit.
-        */
-       if (!blk_rq_is_passthrough(req))
-               req->errors = 0;
-
        if (error && !blk_rq_is_passthrough(req) &&
            !(req->rq_flags & RQF_QUIET)) {
                char *error_type;
@@ -2805,7 +2802,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool __blk_end_bidi_request(struct request *rq, int error,
+static bool __blk_end_bidi_request(struct request *rq, int error,
                                   unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
@@ -3078,6 +3075,13 @@ int kblockd_schedule_work_on(int cpu, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work_on);
 
+int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
+                               unsigned long delay)
+{
+       return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
+}
+EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
+
 int kblockd_schedule_delayed_work(struct delayed_work *dwork,
                                  unsigned long delay)
 {