blk-mq: introduce blk_mq_hw_queue_first_cpu() to figure out first cpu
[linux.git] / block / blk-mq.c
index f489ec9208071e2859777f64d8ebda1308a5d084..e05bd10d5c84423bffe894c43848617258deff3b 100644 (file)
@@ -1335,6 +1335,15 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
        hctx_unlock(hctx, srcu_idx);
 }
 
+static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
+{
+       int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
+
+       if (cpu >= nr_cpu_ids)
+               cpu = cpumask_first(hctx->cpumask);
+       return cpu;
+}
+
 /*
  * It'd be great if the workqueue API had a way to pass
  * in a mask and had some smarts for more clever placement.
@@ -1344,26 +1353,17 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 {
        bool tried = false;
+       int next_cpu = hctx->next_cpu;
 
        if (hctx->queue->nr_hw_queues == 1)
                return WORK_CPU_UNBOUND;
 
        if (--hctx->next_cpu_batch <= 0) {
-               int next_cpu;
 select_cpu:
-               next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
+               next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
                                cpu_online_mask);
                if (next_cpu >= nr_cpu_ids)
-                       next_cpu = cpumask_first_and(hctx->cpumask,cpu_online_mask);
-
-               /*
-                * No online CPU is found, so have to make sure hctx->next_cpu
-                * is set correctly for not breaking workqueue.
-                */
-               if (next_cpu >= nr_cpu_ids)
-                       hctx->next_cpu = cpumask_first(hctx->cpumask);
-               else
-                       hctx->next_cpu = next_cpu;
+                       next_cpu = blk_mq_first_mapped_cpu(hctx);
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }
 
@@ -1371,7 +1371,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
         * Do unbound schedule if we can't find a online CPU for this hctx,
         * and it should only happen in the path of handling CPU DEAD.
         */
-       if (!cpu_online(hctx->next_cpu)) {
+       if (!cpu_online(next_cpu)) {
                if (!tried) {
                        tried = true;
                        goto select_cpu;
@@ -1381,10 +1381,13 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
                 * Make sure to re-select CPU next time once after CPUs
                 * in hctx->cpumask become online again.
                 */
+               hctx->next_cpu = next_cpu;
                hctx->next_cpu_batch = 1;
                return WORK_CPU_UNBOUND;
        }
-       return hctx->next_cpu;
+
+       hctx->next_cpu = next_cpu;
+       return next_cpu;
 }
 
 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
@@ -2429,10 +2432,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                /*
                 * Initialize batch roundrobin counts
                 */
-               hctx->next_cpu = cpumask_first_and(hctx->cpumask,
-                               cpu_online_mask);
-               if (hctx->next_cpu >= nr_cpu_ids)
-                       hctx->next_cpu = cpumask_first(hctx->cpumask);
+               hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }
 }
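
The hunks above fold the two-step lookup (first online CPU in hctx->cpumask, otherwise the first CPU in the mask at all) into the blk_mq_first_mapped_cpu() helper and stop writing intermediate results into hctx->next_cpu. Below is a small, self-contained user-space sketch of that fallback pattern; the mask_next()/first_mapped_cpu() helpers and the fixed NR_CPU_IDS value are simplified stand-ins invented for illustration, not the kernel's <linux/cpumask.h> API.

    /*
     * Illustrative user-space sketch (not kernel code): models the
     * "first online CPU in the hctx mask, else first CPU in the mask"
     * fallback that blk_mq_first_mapped_cpu() implements above.
     */
    #include <stdio.h>

    #define NR_CPU_IDS 8u            /* assumed CPU count for the demo */

    /* Return the first set bit >= 'start' in 'mask', or NR_CPU_IDS if none. */
    static unsigned int mask_next(unsigned int start, unsigned int mask)
    {
            for (unsigned int cpu = start; cpu < NR_CPU_IDS; cpu++)
                    if (mask & (1u << cpu))
                            return cpu;
            return NR_CPU_IDS;
    }

    /*
     * Mirror of blk_mq_first_mapped_cpu(): prefer an online CPU from the
     * hctx mask, but fall back to any mapped CPU so the returned value
     * stays valid even when all mapped CPUs are offline.
     */
    static unsigned int first_mapped_cpu(unsigned int hctx_mask,
                                         unsigned int online_mask)
    {
            unsigned int cpu = mask_next(0, hctx_mask & online_mask);

            if (cpu >= NR_CPU_IDS)
                    cpu = mask_next(0, hctx_mask);
            return cpu;
    }

    int main(void)
    {
            unsigned int hctx_mask = 0x0c;   /* hctx mapped to CPUs 2 and 3 */

            /* CPU 3 online: pick it. */
            printf("online=0x08 -> cpu %u\n", first_mapped_cpu(hctx_mask, 0x08));
            /* CPUs 2 and 3 both offline: fall back to first mapped CPU (2). */
            printf("online=0x03 -> cpu %u\n", first_mapped_cpu(hctx_mask, 0x03));
            return 0;
    }

Built with a plain C compiler, the second call prints CPU 2: with every mapped CPU offline the helper still returns a CPU from the mask, matching the removed comment in the diff about keeping hctx->next_cpu set correctly so the workqueue is not broken while the mapped CPUs are down.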