Merge tag 'rtc-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux

diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9ae8e9f8f8b1b69605ed12e107d12da8655cb7e4..d943d46b078547e5f48d488575be5178fb0195e5 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -18,8 +18,8 @@ struct blk_mq_ctxs {
 struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
-               struct list_head        rq_list;
-       }  ____cacheline_aligned_in_smp;
+               struct list_head        rq_lists[HCTX_MAX_TYPES];
+       } ____cacheline_aligned_in_smp;
 
        unsigned int            cpu;
        unsigned short          index_hw[HCTX_MAX_TYPES];
@@ -68,8 +68,10 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);
 
-/* Used by blk_insert_cloned_request() to issue request directly */
-blk_status_t blk_mq_request_issue_directly(struct request *rq);
+blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+                                               struct request *rq,
+                                               blk_qc_t *cookie,
+                                               bool bypass, bool last);
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);
 
@@ -81,16 +83,14 @@ extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
 /*
  * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
  * @q: request queue
- * @hctx_type: the hctx type index
+ * @type: the hctx type index
  * @cpu: CPU
  */
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
-                                                         unsigned int hctx_type,
+                                                         enum hctx_type type,
                                                          unsigned int cpu)
 {
-       struct blk_mq_tag_set *set = q->tag_set;
-
-       return q->queue_hw_ctx[set->map[hctx_type].mq_map[cpu]];
+       return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
 }
 
 /*
@@ -103,12 +103,20 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     unsigned int flags,
                                                     unsigned int cpu)
 {
-       int hctx_type = 0;
-
-       if (q->mq_ops->rq_flags_to_type)
-               hctx_type = q->mq_ops->rq_flags_to_type(q, flags);
-
-       return blk_mq_map_queue_type(q, hctx_type, cpu);
+       enum hctx_type type = HCTX_TYPE_DEFAULT;
+
+       if ((flags & REQ_HIPRI) &&
+           q->tag_set->nr_maps > HCTX_TYPE_POLL && 
+           q->tag_set->map[HCTX_TYPE_POLL].nr_queues &&
+           test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+               type = HCTX_TYPE_POLL;
+
+       else if (((flags & REQ_OP_MASK) == REQ_OP_READ) &&
+                q->tag_set->nr_maps > HCTX_TYPE_READ &&
+                q->tag_set->map[HCTX_TYPE_READ].nr_queues)
+               type = HCTX_TYPE_READ;
+       
+       return blk_mq_map_queue_type(q, type, cpu);
 }
 
 /*
@@ -184,8 +192,7 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
        return hctx->nr_ctx && hctx->tags;
 }
 
-void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
-                     unsigned int inflight[2]);
+unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                         unsigned int inflight[2]);
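
For context, the new blk_mq_map_queue() hunk above picks a hardware-queue type in a fixed priority order: HCTX_TYPE_POLL for REQ_HIPRI requests when the poll map is populated and polling is enabled on the queue, HCTX_TYPE_READ for reads when a dedicated read map exists, and HCTX_TYPE_DEFAULT otherwise. The standalone sketch below only mirrors that decision order so it can be compiled and tested in user space; the flag values, enum layout, and fake_tag_set struct are simplified stand-ins, not the real kernel definitions.

/*
 * Standalone illustration of the hctx-type selection order introduced in
 * blk_mq_map_queue() above. Flag values and types are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

#define REQ_OP_READ   0u          /* stand-in op code */
#define REQ_OP_MASK   0xffu       /* stand-in op mask */
#define REQ_HIPRI     (1u << 8)   /* stand-in "polled I/O" flag */

struct fake_tag_set {
	unsigned int nr_maps;                   /* how many maps are populated */
	unsigned int nr_queues[HCTX_MAX_TYPES]; /* queues per map */
};

/* Mirrors the priority order: poll first, then read, then default. */
static enum hctx_type pick_hctx_type(const struct fake_tag_set *set,
				     unsigned int flags, bool queue_poll_enabled)
{
	if ((flags & REQ_HIPRI) &&
	    set->nr_maps > HCTX_TYPE_POLL &&
	    set->nr_queues[HCTX_TYPE_POLL] &&
	    queue_poll_enabled)
		return HCTX_TYPE_POLL;

	if (((flags & REQ_OP_MASK) == REQ_OP_READ) &&
	    set->nr_maps > HCTX_TYPE_READ &&
	    set->nr_queues[HCTX_TYPE_READ])
		return HCTX_TYPE_READ;

	return HCTX_TYPE_DEFAULT;
}

int main(void)
{
	struct fake_tag_set set = {
		.nr_maps = HCTX_MAX_TYPES,
		.nr_queues = { 4, 2, 1 }, /* default, read, poll */
	};

	printf("polled read -> type %d\n", pick_hctx_type(&set, REQ_OP_READ | REQ_HIPRI, true));
	printf("plain read  -> type %d\n", pick_hctx_type(&set, REQ_OP_READ, true));
	printf("plain write -> type %d\n", pick_hctx_type(&set, 1 /* write stand-in */, true));
	return 0;
}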