diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9497b47e2526c62006f00101cc7acffd35e7e299..d943d46b078547e5f48d488575be5178fb0195e5 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -7,17 +7,22 @@
 
 struct blk_mq_tag_set;
 
+struct blk_mq_ctxs {
+       struct kobject kobj;
+       struct blk_mq_ctx __percpu      *queue_ctx;
+};
+
 /**
  * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
  */
 struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
-               struct list_head        rq_list;
-       }  ____cacheline_aligned_in_smp;
+               struct list_head        rq_lists[HCTX_MAX_TYPES];
+       } ____cacheline_aligned_in_smp;
 
        unsigned int            cpu;
-       unsigned int            index_hw;
+       unsigned short          index_hw[HCTX_MAX_TYPES];
 
        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
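With one software-queue list per hctx type, everything that used to touch the single ctx->rq_list now has to pick a list by the hardware context's type. A minimal sketch of an insert helper under the new layout (the helper name is made up here; the real callers live in blk-mq.c, which also keeps hctx->type in step with these arrays):

        static void ctx_insert_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
        {
                struct blk_mq_ctx *ctx = rq->mq_ctx;

                /* One lock still guards all of the per-type lists. */
                spin_lock(&ctx->lock);
                list_add_tail(&rq->queuelist, &ctx->rq_lists[hctx->type]);
                spin_unlock(&ctx->lock);
        }
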
@@ -27,6 +32,7 @@ struct blk_mq_ctx {
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];
 
        struct request_queue    *queue;
+       struct blk_mq_ctxs      *ctxs;
        struct kobject          kobj;
 } ____cacheline_aligned_in_smp;
 
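struct blk_mq_ctxs hangs the per-CPU software contexts off their own kobject instead of embedding them in the request queue, so a sysfs reference can keep the percpu allocation alive independently of the queue itself. Roughly how the allocation might be wired up (illustrative names, not the actual blk-mq.c helpers):

        static struct blk_mq_ctxs *example_alloc_ctxs(void)
        {
                struct blk_mq_ctxs *ctxs;
                int cpu;

                ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
                if (!ctxs)
                        return NULL;

                ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
                if (!ctxs->queue_ctx) {
                        kfree(ctxs);
                        return NULL;
                }

                /* Every software context can find the shared kobject owner. */
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(ctxs->queue_ctx, cpu)->ctxs = ctxs;

                return ctxs;
        }
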
@@ -62,20 +68,55 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);
 
-/* Used by blk_insert_cloned_request() to issue request directly */
-blk_status_t blk_mq_request_issue_directly(struct request *rq);
+blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+                                               struct request *rq,
+                                               blk_qc_t *cookie,
+                                               bool bypass, bool last);
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);
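The dedicated blk_mq_request_issue_directly() entry point is folded into the more general blk_mq_try_issue_directly(); the old direct-issue behaviour is reached with bypass=true, which skips the fallback insert and reports the status back to the caller. A sketch of what a cloned-request issuer might now do (the cookie is not consumed in the bypass case):

        blk_qc_t unused_cookie;
        blk_status_t ret;

        /* bypass=true: no insert on failure; last=true: sole request. */
        ret = blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused_cookie,
                                        true, true);
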
 
 /*
  * CPU -> queue mappings
  */
-extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
+
+/*
+ * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
+ * @q: request queue
+ * @type: the hctx type index
+ * @cpu: CPU
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
+                                                         enum hctx_type type,
+                                                         unsigned int cpu)
+{
+       return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
+}
 
+/*
+ * blk_mq_map_queue() - map (cmd_flags,cpu) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @cpu: CPU
+ */
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-               int cpu)
+                                                    unsigned int flags,
+                                                    unsigned int cpu)
 {
-       return q->queue_hw_ctx[q->mq_map[cpu]];
+       enum hctx_type type = HCTX_TYPE_DEFAULT;
+
+       if ((flags & REQ_HIPRI) &&
+           q->tag_set->nr_maps > HCTX_TYPE_POLL &&
+           q->tag_set->map[HCTX_TYPE_POLL].nr_queues &&
+           test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+               type = HCTX_TYPE_POLL;
+
+       else if (((flags & REQ_OP_MASK) == REQ_OP_READ) &&
+                q->tag_set->nr_maps > HCTX_TYPE_READ &&
+                q->tag_set->map[HCTX_TYPE_READ].nr_queues)
+               type = HCTX_TYPE_READ;
+
+       return blk_mq_map_queue_type(q, type, cpu);
 }
 
 /*
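Because the hardware queue now depends on the command flags and not just the CPU, allocation-time callers need those flags at hand; that is what the cmd_flags field added to blk_mq_alloc_data below is for. The resolution in the allocation path would look roughly like:

        /* Sketch: data is a struct blk_mq_alloc_data being filled in. */
        data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                      data->ctx->cpu);
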
@@ -126,6 +167,7 @@ struct blk_mq_alloc_data {
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
+       unsigned int cmd_flags;
 
        /* input & output parameter */
        struct blk_mq_ctx *ctx;
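A submission-path caller would seed cmd_flags from the bio before allocating, along these lines (a sketch, not the exact blk_mq_make_request() code):

        struct blk_mq_alloc_data data = {
                .q              = q,
                .cmd_flags      = bio->bi_opf,
        };
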
@@ -150,8 +192,7 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
        return hctx->nr_ctx && hctx->tags;
 }
 
-void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
-                     unsigned int inflight[2]);
+unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                         unsigned int inflight[2]);
 
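The in-flight helper now returns one summed count for the partition instead of filling a two-element read/write array; callers that care about the split use blk_mq_in_flight_rw(). A caller sketch:

        unsigned int inflight;

        /* Total outstanding requests on this partition. */
        inflight = blk_mq_in_flight(q, part);
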
@@ -195,21 +236,18 @@ static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
 
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
-       struct blk_mq_hw_ctx *hctx;
-
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;
 
-       hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
-       __blk_mq_put_driver_tag(hctx, rq);
+       __blk_mq_put_driver_tag(rq->mq_hctx, rq);
 }
 
-static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
+static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
 {
        int cpu;
 
        for_each_possible_cpu(cpu)
-               set->mq_map[cpu] = 0;
+               qmap->mq_map[cpu] = 0;
 }
 
 #endif
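Finally, with one struct blk_mq_queue_map per hctx type, clearing the CPU mapping becomes a per-map operation. A caller that used to clear set->mq_map once would now loop over the maps, roughly:

        int i;

        /* Sketch: reset every per-type map in a tag set. */
        for (i = 0; i < set->nr_maps; i++)
                blk_mq_clear_mq_map(&set->map[i]);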