/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
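
/*
 * Illustrative sketch, not part of this header: blk-mq core serializes
 * access to the per-cpu software queue list with the ctx lock, roughly
 * as follows (assuming a request @rq already allocated):
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_list);
 *	spin_unlock(&ctx->lock);
 */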

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
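
/*
 * Sketch of the freeze/unfreeze pattern around queue reconfiguration
 * (illustrative only; blk_mq_unfreeze_queue() is declared in
 * include/linux/blk-mq.h):
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue state while no requests are in flight ...
 *	blk_mq_unfreeze_queue(q);
 */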

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
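
/*
 * Typical pairing (a sketch under assumed caller-provided @hctx_idx, not a
 * verbatim call site): the tag map is created empty and then populated;
 * teardown reverses the order:
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags);
 *	if (tags && blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth)) {
 *		blk_mq_free_rq_map(tags);
 *		tags = NULL;
 *	}
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 */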

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     unsigned int cpu)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	if (q->tag_set->nr_maps > HCTX_TYPE_POLL &&
	    ((flags & REQ_HIPRI) && test_bit(QUEUE_FLAG_POLL, &q->queue_flags)))
		type = HCTX_TYPE_POLL;

	else if (q->tag_set->nr_maps > HCTX_TYPE_READ &&
		 ((flags & REQ_OP_MASK) == REQ_OP_READ))
		type = HCTX_TYPE_READ;

	return blk_mq_map_queue_type(q, type, cpu);
}
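
/*
 * Example caller (sketch): request allocation resolves the hardware queue
 * from the command flags and the submitting ctx's CPU, roughly:
 *
 *	data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
 *				      data->ctx->cpu);
 */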

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}
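
/*
 * Example (sketch): the timeout path acts only on requests still owned
 * by the driver, e.g.:
 *
 *	if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
 *		... request is a candidate for timeout handling ...
 */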

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
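
/*
 * blk_mq_get_ctx() disables preemption via get_cpu(), so every call must
 * be paired with blk_mq_put_ctx() (sketch):
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	...
 *	blk_mq_put_ctx(ctx);
 */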

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
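
/*
 * Illustration (sketch): with an I/O scheduler attached, internal
 * allocations carry BLK_MQ_REQ_INTERNAL and draw from the scheduler
 * tag space:
 *
 *	struct blk_mq_alloc_data data = { .q = q,
 *					  .flags = BLK_MQ_REQ_INTERNAL };
 *	...
 *	tag = blk_mq_get_tag(&data);
 *
 * where blk_mq_get_tag() picks the tag space via blk_mq_tags_from_data().
 */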

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
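
/*
 * Example (sketch): run-queue paths skip hardware queues that are stopped
 * or have no mapped software queues:
 *
 *	queue_for_each_hw_ctx(q, hctx, i) {
 *		if (blk_mq_hctx_stopped(hctx) ||
 *		    !blk_mq_hw_queue_mapped(hctx))
 *			continue;
 *		...
 *	}
 */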

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}
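
/*
 * Dispatch sketch (illustrative): a budget obtained here must be released
 * if no request is actually dispatched:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		return;
 *	rq = ...;
 *	if (!rq) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		return;
 *	}
 */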

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

#endif