1 #include <linux/kernel.h>
2 #include <linux/module.h>
3 #include <linux/backing-dev.h>
5 #include <linux/blkdev.h>
7 #include <linux/init.h>
8 #include <linux/slab.h>
9 #include <linux/workqueue.h>
10 #include <linux/smp.h>
12 #include <linux/blk-mq.h>
14 #include "blk-mq-debugfs.h"
15 #include "blk-mq-tag.h"
/*
 * Release handler for the queue-level "mq" kobject and the per-CPU ctx
 * kobjects (both kobj_types below point here).  Body elided in this view;
 * NOTE(review): presumably a no-op or a simple free — confirm in full file.
 */
17 static void blk_mq_sysfs_release(struct kobject *kobj)
/*
 * Release handler for a hardware-context kobject: recover the owning
 * blk_mq_hw_ctx from the embedded kobject and free its CPU mask.
 */
21 static void blk_mq_hw_sysfs_release(struct kobject *kobj)
23 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
25 free_cpumask_var(hctx->cpumask);
/*
 * sysfs attribute wrapper for a software (per-CPU) context: pairs the raw
 * attribute with typed show/store callbacks taking a blk_mq_ctx.
 */
30 struct blk_mq_ctx_sysfs_entry {
31 struct attribute attr;
32 ssize_t (*show)(struct blk_mq_ctx *, char *);
33 ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
/*
 * sysfs attribute wrapper for a hardware context: pairs the raw attribute
 * with typed show/store callbacks taking a blk_mq_hw_ctx.
 */
36 struct blk_mq_hw_ctx_sysfs_entry {
37 struct attribute attr;
38 ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
39 ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
/*
 * Generic ->show dispatcher for per-CPU ctx attributes.  Resolves the
 * typed entry from the attribute and the ctx from the kobject, then calls
 * entry->show() under q->sysfs_lock.  A dying queue is skipped so the
 * callback never races with queue teardown.
 * (Locals and the entry->show presence check are elided in this view.)
 */
42 static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
45 struct blk_mq_ctx_sysfs_entry *entry;
46 struct blk_mq_ctx *ctx;
47 struct request_queue *q;
50 entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
51 ctx = container_of(kobj, struct blk_mq_ctx, kobj);
/* Serialize against queue sysfs teardown; only show while queue is live. */
58 mutex_lock(&q->sysfs_lock);
59 if (!blk_queue_dying(q))
60 res = entry->show(ctx, page);
61 mutex_unlock(&q->sysfs_lock);
/*
 * Generic ->store dispatcher for per-CPU ctx attributes: mirror image of
 * blk_mq_sysfs_show() for writes.  entry->store() runs under
 * q->sysfs_lock and only while the queue is not dying.
 */
65 static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
66 const char *page, size_t length)
68 struct blk_mq_ctx_sysfs_entry *entry;
69 struct blk_mq_ctx *ctx;
70 struct request_queue *q;
73 entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
74 ctx = container_of(kobj, struct blk_mq_ctx, kobj);
/* Same dying-queue guard as the show path. */
81 mutex_lock(&q->sysfs_lock);
82 if (!blk_queue_dying(q))
83 res = entry->store(ctx, page, length);
84 mutex_unlock(&q->sysfs_lock);
/*
 * Generic ->show dispatcher for hardware-context attributes: resolve the
 * typed entry and the hctx, then invoke entry->show() under q->sysfs_lock,
 * skipping queues that are being torn down.
 */
88 static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
89 struct attribute *attr, char *page)
91 struct blk_mq_hw_ctx_sysfs_entry *entry;
92 struct blk_mq_hw_ctx *hctx;
93 struct request_queue *q;
96 entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
97 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
/* Only show while the queue is live. */
104 mutex_lock(&q->sysfs_lock);
105 if (!blk_queue_dying(q))
106 res = entry->show(hctx, page);
107 mutex_unlock(&q->sysfs_lock);
/*
 * Generic ->store dispatcher for hardware-context attributes: write-side
 * counterpart of blk_mq_hw_sysfs_show(), with the same sysfs_lock and
 * dying-queue protection.
 */
111 static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
112 struct attribute *attr, const char *page,
115 struct blk_mq_hw_ctx_sysfs_entry *entry;
116 struct blk_mq_hw_ctx *hctx;
117 struct request_queue *q;
120 entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
121 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
/* Only store while the queue is live. */
128 mutex_lock(&q->sysfs_lock);
129 if (!blk_queue_dying(q))
130 res = entry->store(hctx, page, length);
131 mutex_unlock(&q->sysfs_lock);
/* "nr_tags": total number of tags in this hctx's tag set. */
135 static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
138 return sprintf(page, "%u\n", hctx->tags->nr_tags);
/* "nr_reserved_tags": number of reserved tags in this hctx's tag set. */
141 static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
144 return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
/*
 * "cpu_list": emit the CPUs mapped to this hctx as a comma-separated list
 * (first CPU printed bare, subsequent CPUs prefixed with ", "), terminated
 * by a newline.  The first/subsequent branch selection is elided here.
 */
147 static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
149 unsigned int i, first = 1;
152 for_each_cpu(i, hctx->cpumask) {
154 ret += sprintf(ret + page, "%u", i);
156 ret += sprintf(ret + page, ", %u", i);
161 ret += sprintf(ret + page, "\n");
/*
 * Default attributes for per-CPU ctx kobjects.  Initializer elided in this
 * view; NOTE(review): appears to be an empty (NULL-terminated) list here.
 */
165 static struct attribute *default_ctx_attrs[] = {
/* Read-only "nr_tags" attribute entry. */
169 static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
170 .attr = {.name = "nr_tags", .mode = S_IRUGO },
171 .show = blk_mq_hw_sysfs_nr_tags_show,
/* Read-only "nr_reserved_tags" attribute entry. */
173 static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
174 .attr = {.name = "nr_reserved_tags", .mode = S_IRUGO },
175 .show = blk_mq_hw_sysfs_nr_reserved_tags_show,
/* Read-only "cpu_list" attribute entry. */
177 static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
178 .attr = {.name = "cpu_list", .mode = S_IRUGO },
179 .show = blk_mq_hw_sysfs_cpus_show,
/* Default attributes exposed under each hardware-context kobject. */
182 static struct attribute *default_hw_ctx_attrs[] = {
183 &blk_mq_hw_sysfs_nr_tags.attr,
184 &blk_mq_hw_sysfs_nr_reserved_tags.attr,
185 &blk_mq_hw_sysfs_cpus.attr,
/* sysfs ops shared by the queue-level and per-CPU ctx kobjects. */
189 static const struct sysfs_ops blk_mq_sysfs_ops = {
190 .show = blk_mq_sysfs_show,
191 .store = blk_mq_sysfs_store,
/* sysfs ops for hardware-context kobjects. */
194 static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
195 .show = blk_mq_hw_sysfs_show,
196 .store = blk_mq_hw_sysfs_store,
/* kobj_type for the queue-level "mq" directory. */
199 static struct kobj_type blk_mq_ktype = {
200 .sysfs_ops = &blk_mq_sysfs_ops,
201 .release = blk_mq_sysfs_release,
/* kobj_type for per-CPU software-context kobjects ("cpuN" directories). */
204 static struct kobj_type blk_mq_ctx_ktype = {
205 .sysfs_ops = &blk_mq_sysfs_ops,
206 .default_attrs = default_ctx_attrs,
207 .release = blk_mq_sysfs_release,
/* kobj_type for hardware-context kobjects; release frees the cpumask. */
210 static struct kobj_type blk_mq_hw_ktype = {
211 .sysfs_ops = &blk_mq_hw_sysfs_ops,
212 .default_attrs = default_hw_ctx_attrs,
213 .release = blk_mq_hw_sysfs_release,
/*
 * Remove one hctx from sysfs: delete each child ctx kobject first, then
 * the hctx kobject itself.  kobject_del only unlinks from sysfs; the final
 * reference drop (and release callback) happens elsewhere.
 */
216 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
218 struct blk_mq_ctx *ctx;
224 hctx_for_each_ctx(hctx, ctx, i)
225 kobject_del(&ctx->kobj);
227 kobject_del(&hctx->kobj);
/*
 * Register one hctx in sysfs: add the hctx kobject under the queue's "mq"
 * directory (named by queue_num), then add each mapped software ctx as a
 * "cpuN" child.  Error-handling paths between these steps are elided in
 * this view.
 */
230 static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
232 struct request_queue *q = hctx->queue;
233 struct blk_mq_ctx *ctx;
239 ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
243 hctx_for_each_ctx(hctx, ctx, i) {
244 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
/*
 * Tear down the whole mq sysfs hierarchy for a queue.  Caller must hold
 * q->sysfs_lock (asserted).  Order: unregister every hctx, drop the
 * debugfs mq entries, emit KOBJ_REMOVE and delete the "mq" kobject, and
 * drop the device reference taken at registration time.
 */
252 static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
254 struct blk_mq_hw_ctx *hctx;
257 lockdep_assert_held(&q->sysfs_lock);
259 queue_for_each_hw_ctx(q, hctx, i)
260 blk_mq_unregister_hctx(hctx);
262 blk_mq_debugfs_unregister_mq(q);
264 kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
265 kobject_del(&q->mq_kobj);
/* Pairs with the kobject_get(&dev->kobj) done in __blk_mq_register_dev(). */
266 kobject_put(&dev->kobj);
268 q->mq_sysfs_init_done = false;
/* Locked wrapper: take q->sysfs_lock around __blk_mq_unregister_dev(). */
271 void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
273 mutex_lock(&q->sysfs_lock);
274 __blk_mq_unregister_dev(dev, q);
275 mutex_unlock(&q->sysfs_lock);
/* Initialize a hardware context's embedded kobject with the hctx ktype. */
278 void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
280 kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
/*
 * Drop the references taken by blk_mq_sysfs_init(): one per possible-CPU
 * software context, plus the queue's "mq" kobject.  The corresponding
 * release callbacks run when the last reference goes away.
 */
283 void blk_mq_sysfs_deinit(struct request_queue *q)
285 struct blk_mq_ctx *ctx;
288 for_each_possible_cpu(cpu) {
289 ctx = per_cpu_ptr(q->queue_ctx, cpu);
290 kobject_put(&ctx->kobj);
292 kobject_put(&q->mq_kobj);
/*
 * Initialize (but do not yet add to sysfs) the queue's "mq" kobject and
 * the per-CPU software-context kobjects for every possible CPU.  Balanced
 * by blk_mq_sysfs_deinit().
 */
295 void blk_mq_sysfs_init(struct request_queue *q)
297 struct blk_mq_ctx *ctx;
300 kobject_init(&q->mq_kobj, &blk_mq_ktype);
302 for_each_possible_cpu(cpu) {
303 ctx = per_cpu_ptr(q->queue_ctx, cpu);
304 kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
/*
 * Build the mq sysfs hierarchy for a queue under the given device.
 * Caller must hold q->sysfs_lock (asserted).  Adds the "mq" directory
 * (taking a reference on dev->kobj that __blk_mq_unregister_dev drops),
 * registers debugfs entries, then registers every hctx.  The trailing
 * lines form the error-unwind path: unregister already-added hctxs and
 * undo the "mq" kobject and device reference.  (goto labels and the
 * early-return checks are elided in this view.)
 */
308 int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
310 struct blk_mq_hw_ctx *hctx;
313 WARN_ON_ONCE(!q->kobj.parent);
314 lockdep_assert_held(&q->sysfs_lock);
316 ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
320 kobject_uevent(&q->mq_kobj, KOBJ_ADD);
322 blk_mq_debugfs_register(q);
324 queue_for_each_hw_ctx(q, hctx, i) {
325 ret = blk_mq_register_hctx(hctx);
330 q->mq_sysfs_init_done = true;
/* Error unwind: roll back hctxs registered so far, then the mq kobject. */
337 blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
339 blk_mq_debugfs_unregister_mq(q);
341 kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
342 kobject_del(&q->mq_kobj);
343 kobject_put(&dev->kobj);
/* Locked wrapper: take q->sysfs_lock around __blk_mq_register_dev(). */
347 int blk_mq_register_dev(struct device *dev, struct request_queue *q)
351 mutex_lock(&q->sysfs_lock);
352 ret = __blk_mq_register_dev(dev, q);
353 mutex_unlock(&q->sysfs_lock);
357 EXPORT_SYMBOL_GPL(blk_mq_register_dev);
/*
 * Temporarily remove all hctx sysfs/debugfs entries (used around hctx
 * remapping).  No-op unless registration previously completed
 * (mq_sysfs_init_done); the early-exit jump target is elided in this view.
 */
359 void blk_mq_sysfs_unregister(struct request_queue *q)
361 struct blk_mq_hw_ctx *hctx;
364 mutex_lock(&q->sysfs_lock);
365 if (!q->mq_sysfs_init_done)
368 blk_mq_debugfs_unregister_mq(q);
370 queue_for_each_hw_ctx(q, hctx, i)
371 blk_mq_unregister_hctx(hctx);
374 mutex_unlock(&q->sysfs_lock);
377 int blk_mq_sysfs_register(struct request_queue *q)
379 struct blk_mq_hw_ctx *hctx;
382 mutex_lock(&q->sysfs_lock);
383 if (!q->mq_sysfs_init_done)
386 blk_mq_debugfs_register_mq(q);
388 queue_for_each_hw_ctx(q, hctx, i) {
389 ret = blk_mq_register_hctx(hctx);
395 mutex_unlock(&q->sysfs_lock);