// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"
#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4
static struct tracer_opt blk_tracer_opts[] = {
	/* Default: disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};
/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len, u64 cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
	}
}
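
/*
 * trace_note() above feeds one of two sinks: when the "blk" ftrace
 * tracer is enabled, the record goes into the ftrace ring buffer;
 * otherwise it is written to the relay channel that blktrace(8)
 * consumes from debugfs.
 */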
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}
void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
	const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
		   blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
#else
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, 0);
#endif
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
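
/*
 * Worked example: MASK_TC_BIT(op_flags, SYNC) isolates REQ_SYNC (bit
 * position __REQ_SYNC) in op_flags and shifts it left so it lands
 * exactly on BLK_TC_SYNC within the BLK_TC_SHIFT-based category half
 * of the action word; the mask is built branch-free, one flag at a time.
 */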
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int op, int op_flags, u32 what, int error, int pdu_len,
		     void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(op_flags, SYNC);
	what |= MASK_TC_BIT(op_flags, RAHEAD);
	what |= MASK_TC_BIT(op_flags, META);
	what |= MASK_TC_BIT(op_flags, PREFLUSH);
	what |= MASK_TC_BIT(op_flags, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
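
/*
 * Layout of a record emitted above: struct blk_io_trace, then the
 * optional 64-bit cgid, then the caller-supplied pdu. The accessors
 * further down (t_cgid(), pdu_start(), pdu_real_len()) rely on this
 * exact ordering.
 */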
static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}
static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}
static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};
static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};
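
/*
 * Returning 0 from subbuf_start makes relay drop the incoming data once
 * the buffer is full; each such drop is counted in bt->dropped and is
 * exposed through the "dropped" debugfs file created in
 * do_blk_trace_setup().
 */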
static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1;
	}
}
/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	if (!blk_debugfs_root)
		return -ENOENT;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	dir = debugfs_lookup(buts->name, blk_debugfs_root);
	if (!dir)
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto err;

	get_probe_ref();

	ret = 0;
err:
	if (dir && !bt->dir)
		dput(dir);
	if (ret)
		blk_trace_free(bt);
	return ret;
}
static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			     struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_setup(q, name, dev, bdev, arg);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif
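
/*
 * The compat path above exists because struct blk_user_trace_setup
 * contains 64-bit members whose alignment (and thus the struct layout)
 * differs between 32-bit and 64-bit x86 ABIs, so a 32-bit blktrace
 * binary hands the kernel a differently laid out structure.
 */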
static int __blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running.
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&q->blk_trace_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = __blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = __blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->blk_trace_mutex);
	return ret;
}
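
/*
 * Sketch of the typical life cycle driven from user space, e.g. by
 * blktrace(8): BLKTRACESETUP, then BLKTRACESTART, then reading the
 * per-cpu "trace<N>" relay files under /sys/kernel/debug/block/<name>/,
 * then BLKTRACESTOP and BLKTRACETEARDOWN. This describes the expected
 * usage, not a strict ordering enforced by the API.
 */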
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:   the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	mutex_lock(&q->blk_trace_mutex);

	if (q->blk_trace) {
		__blk_trace_startstop(q, 0);
		__blk_trace_remove(q);
	}

	mutex_unlock(&q->blk_trace_mutex);
}
#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct blk_trace *bt = q->blk_trace;

	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	if (!bio->bi_blkg)
		return 0;
	return cgroup_id(bio_blkcg(bio)->css.cgroup);
}
#else
u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(q, rq->bio);
}
/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, int error,
			     unsigned int nr_bytes, u32 what, u64 cgid)
{
	struct blk_trace *bt = rq->q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
			rq->cmd_flags, what, error, 0, NULL, cgid);
}
static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
			int error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq->q, rq));
}
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
}
static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio,
				       int error)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct request *rq,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct request *rq,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}
static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
					NULL, 0);
	}
}

static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
					0, 0, NULL, 0);
	}
}
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				    unsigned int depth, bool explicit)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
}
static void blk_add_trace_split(void *ignore,
				struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
				BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
				&rpdu, blk_trace_bio_get_cgid(q, bio));
	}
}
/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device-mapper or RAID targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
}
/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps requests to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
				BLK_TA_DRV_DATA, 0, len, data,
				blk_trace_request_get_cgid(q, rq));
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}
static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}
/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
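
/*
 * Example outputs from fill_rwbs(): a synchronous write becomes "WS",
 * a discard "D", a notify message "N". The string is always built in
 * the fixed order F, then D/W/R/N, then F(UA), A, S, M.
 */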
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}
static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __u64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to   = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}
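
/*
 * Integer and remap pdus are stored big-endian on the write side
 * (cpu_to_be64()/cpu_to_be32() in the blk_add_trace_* hooks), so the
 * accessors above convert back with be64_to_cpu()/be32_to_cpu().
 */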
typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
	bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}
static void blk_log_action(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
				sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN".  Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups.  See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
				 "%3d,%-3d %llx,%-llx %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}
static void blk_log_dump_pdu(struct trace_seq *s,
	const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}
static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}
static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r, has_cg);
	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(r.device_from), MINOR(r.device_from),
			 (unsigned long long)r.sector_from);
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
		pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}
/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}
static const struct {
	const char *act[2];
	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
			    bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};
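
/*
 * act[0] holds the terse single-letter action name, act[1] the verbose
 * one; print_one_line() below picks act[long_act] depending on the
 * TRACE_ITER_VERBOSE flag.
 */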
static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t	   = te_blk_io_trace(iter->ent);
	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg	   = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}
static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}
static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}
static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};
static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);
static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	put_probe_ref();
	blk_trace_free(bt);
	return 0;
}
/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto free_bt;

	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}
/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
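
/*
 * With the group name "trace", these attributes are expected to show up
 * as /sys/block/<disk>/trace/{enable,act_mask,pid,start_lba,end_lba}
 * once the group is registered on the disk device (see
 * blk_trace_init_sysfs() below); path given here assuming the standard
 * sysfs layout.
 */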
static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};
static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}

		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}
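
/*
 * act_mask accepts either a number or category names; for example, on a
 * hypothetical device, "echo read,write,complete > \
 * /sys/block/sda/trace/act_mask" is resolved one comma-separated token
 * at a time against mask_maps[] by blk_trace_str2mask() above.
 */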
static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->blk_trace_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->blk_trace_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret;
}
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (kstrtoull(buf, 0, &value))
		goto out;

	ret = -ENXIO;

	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->blk_trace_mutex);

	if (attr == &dev_attr_enable) {
		if (!!value == !!q->blk_trace) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->blk_trace_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret ? ret : count;
}
int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_EVENT_TRACING

void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
	int i = 0;

	if (op & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (op & REQ_OP_MASK) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (op & REQ_FUA)
		rwbs[i++] = 'F';
	if (op & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (op & REQ_SYNC)
		rwbs[i++] = 'S';
	if (op & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
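
/*
 * Example: REQ_OP_WRITE | REQ_SYNC | REQ_FUA yields "WFS";
 * REQ_OP_READ | REQ_RAHEAD yields "RA".
 */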
#endif /* CONFIG_EVENT_TRACING */