1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/notifier.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/poll.h>
42 #include <linux/nmi.h>
44 #include <linux/trace.h>
45 #include <linux/sched/clock.h>
46 #include <linux/sched/rt.h>
49 #include "trace_output.h"
52 * On boot up, the ring buffer is set to the minimum size, so that
53 * we do not waste memory on systems that are not using tracing.
55 bool ring_buffer_expanded;
58 * We need to change this state when a selftest is running.
59 * A selftest will look into the ring-buffer to count the
60 * entries inserted during the selftest, although concurrent
61 * insertions into the ring-buffer, such as trace_printk, could occur
62 * at the same time, giving false positive or negative results.
64 static bool __read_mostly tracing_selftest_running;
67 * If a tracer is running, we do not want to run SELFTEST.
69 bool __read_mostly tracing_selftest_disabled;
71 /* Pipe tracepoints to printk */
72 struct trace_iterator *tracepoint_print_iter;
73 int tracepoint_printk;
74 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
76 /* For tracers that don't implement custom flags */
77 static struct tracer_opt dummy_tracer_opt[] = {
82 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
88 * To prevent the comm cache from being overwritten when no
89 * tracing is active, only save the comm when a trace event
92 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
95 * Kill all tracing for good (never come back).
96 * It is initialized to 1 but will turn to zero if the initialization
97 * of the tracer is successful. But that is the only place that sets
100 static int tracing_disabled = 1;
102 cpumask_var_t __read_mostly tracing_buffer_mask;
105 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
108 * is set, then ftrace_dump is called. This will output the contents
109 * of the ftrace buffers to the console. This is very useful for
110 * capturing traces that lead to crashes and outputting them to a
113 * It is off by default, but you can enable it either by specifying
114 * "ftrace_dump_on_oops" on the kernel command line, or by setting
115 * /proc/sys/kernel/ftrace_dump_on_oops
116 * Set it to 1 to dump the buffers of all CPUs
117 * Set it to 2 to dump only the buffer of the CPU that triggered the oops
120 enum ftrace_dump_mode ftrace_dump_on_oops;
122 /* When set, tracing will stop when a WARN*() is hit */
123 int __disable_trace_on_warning;
125 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
126 /* Map of enums to their values, for "eval_map" file */
127 struct trace_eval_map_head {
129 unsigned long length;
132 union trace_eval_map_item;
134 struct trace_eval_map_tail {
136 * "end" is first and points to NULL as it must be different
137 * from "mod" or "eval_string"
139 union trace_eval_map_item *next;
140 const char *end; /* points to NULL */
143 static DEFINE_MUTEX(trace_eval_mutex);
146 * The trace_eval_maps are saved in an array with two extra elements,
147 * one at the beginning, and one at the end. The beginning item contains
148 * the count of the saved maps (head.length), and the module they
149 * belong to if not built in (head.mod). The ending item contains a
150 * pointer to the next array of saved eval_map items.
152 union trace_eval_map_item {
153 struct trace_eval_map map;
154 struct trace_eval_map_head head;
155 struct trace_eval_map_tail tail;
158 static union trace_eval_map_item *trace_eval_maps;
159 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
161 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
162 static void ftrace_trace_userstack(struct ring_buffer *buffer,
163 unsigned long flags, int pc);
165 #define MAX_TRACER_SIZE 100
166 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
167 static char *default_bootup_tracer;
169 static bool allocate_snapshot;
171 static int __init set_cmdline_ftrace(char *str)
173 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
174 default_bootup_tracer = bootup_tracer_buf;
175 /* We are using ftrace early, expand it */
176 ring_buffer_expanded = true;
179 __setup("ftrace=", set_cmdline_ftrace);
181 static int __init set_ftrace_dump_on_oops(char *str)
183 if (*str++ != '=' || !*str) {
184 ftrace_dump_on_oops = DUMP_ALL;
188 if (!strcmp("orig_cpu", str)) {
189 ftrace_dump_on_oops = DUMP_ORIG;
195 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
197 static int __init stop_trace_on_warning(char *str)
199 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
200 __disable_trace_on_warning = 1;
203 __setup("traceoff_on_warning", stop_trace_on_warning);
205 static int __init boot_alloc_snapshot(char *str)
207 allocate_snapshot = true;
208 /* We also need the main ring buffer expanded */
209 ring_buffer_expanded = true;
212 __setup("alloc_snapshot", boot_alloc_snapshot);
215 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217 static int __init set_trace_boot_options(char *str)
219 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
222 __setup("trace_options=", set_trace_boot_options);
224 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
225 static char *trace_boot_clock __initdata;
227 static int __init set_trace_boot_clock(char *str)
229 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
230 trace_boot_clock = trace_boot_clock_buf;
233 __setup("trace_clock=", set_trace_boot_clock);
235 static int __init set_tracepoint_printk(char *str)
237 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
238 tracepoint_printk = 1;
241 __setup("tp_printk", set_tracepoint_printk);
243 unsigned long long ns2usecs(u64 nsec)
250 /* trace_flags holds trace_options default values */
251 #define TRACE_DEFAULT_FLAGS \
252 (FUNCTION_DEFAULT_FLAGS | \
253 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
254 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
255 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
256 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
258 /* trace_options that are only supported by global_trace */
259 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
260 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
262 /* trace_flags that are default zero for instances */
263 #define ZEROED_TRACE_FLAGS \
264 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
267 * The global_trace is the descriptor that holds the top-level tracing
268 * buffers for the live tracing.
270 static struct trace_array global_trace = {
271 .trace_flags = TRACE_DEFAULT_FLAGS,
274 LIST_HEAD(ftrace_trace_arrays);
276 int trace_array_get(struct trace_array *this_tr)
278 struct trace_array *tr;
281 mutex_lock(&trace_types_lock);
282 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
289 mutex_unlock(&trace_types_lock);
294 static void __trace_array_put(struct trace_array *this_tr)
296 WARN_ON(!this_tr->ref);
300 void trace_array_put(struct trace_array *this_tr)
302 mutex_lock(&trace_types_lock);
303 __trace_array_put(this_tr);
304 mutex_unlock(&trace_types_lock);
307 int call_filter_check_discard(struct trace_event_call *call, void *rec,
308 struct ring_buffer *buffer,
309 struct ring_buffer_event *event)
311 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
312 !filter_match_preds(call->filter, rec)) {
313 __trace_event_discard_commit(buffer, event);
320 void trace_free_pid_list(struct trace_pid_list *pid_list)
322 vfree(pid_list->pids);
327 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
328 * @filtered_pids: The list of pids to check
329 * @search_pid: The PID to find in @filtered_pids
331 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
334 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
337 * If pid_max changed after filtered_pids was created, we
338 * by default ignore all pids greater than the previous pid_max.
340 if (search_pid >= filtered_pids->pid_max)
343 return test_bit(search_pid, filtered_pids->pids);
347 * trace_ignore_this_task - should a task be ignored for tracing
348 * @filtered_pids: The list of pids to check
349 * @task: The task that should be ignored if not filtered
351 * Checks if @task should be traced or not from @filtered_pids.
352 * Returns true if @task should *NOT* be traced.
353 * Returns false if @task should be traced.
356 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
359 * Return false, because if filtered_pids does not exist,
360 * all pids are good to trace.
365 return !trace_find_filtered_pid(filtered_pids, task->pid);
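/*
 * Example (illustrative sketch, not from the original source): a caller
 * holding a pid list would typically test a task before recording it:
 *
 *	if (trace_ignore_this_task(filtered_pids, current))
 *		return;
 *	record_the_event();
 *
 * where record_the_event() is a hypothetical stand-in for whatever the
 * caller does with tasks that pass the filter.
 */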
369 * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
370 * @pid_list: The list to modify
371 * @self: The current task for fork or NULL for exit
372 * @task: The task to add or remove
374 * When adding a task, if @self is defined, the task is only added if @self
375 * is also included in @pid_list. This happens on fork, and tasks should
376 * only be added when the parent is listed. If @self is NULL, then the
377 * @task pid will be removed from the list, which would happen on exit
380 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
381 struct task_struct *self,
382 struct task_struct *task)
387 /* For forks, we only add if the forking task is listed */
389 if (!trace_find_filtered_pid(pid_list, self->pid))
393 /* Sorry, but we don't support pid_max changing after setting */
394 if (task->pid >= pid_list->pid_max)
397 /* "self" is set for forks, and NULL for exits */
399 set_bit(task->pid, pid_list->pids);
401 clear_bit(task->pid, pid_list->pids);
405 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
406 * @pid_list: The pid list to show
407 * @v: The last pid that was shown (+1 of the actual pid, so that zero can be displayed)
408 * @pos: The position of the file
410 * This is used by the seq_file "next" operation to iterate the pids
411 * listed in a trace_pid_list structure.
413 * Returns the pid+1 as we want to display pid of zero, but NULL would
414 * stop the iteration.
416 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
418 unsigned long pid = (unsigned long)v;
422 /* pid already is +1 of the actual previous bit */
423 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
425 /* Return pid + 1 to allow zero to be represented */
426 if (pid < pid_list->pid_max)
427 return (void *)(pid + 1);
433 * trace_pid_start - Used for seq_file to start reading pid lists
434 * @pid_list: The pid list to show
435 * @pos: The position of the file
437 * This is used by the seq_file "start" operation to start the iteration
440 * Returns the pid+1 as we want to display pid of zero, but NULL would
441 * stop the iteration.
443 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
448 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
449 if (pid >= pid_list->pid_max)
452 /* Return pid + 1 so that zero can be the exit value */
453 for (pid++; pid && l < *pos;
454 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
460 * trace_pid_show - show the current pid in seq_file processing
461 * @m: The seq_file structure to write into
462 * @v: A void pointer of the pid (+1) value to display
464 * Can be directly used by seq_file operations to display the current
467 int trace_pid_show(struct seq_file *m, void *v)
469 unsigned long pid = (unsigned long)v - 1;
471 seq_printf(m, "%lu\n", pid);
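/*
 * Example (illustrative sketch, not from the original source): the three
 * helpers above are meant to back a seq_file interface. A hypothetical
 * user with its own pid list could wire them up roughly like this:
 *
 *	static void *example_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(example_pid_list, pos);
 *	}
 *
 *	static void *example_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(example_pid_list, v, pos);
 *	}
 *
 *	static void example_stop(struct seq_file *m, void *v) { }
 *
 *	static const struct seq_operations example_seq_ops = {
 *		.start	= example_start,
 *		.next	= example_next,
 *		.stop	= example_stop,
 *		.show	= trace_pid_show,
 *	};
 *
 * "example_pid_list" is a placeholder; a real user would also take whatever
 * locking protects its list in the start/stop callbacks.
 */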
475 /* 128 should be much more than enough */
476 #define PID_BUF_SIZE 127
478 int trace_pid_write(struct trace_pid_list *filtered_pids,
479 struct trace_pid_list **new_pid_list,
480 const char __user *ubuf, size_t cnt)
482 struct trace_pid_list *pid_list;
483 struct trace_parser parser;
491 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
495 * Always recreate a new array. The write is an all or nothing
496 * operation. Always create a new array when adding new pids by
497 * the user. If the operation fails, then the current list is
500 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
502 trace_parser_put(&parser);
506 pid_list->pid_max = READ_ONCE(pid_max);
508 /* Only truncating will shrink pid_max */
509 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
510 pid_list->pid_max = filtered_pids->pid_max;
512 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
513 if (!pid_list->pids) {
514 trace_parser_put(&parser);
520 /* copy the current bits to the new max */
521 for_each_set_bit(pid, filtered_pids->pids,
522 filtered_pids->pid_max) {
523 set_bit(pid, pid_list->pids);
532 ret = trace_get_user(&parser, ubuf, cnt, &pos);
533 if (ret < 0 || !trace_parser_loaded(&parser))
541 if (kstrtoul(parser.buffer, 0, &val))
543 if (val >= pid_list->pid_max)
548 set_bit(pid, pid_list->pids);
551 trace_parser_clear(&parser);
554 trace_parser_put(&parser);
557 trace_free_pid_list(pid_list);
562 /* Cleared the list of pids */
563 trace_free_pid_list(pid_list);
568 *new_pid_list = pid_list;
573 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
577 /* Early boot up does not have a buffer yet */
579 return trace_clock_local();
581 ts = ring_buffer_time_stamp(buf->buffer, cpu);
582 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
587 u64 ftrace_now(int cpu)
589 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
593 * tracing_is_enabled - Show if global_trace has been disabled
595 * Shows if the global trace has been enabled or not. It uses the
596 * mirror flag "buffer_disabled" so it can be used in fast paths such as for
597 * the irqsoff tracer. But it may be inaccurate due to races. If you
598 * need to know the accurate state, use tracing_is_on() which is a little
599 * slower, but accurate.
601 int tracing_is_enabled(void)
604 * For quick access (irqsoff uses this in fast path), just
605 * return the mirror variable of the state of the ring buffer.
606 * It's a little racy, but we don't really care.
609 return !global_trace.buffer_disabled;
613 * trace_buf_size is the size in bytes that is allocated
614 * for a buffer. Note, the number of bytes is always rounded
617 * This number is purposely set to a low number of 16384.
618 * If the dump on oops happens, it will be much appreciated
619 * to not have to wait for all that output. Anyway, this can be
620 * configured at both boot time and run time.
622 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
624 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
626 /* trace_types holds a link list of available tracers. */
627 static struct tracer *trace_types __read_mostly;
630 * trace_types_lock is used to protect the trace_types list.
632 DEFINE_MUTEX(trace_types_lock);
635 * serialize the access of the ring buffer
637 * The ring buffer serializes readers, but that is only low-level protection.
638 * The validity of the events (which are returned by ring_buffer_peek() ..etc)
639 * is not protected by the ring buffer.
641 * The content of events may become garbage if we allow other processes to consume
642 * these events concurrently:
643 * A) the page of the consumed events may become a normal page
644 * (not a reader page) in the ring buffer, and this page will be rewritten
645 * by the event producer.
646 * B) The page of the consumed events may become a page for splice_read,
647 * and this page will be returned to the system.
649 * These primitives allow multi-process access to different cpu ring buffers
652 * These primitives don't distinguish read-only and read-consume access.
653 * Multiple read-only accesses are also serialized.
657 static DECLARE_RWSEM(all_cpu_access_lock);
658 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
660 static inline void trace_access_lock(int cpu)
662 if (cpu == RING_BUFFER_ALL_CPUS) {
663 /* gain it for accessing the whole ring buffer. */
664 down_write(&all_cpu_access_lock);
666 /* gain it for accessing a cpu ring buffer. */
668 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
669 down_read(&all_cpu_access_lock);
671 /* Secondly block other access to this @cpu ring buffer. */
672 mutex_lock(&per_cpu(cpu_access_lock, cpu));
676 static inline void trace_access_unlock(int cpu)
678 if (cpu == RING_BUFFER_ALL_CPUS) {
679 up_write(&all_cpu_access_lock);
681 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
682 up_read(&all_cpu_access_lock);
686 static inline void trace_access_lock_init(void)
690 for_each_possible_cpu(cpu)
691 mutex_init(&per_cpu(cpu_access_lock, cpu));
696 static DEFINE_MUTEX(access_lock);
698 static inline void trace_access_lock(int cpu)
701 mutex_lock(&access_lock);
704 static inline void trace_access_unlock(int cpu)
707 mutex_unlock(&access_lock);
710 static inline void trace_access_lock_init(void)
716 #ifdef CONFIG_STACKTRACE
717 static void __ftrace_trace_stack(struct ring_buffer *buffer,
719 int skip, int pc, struct pt_regs *regs);
720 static inline void ftrace_trace_stack(struct trace_array *tr,
721 struct ring_buffer *buffer,
723 int skip, int pc, struct pt_regs *regs);
726 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
728 int skip, int pc, struct pt_regs *regs)
731 static inline void ftrace_trace_stack(struct trace_array *tr,
732 struct ring_buffer *buffer,
734 int skip, int pc, struct pt_regs *regs)
740 static __always_inline void
741 trace_event_setup(struct ring_buffer_event *event,
742 int type, unsigned long flags, int pc)
744 struct trace_entry *ent = ring_buffer_event_data(event);
746 tracing_generic_entry_update(ent, flags, pc);
750 static __always_inline struct ring_buffer_event *
751 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
754 unsigned long flags, int pc)
756 struct ring_buffer_event *event;
758 event = ring_buffer_lock_reserve(buffer, len);
760 trace_event_setup(event, type, flags, pc);
765 void tracer_tracing_on(struct trace_array *tr)
767 if (tr->trace_buffer.buffer)
768 ring_buffer_record_on(tr->trace_buffer.buffer);
770 * This flag is looked at when buffers haven't been allocated
771 * yet, or by some tracers (like irqsoff) that just want to
772 * know if the ring buffer has been disabled, but can handle
773 * races where it gets disabled while we still do a record.
774 * As the check is in the fast path of the tracers, it is more
775 * important to be fast than accurate.
777 tr->buffer_disabled = 0;
778 /* Make the flag seen by readers */
783 * tracing_on - enable tracing buffers
785 * This function enables tracing buffers that may have been
786 * disabled with tracing_off.
788 void tracing_on(void)
790 tracer_tracing_on(&global_trace);
792 EXPORT_SYMBOL_GPL(tracing_on);
795 static __always_inline void
796 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
798 __this_cpu_write(trace_taskinfo_save, true);
800 /* If this is the temp buffer, we need to commit fully */
801 if (this_cpu_read(trace_buffered_event) == event) {
802 /* Length is in event->array[0] */
803 ring_buffer_write(buffer, event->array[0], &event->array[1]);
804 /* Release the temp buffer */
805 this_cpu_dec(trace_buffered_event_cnt);
807 ring_buffer_unlock_commit(buffer, event);
811 * __trace_puts - write a constant string into the trace buffer.
812 * @ip: The address of the caller
813 * @str: The constant string to write
814 * @size: The size of the string.
816 int __trace_puts(unsigned long ip, const char *str, int size)
818 struct ring_buffer_event *event;
819 struct ring_buffer *buffer;
820 struct print_entry *entry;
821 unsigned long irq_flags;
825 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
828 pc = preempt_count();
830 if (unlikely(tracing_selftest_running || tracing_disabled))
833 alloc = sizeof(*entry) + size + 2; /* possible \n added */
835 local_save_flags(irq_flags);
836 buffer = global_trace.trace_buffer.buffer;
837 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
842 entry = ring_buffer_event_data(event);
845 memcpy(&entry->buf, str, size);
847 /* Add a newline if necessary */
848 if (entry->buf[size - 1] != '\n') {
849 entry->buf[size] = '\n';
850 entry->buf[size + 1] = '\0';
852 entry->buf[size] = '\0';
854 __buffer_unlock_commit(buffer, event);
855 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
859 EXPORT_SYMBOL_GPL(__trace_puts);
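/*
 * Example (illustrative, not part of the original source): writing a literal
 * string straight into the trace buffer:
 *
 *	__trace_puts(_THIS_IP_, "hit the slow path\n", 18);
 *
 * Callers normally reach this through the trace_puts() wrapper, which
 * supplies the instruction pointer and size for them.
 */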
862 * __trace_bputs - write the pointer to a constant string into trace buffer
863 * @ip: The address of the caller
864 * @str: The constant string to write to the buffer to
866 int __trace_bputs(unsigned long ip, const char *str)
868 struct ring_buffer_event *event;
869 struct ring_buffer *buffer;
870 struct bputs_entry *entry;
871 unsigned long irq_flags;
872 int size = sizeof(struct bputs_entry);
875 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
878 pc = preempt_count();
880 if (unlikely(tracing_selftest_running || tracing_disabled))
883 local_save_flags(irq_flags);
884 buffer = global_trace.trace_buffer.buffer;
885 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
890 entry = ring_buffer_event_data(event);
894 __buffer_unlock_commit(buffer, event);
895 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
899 EXPORT_SYMBOL_GPL(__trace_bputs);
901 #ifdef CONFIG_TRACER_SNAPSHOT
902 void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
904 struct tracer *tracer = tr->current_trace;
908 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
909 internal_trace_puts("*** snapshot is being ignored ***\n");
913 if (!tr->allocated_snapshot) {
914 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
915 internal_trace_puts("*** stopping trace here! ***\n");
920 /* Note, snapshot can not be used when the tracer uses it */
921 if (tracer->use_max_tr) {
922 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
923 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
927 local_irq_save(flags);
928 update_max_tr(tr, current, smp_processor_id(), cond_data);
929 local_irq_restore(flags);
932 void tracing_snapshot_instance(struct trace_array *tr)
934 tracing_snapshot_instance_cond(tr, NULL);
938 * tracing_snapshot - take a snapshot of the current buffer.
940 * This causes a swap between the snapshot buffer and the current live
941 * tracing buffer. You can use this to take snapshots of the live
942 * trace when some condition is triggered, but continue to trace.
944 * Note, make sure to allocate the snapshot with either
945 * a tracing_snapshot_alloc(), or by doing it manually
946 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
948 * If the snapshot buffer is not allocated, it will stop tracing.
949 * Basically making a permanent snapshot.
951 void tracing_snapshot(void)
953 struct trace_array *tr = &global_trace;
955 tracing_snapshot_instance(tr);
957 EXPORT_SYMBOL_GPL(tracing_snapshot);
960 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
961 * @tr: The tracing instance to snapshot
962 * @cond_data: The data to be tested conditionally, and possibly saved
964 * This is the same as tracing_snapshot() except that the snapshot is
965 * conditional - the snapshot will only happen if the
966 * cond_snapshot.update() implementation receiving the cond_data
967 * returns true, which means that the trace array's cond_snapshot
968 * update() operation used the cond_data to determine whether the
969 * snapshot should be taken, and if it was, presumably saved it along
972 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
974 tracing_snapshot_instance_cond(tr, cond_data);
976 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
979 * tracing_snapshot_cond_data - get the user data associated with a snapshot
980 * @tr: The tracing instance
982 * When the user enables a conditional snapshot using
983 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
984 * with the snapshot. This accessor is used to retrieve it.
986 * Should not be called from cond_snapshot.update(), since it takes
987 * the tr->max_lock lock, which the code calling
988 * cond_snapshot.update() has already taken.
990 * Returns the cond_data associated with the trace array's snapshot.
992 void *tracing_cond_snapshot_data(struct trace_array *tr)
994 void *cond_data = NULL;
996 arch_spin_lock(&tr->max_lock);
998 if (tr->cond_snapshot)
999 cond_data = tr->cond_snapshot->cond_data;
1001 arch_spin_unlock(&tr->max_lock);
1005 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1007 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1008 struct trace_buffer *size_buf, int cpu_id);
1009 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1011 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1015 if (!tr->allocated_snapshot) {
1017 /* allocate spare buffer */
1018 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1019 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
1023 tr->allocated_snapshot = true;
1029 static void free_snapshot(struct trace_array *tr)
1032 * We don't free the ring buffer; instead, we resize it because
1033 * the max_tr ring buffer has some state (e.g. ring->clock) and
1034 * we want to preserve it.
1036 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1037 set_buffer_entries(&tr->max_buffer, 1);
1038 tracing_reset_online_cpus(&tr->max_buffer);
1039 tr->allocated_snapshot = false;
1043 * tracing_alloc_snapshot - allocate snapshot buffer.
1045 * This only allocates the snapshot buffer if it isn't already
1046 * allocated - it doesn't also take a snapshot.
1048 * This is meant to be used in cases where the snapshot buffer needs
1049 * to be set up for events that can't sleep but need to be able to
1050 * trigger a snapshot.
1052 int tracing_alloc_snapshot(void)
1054 struct trace_array *tr = &global_trace;
1057 ret = tracing_alloc_snapshot_instance(tr);
1062 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1065 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1067 * This is similar to tracing_snapshot(), but it will allocate the
1068 * snapshot buffer if it isn't already allocated. Use this only
1069 * where it is safe to sleep, as the allocation may sleep.
1071 * This causes a swap between the snapshot buffer and the current live
1072 * tracing buffer. You can use this to take snapshots of the live
1073 * trace when some condition is triggered, but continue to trace.
1075 void tracing_snapshot_alloc(void)
1079 ret = tracing_alloc_snapshot();
1085 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
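/*
 * Example (illustrative, not part of the original source): from a context
 * that may sleep, allocating and capturing can be combined:
 *
 *	tracing_snapshot_alloc();
 *
 * Code that must trigger the swap from a context that cannot sleep instead
 * allocates the snapshot buffer ahead of time and only swaps when the
 * interesting condition is seen:
 *
 *	if (tracing_alloc_snapshot() == 0)
 *		snapshot_ready = true;
 *	...
 *	if (snapshot_ready && problem_detected())
 *		tracing_snapshot();
 *
 * snapshot_ready and problem_detected() are hypothetical placeholders.
 */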
1088 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1089 * @tr: The tracing instance
1090 * @cond_data: User data to associate with the snapshot
1091 * @update: Implementation of the cond_snapshot update function
1093 * Check whether the conditional snapshot for the given instance has
1094 * already been enabled, or if the current tracer is already using a
1095 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1096 * save the cond_data and update function inside.
1098 * Returns 0 if successful, error otherwise.
1100 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1101 cond_update_fn_t update)
1103 struct cond_snapshot *cond_snapshot;
1106 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1110 cond_snapshot->cond_data = cond_data;
1111 cond_snapshot->update = update;
1113 mutex_lock(&trace_types_lock);
1115 ret = tracing_alloc_snapshot_instance(tr);
1119 if (tr->current_trace->use_max_tr) {
1125 * The cond_snapshot can only change to NULL without the
1126 * trace_types_lock. We don't care if we race with it going
1127 * to NULL, but we want to make sure that it's not set to
1128 * something other than NULL when we get here, which we can
1129 * do safely with only holding the trace_types_lock and not
1130 * having to take the max_lock.
1132 if (tr->cond_snapshot) {
1137 arch_spin_lock(&tr->max_lock);
1138 tr->cond_snapshot = cond_snapshot;
1139 arch_spin_unlock(&tr->max_lock);
1141 mutex_unlock(&trace_types_lock);
1146 mutex_unlock(&trace_types_lock);
1147 kfree(cond_snapshot);
1150 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1153 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1154 * @tr: The tracing instance
1156 * Check whether the conditional snapshot for the given instance is
1157 * enabled; if so, free the cond_snapshot associated with it,
1158 * otherwise return -EINVAL.
1160 * Returns 0 if successful, error otherwise.
1162 int tracing_snapshot_cond_disable(struct trace_array *tr)
1166 arch_spin_lock(&tr->max_lock);
1168 if (!tr->cond_snapshot)
1171 kfree(tr->cond_snapshot);
1172 tr->cond_snapshot = NULL;
1175 arch_spin_unlock(&tr->max_lock);
1179 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
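/*
 * Example (illustrative sketch, not from the original source): a user of the
 * conditional snapshot API supplies an update callback that decides, per
 * call, whether the swap should happen. Assuming cond_update_fn_t takes the
 * trace array and the cond_data and returns true to allow the snapshot (as
 * the call site in update_max_tr() below suggests):
 *
 *	static bool example_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct example_state *state = cond_data;
 *
 *		return state->value > state->threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &example_state, example_update);
 *	...
 *	tracing_snapshot_cond(tr, &example_state);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 *
 * struct example_state and its fields are hypothetical.
 */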
1181 void tracing_snapshot(void)
1183 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1185 EXPORT_SYMBOL_GPL(tracing_snapshot);
1186 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1188 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1190 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1191 int tracing_alloc_snapshot(void)
1193 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1196 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1197 void tracing_snapshot_alloc(void)
1202 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1203 void *tracing_cond_snapshot_data(struct trace_array *tr)
1207 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1208 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1212 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1213 int tracing_snapshot_cond_disable(struct trace_array *tr)
1217 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1218 #endif /* CONFIG_TRACER_SNAPSHOT */
1220 void tracer_tracing_off(struct trace_array *tr)
1222 if (tr->trace_buffer.buffer)
1223 ring_buffer_record_off(tr->trace_buffer.buffer);
1225 * This flag is looked at when buffers haven't been allocated
1226 * yet, or by some tracers (like irqsoff) that just want to
1227 * know if the ring buffer has been disabled, but can handle
1228 * races where it gets disabled while we still do a record.
1229 * As the check is in the fast path of the tracers, it is more
1230 * important to be fast than accurate.
1232 tr->buffer_disabled = 1;
1233 /* Make the flag seen by readers */
1238 * tracing_off - turn off tracing buffers
1240 * This function stops the tracing buffers from recording data.
1241 * It does not disable any overhead the tracers themselves may
1242 * be causing. This function simply causes all recording to
1243 * the ring buffers to fail.
1245 void tracing_off(void)
1247 tracer_tracing_off(&global_trace);
1249 EXPORT_SYMBOL_GPL(tracing_off);
1251 void disable_trace_on_warning(void)
1253 if (__disable_trace_on_warning)
1258 * tracer_tracing_is_on - show real state of ring buffer enabled
1259 * @tr: the trace array to know if ring buffer is enabled
1261 * Shows the real state of the ring buffer: whether it is enabled or not.
1263 bool tracer_tracing_is_on(struct trace_array *tr)
1265 if (tr->trace_buffer.buffer)
1266 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1267 return !tr->buffer_disabled;
1271 * tracing_is_on - show state of ring buffers enabled
1273 int tracing_is_on(void)
1275 return tracer_tracing_is_on(&global_trace);
1277 EXPORT_SYMBOL_GPL(tracing_is_on);
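/*
 * Example (illustrative, not part of the original source): a common
 * debugging pattern is to stop the ring buffers as soon as a suspicious
 * state is noticed, so that the trace leading up to it is preserved:
 *
 *	if (data_looks_corrupted())
 *		tracing_off();
 *	...
 *	if (tracing_is_on())
 *		pr_debug("buffers are still recording\n");
 *
 * data_looks_corrupted() is a hypothetical check; tracing_on() re-enables
 * recording once the trace has been read out.
 */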
1279 static int __init set_buf_size(char *str)
1281 unsigned long buf_size;
1285 buf_size = memparse(str, &str);
1286 /* nr_entries can not be zero */
1289 trace_buf_size = buf_size;
1292 __setup("trace_buf_size=", set_buf_size);
1294 static int __init set_tracing_thresh(char *str)
1296 unsigned long threshold;
1301 ret = kstrtoul(str, 0, &threshold);
1304 tracing_thresh = threshold * 1000;
1307 __setup("tracing_thresh=", set_tracing_thresh);
1309 unsigned long nsecs_to_usecs(unsigned long nsecs)
1311 return nsecs / 1000;
1315 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1316 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1317 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1318 * of strings in the order that the evals (enum) were defined.
1323 /* These must match the bit positions in trace_iterator_flags */
1324 static const char *trace_options[] = {
1332 int in_ns; /* is this clock in nanoseconds? */
1333 } trace_clocks[] = {
1334 { trace_clock_local, "local", 1 },
1335 { trace_clock_global, "global", 1 },
1336 { trace_clock_counter, "counter", 0 },
1337 { trace_clock_jiffies, "uptime", 0 },
1338 { trace_clock, "perf", 1 },
1339 { ktime_get_mono_fast_ns, "mono", 1 },
1340 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1341 { ktime_get_boot_fast_ns, "boot", 1 },
1345 bool trace_clock_in_ns(struct trace_array *tr)
1347 if (trace_clocks[tr->clock_id].in_ns)
1354 * trace_parser_get_init - gets the buffer for trace parser
1356 int trace_parser_get_init(struct trace_parser *parser, int size)
1358 memset(parser, 0, sizeof(*parser));
1360 parser->buffer = kmalloc(size, GFP_KERNEL);
1361 if (!parser->buffer)
1364 parser->size = size;
1369 * trace_parser_put - frees the buffer for trace parser
1371 void trace_parser_put(struct trace_parser *parser)
1373 kfree(parser->buffer);
1374 parser->buffer = NULL;
1378 * trace_get_user - reads the user input string separated by space
1379 * (matched by isspace(ch))
1381 * For each string found the 'struct trace_parser' is updated,
1382 * and the function returns.
1384 * Returns number of bytes read.
1386 * See kernel/trace/trace.h for 'struct trace_parser' details.
1388 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1389 size_t cnt, loff_t *ppos)
1396 trace_parser_clear(parser);
1398 ret = get_user(ch, ubuf++);
1406 * If the parser has not finished with the last write,
1407 * continue reading the user input without skipping spaces.
1409 if (!parser->cont) {
1410 /* skip white space */
1411 while (cnt && isspace(ch)) {
1412 ret = get_user(ch, ubuf++);
1421 /* only spaces were written */
1422 if (isspace(ch) || !ch) {
1429 /* read the non-space input */
1430 while (cnt && !isspace(ch) && ch) {
1431 if (parser->idx < parser->size - 1)
1432 parser->buffer[parser->idx++] = ch;
1437 ret = get_user(ch, ubuf++);
1444 /* We either got finished input or we have to wait for another call. */
1445 if (isspace(ch) || !ch) {
1446 parser->buffer[parser->idx] = 0;
1447 parser->cont = false;
1448 } else if (parser->idx < parser->size - 1) {
1449 parser->cont = true;
1450 parser->buffer[parser->idx++] = ch;
1451 /* Make sure the parsed string always terminates with '\0'. */
1452 parser->buffer[parser->idx] = 0;
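/*
 * Example (illustrative sketch, not from the original source): trace_get_user()
 * is used from write() handlers to split user input into space-separated
 * tokens, following the same pattern as trace_pid_write() above:
 *
 *	struct trace_parser parser;
 *	unsigned long val;
 *	loff_t pos;
 *	int ret;
 *
 *	if (trace_parser_get_init(&parser, 64))
 *		return -ENOMEM;
 *	while (cnt > 0) {
 *		pos = 0;
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		if (!kstrtoul(parser.buffer, 0, &val))
 *			use_value(val);
 *		ubuf += ret;
 *		cnt -= ret;
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 *
 * The buffer size of 64 and use_value() are placeholders for whatever the
 * caller actually needs.
 */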
1465 /* TODO add a seq_buf_to_buffer() */
1466 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1470 if (trace_seq_used(s) <= s->seq.readpos)
1473 len = trace_seq_used(s) - s->seq.readpos;
1476 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1478 s->seq.readpos += cnt;
1482 unsigned long __read_mostly tracing_thresh;
1484 #ifdef CONFIG_TRACER_MAX_TRACE
1486 * Copy the new maximum trace into the separate maximum-trace
1487 * structure. (this way the maximum trace is permanently saved,
1488 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1491 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1493 struct trace_buffer *trace_buf = &tr->trace_buffer;
1494 struct trace_buffer *max_buf = &tr->max_buffer;
1495 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1496 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1499 max_buf->time_start = data->preempt_timestamp;
1501 max_data->saved_latency = tr->max_latency;
1502 max_data->critical_start = data->critical_start;
1503 max_data->critical_end = data->critical_end;
1505 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1506 max_data->pid = tsk->pid;
1508 * If tsk == current, then use current_uid(), as that does not use
1509 * RCU. The irq tracer can be called out of RCU scope.
1512 max_data->uid = current_uid();
1514 max_data->uid = task_uid(tsk);
1516 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1517 max_data->policy = tsk->policy;
1518 max_data->rt_priority = tsk->rt_priority;
1520 /* record this task's comm */
1521 tracing_record_cmdline(tsk);
1525 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1527 * @tsk: the task with the latency
1528 * @cpu: The cpu that initiated the trace.
1529 * @cond_data: User data associated with a conditional snapshot
1531 * Flip the buffers between the @tr and the max_tr and record information
1532 * about which task was the cause of this latency.
1535 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1541 WARN_ON_ONCE(!irqs_disabled());
1543 if (!tr->allocated_snapshot) {
1544 /* Only the nop tracer should hit this when disabling */
1545 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1549 arch_spin_lock(&tr->max_lock);
1551 /* Inherit the recordable setting from trace_buffer */
1552 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1553 ring_buffer_record_on(tr->max_buffer.buffer);
1555 ring_buffer_record_off(tr->max_buffer.buffer);
1557 #ifdef CONFIG_TRACER_SNAPSHOT
1558 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1561 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1563 __update_max_tr(tr, tsk, cpu);
1566 arch_spin_unlock(&tr->max_lock);
1570 * update_max_tr_single - only copy one trace over, and reset the rest
1572 * @tsk: task with the latency
1573 * @cpu: the cpu of the buffer to copy.
1575 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1578 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1585 WARN_ON_ONCE(!irqs_disabled());
1586 if (!tr->allocated_snapshot) {
1587 /* Only the nop tracer should hit this when disabling */
1588 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1592 arch_spin_lock(&tr->max_lock);
1594 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1596 if (ret == -EBUSY) {
1598 * We failed to swap the buffer due to a commit taking
1599 * place on this CPU. We fail to record, but we reset
1600 * the max trace buffer (no one writes directly to it)
1601 * and flag that it failed.
1603 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1604 "Failed to swap buffers due to commit in progress\n");
1607 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1609 __update_max_tr(tr, tsk, cpu);
1610 arch_spin_unlock(&tr->max_lock);
1612 #endif /* CONFIG_TRACER_MAX_TRACE */
1614 static int wait_on_pipe(struct trace_iterator *iter, int full)
1616 /* Iterators are static, they should be filled or empty */
1617 if (trace_buffer_iter(iter, iter->cpu_file))
1620 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1624 #ifdef CONFIG_FTRACE_STARTUP_TEST
1625 static bool selftests_can_run;
1627 struct trace_selftests {
1628 struct list_head list;
1629 struct tracer *type;
1632 static LIST_HEAD(postponed_selftests);
1634 static int save_selftest(struct tracer *type)
1636 struct trace_selftests *selftest;
1638 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1642 selftest->type = type;
1643 list_add(&selftest->list, &postponed_selftests);
1647 static int run_tracer_selftest(struct tracer *type)
1649 struct trace_array *tr = &global_trace;
1650 struct tracer *saved_tracer = tr->current_trace;
1653 if (!type->selftest || tracing_selftest_disabled)
1657 * If a tracer registers early in boot up (before scheduling is
1658 * initialized and such), then do not run its selftests yet.
1659 * Instead, run it a little later in the boot process.
1661 if (!selftests_can_run)
1662 return save_selftest(type);
1665 * Run a selftest on this tracer.
1666 * Here we reset the trace buffer, and set the current
1667 * tracer to be this tracer. The tracer can then run some
1668 * internal tracing to verify that everything is in order.
1669 * If we fail, we do not register this tracer.
1671 tracing_reset_online_cpus(&tr->trace_buffer);
1673 tr->current_trace = type;
1675 #ifdef CONFIG_TRACER_MAX_TRACE
1676 if (type->use_max_tr) {
1677 /* If we expanded the buffers, make sure the max is expanded too */
1678 if (ring_buffer_expanded)
1679 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1680 RING_BUFFER_ALL_CPUS);
1681 tr->allocated_snapshot = true;
1685 /* the test is responsible for initializing and enabling */
1686 pr_info("Testing tracer %s: ", type->name);
1687 ret = type->selftest(type, tr);
1688 /* the test is responsible for resetting too */
1689 tr->current_trace = saved_tracer;
1691 printk(KERN_CONT "FAILED!\n");
1692 /* Add the warning after printing 'FAILED' */
1696 /* Only reset on passing, to avoid touching corrupted buffers */
1697 tracing_reset_online_cpus(&tr->trace_buffer);
1699 #ifdef CONFIG_TRACER_MAX_TRACE
1700 if (type->use_max_tr) {
1701 tr->allocated_snapshot = false;
1703 /* Shrink the max buffer again */
1704 if (ring_buffer_expanded)
1705 ring_buffer_resize(tr->max_buffer.buffer, 1,
1706 RING_BUFFER_ALL_CPUS);
1710 printk(KERN_CONT "PASSED\n");
1714 static __init int init_trace_selftests(void)
1716 struct trace_selftests *p, *n;
1717 struct tracer *t, **last;
1720 selftests_can_run = true;
1722 mutex_lock(&trace_types_lock);
1724 if (list_empty(&postponed_selftests))
1727 pr_info("Running postponed tracer tests:\n");
1729 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1730 ret = run_tracer_selftest(p->type);
1731 /* If the test fails, then warn and remove from available_tracers */
1733 WARN(1, "tracer: %s failed selftest, disabling\n",
1735 last = &trace_types;
1736 for (t = trace_types; t; t = t->next) {
1749 mutex_unlock(&trace_types_lock);
1753 core_initcall(init_trace_selftests);
1755 static inline int run_tracer_selftest(struct tracer *type)
1759 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1761 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1763 static void __init apply_trace_boot_options(void);
1766 * register_tracer - register a tracer with the ftrace system.
1767 * @type: the plugin for the tracer
1769 * Register a new plugin tracer.
1771 int __init register_tracer(struct tracer *type)
1777 pr_info("Tracer must have a name\n");
1781 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1782 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1786 mutex_lock(&trace_types_lock);
1788 tracing_selftest_running = true;
1790 for (t = trace_types; t; t = t->next) {
1791 if (strcmp(type->name, t->name) == 0) {
1793 pr_info("Tracer %s already registered\n",
1800 if (!type->set_flag)
1801 type->set_flag = &dummy_set_flag;
1803 /* allocate a dummy tracer_flags */
1804 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1809 type->flags->val = 0;
1810 type->flags->opts = dummy_tracer_opt;
1812 if (!type->flags->opts)
1813 type->flags->opts = dummy_tracer_opt;
1815 /* store the tracer for __set_tracer_option */
1816 type->flags->trace = type;
1818 ret = run_tracer_selftest(type);
1822 type->next = trace_types;
1824 add_tracer_options(&global_trace, type);
1827 tracing_selftest_running = false;
1828 mutex_unlock(&trace_types_lock);
1830 if (ret || !default_bootup_tracer)
1833 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1836 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1837 /* Do we want this tracer to start on bootup? */
1838 tracing_set_tracer(&global_trace, type->name);
1839 default_bootup_tracer = NULL;
1841 apply_trace_boot_options();
1843 /* disable other selftests, since this will break it. */
1844 tracing_selftest_disabled = true;
1845 #ifdef CONFIG_FTRACE_STARTUP_TEST
1846 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1854 void tracing_reset(struct trace_buffer *buf, int cpu)
1856 struct ring_buffer *buffer = buf->buffer;
1861 ring_buffer_record_disable(buffer);
1863 /* Make sure all commits have finished */
1865 ring_buffer_reset_cpu(buffer, cpu);
1867 ring_buffer_record_enable(buffer);
1870 void tracing_reset_online_cpus(struct trace_buffer *buf)
1872 struct ring_buffer *buffer = buf->buffer;
1878 ring_buffer_record_disable(buffer);
1880 /* Make sure all commits have finished */
1883 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1885 for_each_online_cpu(cpu)
1886 ring_buffer_reset_cpu(buffer, cpu);
1888 ring_buffer_record_enable(buffer);
1891 /* Must have trace_types_lock held */
1892 void tracing_reset_all_online_cpus(void)
1894 struct trace_array *tr;
1896 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1897 if (!tr->clear_trace)
1899 tr->clear_trace = false;
1900 tracing_reset_online_cpus(&tr->trace_buffer);
1901 #ifdef CONFIG_TRACER_MAX_TRACE
1902 tracing_reset_online_cpus(&tr->max_buffer);
1907 static int *tgid_map;
1909 #define SAVED_CMDLINES_DEFAULT 128
1910 #define NO_CMDLINE_MAP UINT_MAX
1911 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1912 struct saved_cmdlines_buffer {
1913 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1914 unsigned *map_cmdline_to_pid;
1915 unsigned cmdline_num;
1917 char *saved_cmdlines;
1919 static struct saved_cmdlines_buffer *savedcmd;
1921 /* temporarily disable recording */
1922 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1924 static inline char *get_saved_cmdlines(int idx)
1926 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1929 static inline void set_cmdline(int idx, const char *cmdline)
1931 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1934 static int allocate_cmdlines_buffer(unsigned int val,
1935 struct saved_cmdlines_buffer *s)
1937 s->map_cmdline_to_pid = kmalloc_array(val,
1938 sizeof(*s->map_cmdline_to_pid),
1940 if (!s->map_cmdline_to_pid)
1943 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1944 if (!s->saved_cmdlines) {
1945 kfree(s->map_cmdline_to_pid);
1950 s->cmdline_num = val;
1951 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1952 sizeof(s->map_pid_to_cmdline));
1953 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1954 val * sizeof(*s->map_cmdline_to_pid));
1959 static int trace_create_savedcmd(void)
1963 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1967 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1977 int is_tracing_stopped(void)
1979 return global_trace.stop_count;
1983 * tracing_start - quick start of the tracer
1985 * If tracing is enabled but was stopped by tracing_stop,
1986 * this will start the tracer back up.
1988 void tracing_start(void)
1990 struct ring_buffer *buffer;
1991 unsigned long flags;
1993 if (tracing_disabled)
1996 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1997 if (--global_trace.stop_count) {
1998 if (global_trace.stop_count < 0) {
1999 /* Someone screwed up their debugging */
2001 global_trace.stop_count = 0;
2006 /* Prevent the buffers from switching */
2007 arch_spin_lock(&global_trace.max_lock);
2009 buffer = global_trace.trace_buffer.buffer;
2011 ring_buffer_record_enable(buffer);
2013 #ifdef CONFIG_TRACER_MAX_TRACE
2014 buffer = global_trace.max_buffer.buffer;
2016 ring_buffer_record_enable(buffer);
2019 arch_spin_unlock(&global_trace.max_lock);
2022 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2025 static void tracing_start_tr(struct trace_array *tr)
2027 struct ring_buffer *buffer;
2028 unsigned long flags;
2030 if (tracing_disabled)
2033 /* If global, we need to also start the max tracer */
2034 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2035 return tracing_start();
2037 raw_spin_lock_irqsave(&tr->start_lock, flags);
2039 if (--tr->stop_count) {
2040 if (tr->stop_count < 0) {
2041 /* Someone screwed up their debugging */
2048 buffer = tr->trace_buffer.buffer;
2050 ring_buffer_record_enable(buffer);
2053 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2057 * tracing_stop - quick stop of the tracer
2059 * Lightweight way to stop tracing. Use in conjunction with
2062 void tracing_stop(void)
2064 struct ring_buffer *buffer;
2065 unsigned long flags;
2067 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2068 if (global_trace.stop_count++)
2071 /* Prevent the buffers from switching */
2072 arch_spin_lock(&global_trace.max_lock);
2074 buffer = global_trace.trace_buffer.buffer;
2076 ring_buffer_record_disable(buffer);
2078 #ifdef CONFIG_TRACER_MAX_TRACE
2079 buffer = global_trace.max_buffer.buffer;
2081 ring_buffer_record_disable(buffer);
2084 arch_spin_unlock(&global_trace.max_lock);
2087 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
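/*
 * Example (illustrative, not part of the original source): tracing_stop()
 * and tracing_start() nest via stop_count, so a stretch of code can be
 * kept out of the trace with a simple pair:
 *
 *	tracing_stop();
 *	do_noisy_work();
 *	tracing_start();
 *
 * do_noisy_work() is a hypothetical stand-in for work that is not worth
 * recording.
 */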
2090 static void tracing_stop_tr(struct trace_array *tr)
2092 struct ring_buffer *buffer;
2093 unsigned long flags;
2095 /* If global, we need to also stop the max tracer */
2096 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2097 return tracing_stop();
2099 raw_spin_lock_irqsave(&tr->start_lock, flags);
2100 if (tr->stop_count++)
2103 buffer = tr->trace_buffer.buffer;
2105 ring_buffer_record_disable(buffer);
2108 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2111 static int trace_save_cmdline(struct task_struct *tsk)
2115 /* treat recording of idle task as a success */
2119 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2123 * It's not the end of the world if we don't get
2124 * the lock, but we also don't want to spin
2125 * nor do we want to disable interrupts,
2126 * so if we miss here, then better luck next time.
2128 if (!arch_spin_trylock(&trace_cmdline_lock))
2131 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2132 if (idx == NO_CMDLINE_MAP) {
2133 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2136 * Check whether the cmdline buffer at idx has a pid
2137 * mapped. We are going to overwrite that entry so we
2138 * need to clear the map_pid_to_cmdline. Otherwise we
2139 * would read the new comm for the old pid.
2141 pid = savedcmd->map_cmdline_to_pid[idx];
2142 if (pid != NO_CMDLINE_MAP)
2143 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2145 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2146 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2148 savedcmd->cmdline_idx = idx;
2151 set_cmdline(idx, tsk->comm);
2153 arch_spin_unlock(&trace_cmdline_lock);
2158 static void __trace_find_cmdline(int pid, char comm[])
2163 strcpy(comm, "<idle>");
2167 if (WARN_ON_ONCE(pid < 0)) {
2168 strcpy(comm, "<XXX>");
2172 if (pid > PID_MAX_DEFAULT) {
2173 strcpy(comm, "<...>");
2177 map = savedcmd->map_pid_to_cmdline[pid];
2178 if (map != NO_CMDLINE_MAP)
2179 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2181 strcpy(comm, "<...>");
2184 void trace_find_cmdline(int pid, char comm[])
2187 arch_spin_lock(&trace_cmdline_lock);
2189 __trace_find_cmdline(pid, comm);
2191 arch_spin_unlock(&trace_cmdline_lock);
2195 int trace_find_tgid(int pid)
2197 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2200 return tgid_map[pid];
2203 static int trace_save_tgid(struct task_struct *tsk)
2205 /* treat recording of idle task as a success */
2209 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2212 tgid_map[tsk->pid] = tsk->tgid;
2216 static bool tracing_record_taskinfo_skip(int flags)
2218 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2220 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2222 if (!__this_cpu_read(trace_taskinfo_save))
2228 * tracing_record_taskinfo - record the task info of a task
2230 * @task: task to record
2231 * @flags: TRACE_RECORD_CMDLINE for recording comm
2232 * - TRACE_RECORD_TGID for recording tgid
2234 void tracing_record_taskinfo(struct task_struct *task, int flags)
2238 if (tracing_record_taskinfo_skip(flags))
2242 * Record as much task information as possible. If some fail, continue
2243 * to try to record the others.
2245 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2246 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2248 /* If recording any information failed, retry again soon. */
2252 __this_cpu_write(trace_taskinfo_save, false);
2256 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2258 * @prev: previous task during sched_switch
2259 * @next: next task during sched_switch
2260 * @flags: TRACE_RECORD_CMDLINE for recording comm
2261 * TRACE_RECORD_TGID for recording tgid
2263 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2264 struct task_struct *next, int flags)
2268 if (tracing_record_taskinfo_skip(flags))
2272 * Record as much task information as possible. If some fail, continue
2273 * to try to record the others.
2275 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2276 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2277 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2278 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2280 /* If recording any information failed, retry again soon. */
2284 __this_cpu_write(trace_taskinfo_save, false);
2287 /* Helpers to record specific task information */
2288 void tracing_record_cmdline(struct task_struct *task)
2290 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2293 void tracing_record_tgid(struct task_struct *task)
2295 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2299 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2300 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2301 * simplifies those functions and keeps them in sync.
2303 enum print_line_t trace_handle_return(struct trace_seq *s)
2305 return trace_seq_has_overflowed(s) ?
2306 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2308 EXPORT_SYMBOL_GPL(trace_handle_return);
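/*
 * Example (illustrative sketch, not from the original source): an event's
 * output callback can end with trace_handle_return() instead of checking
 * the trace_seq overflow state by hand:
 *
 *	static enum print_line_t example_output(struct trace_iterator *iter,
 *						int flags,
 *						struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "example event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 *
 * The callback parameter list is taken from the ->trace() call in
 * output_printk() below.
 */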
2311 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2314 struct task_struct *tsk = current;
2316 entry->preempt_count = pc & 0xff;
2317 entry->pid = (tsk) ? tsk->pid : 0;
2319 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2320 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2322 TRACE_FLAG_IRQS_NOSUPPORT |
2324 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2325 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2326 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2327 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2328 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2330 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2332 struct ring_buffer_event *
2333 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2336 unsigned long flags, int pc)
2338 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2341 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2342 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2343 static int trace_buffered_event_ref;
2346 * trace_buffered_event_enable - enable buffering events
2348 * When events are being filtered, it is quicker to use a temporary
2349 * buffer to write the event data into if there's a likely chance
2350 * that it will not be committed. The discard of the ring buffer
2351 * is not as fast as committing, and is much slower than copying
2354 * When an event is to be filtered, allocate per cpu buffers to
2355 * write the event data into, and if the event is filtered and discarded
2356 * it is simply dropped, otherwise, the entire data is to be committed
2359 void trace_buffered_event_enable(void)
2361 struct ring_buffer_event *event;
2365 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2367 if (trace_buffered_event_ref++)
2370 for_each_tracing_cpu(cpu) {
2371 page = alloc_pages_node(cpu_to_node(cpu),
2372 GFP_KERNEL | __GFP_NORETRY, 0);
2376 event = page_address(page);
2377 memset(event, 0, sizeof(*event));
2379 per_cpu(trace_buffered_event, cpu) = event;
2382 if (cpu == smp_processor_id() &&
2383 this_cpu_read(trace_buffered_event) !=
2384 per_cpu(trace_buffered_event, cpu))
2391 trace_buffered_event_disable();
2394 static void enable_trace_buffered_event(void *data)
2396 /* Probably not needed, but do it anyway */
2398 this_cpu_dec(trace_buffered_event_cnt);
2401 static void disable_trace_buffered_event(void *data)
2403 this_cpu_inc(trace_buffered_event_cnt);
2407 * trace_buffered_event_disable - disable buffering events
2409 * When a filter is removed, it is faster to not use the buffered
2410 * events, and to commit directly into the ring buffer. Free up
2411 * the temp buffers when there are no more users. This requires
2412 * special synchronization with current events.
2414 void trace_buffered_event_disable(void)
2418 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2420 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2423 if (--trace_buffered_event_ref)
2427 /* For each CPU, set the buffer as used. */
2428 smp_call_function_many(tracing_buffer_mask,
2429 disable_trace_buffered_event, NULL, 1);
2432 /* Wait for all current users to finish */
2435 for_each_tracing_cpu(cpu) {
2436 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2437 per_cpu(trace_buffered_event, cpu) = NULL;
2440 * Make sure trace_buffered_event is NULL before clearing
2441 * trace_buffered_event_cnt.
2446 /* Do the work on each cpu */
2447 smp_call_function_many(tracing_buffer_mask,
2448 enable_trace_buffered_event, NULL, 1);
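/*
 * Example sketch: both functions are reference counted and expect
 * event_mutex to be held (see the WARN_ON_ONCE()s above).  A caller that
 * switches an event into a filtered or soft-disabled mode pairs them
 * roughly like this; the example_* functions are made up for illustration.
 */
#if 0	/* illustration only */
static void example_start_filtering(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();	/* first user allocates the buffers */
	/* ... install the filter on the event file ... */
	mutex_unlock(&event_mutex);
}

static void example_stop_filtering(void)
{
	mutex_lock(&event_mutex);
	/* ... remove the filter ... */
	trace_buffered_event_disable();	/* last user frees the buffers */
	mutex_unlock(&event_mutex);
}
#endif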
2452 static struct ring_buffer *temp_buffer;
2454 struct ring_buffer_event *
2455 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2456 struct trace_event_file *trace_file,
2457 int type, unsigned long len,
2458 unsigned long flags, int pc)
2460 struct ring_buffer_event *entry;
2463 *current_rb = trace_file->tr->trace_buffer.buffer;
2465 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2466 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2467 (entry = this_cpu_read(trace_buffered_event))) {
2468 /* Try to use the per cpu buffer first */
2469 val = this_cpu_inc_return(trace_buffered_event_cnt);
2471 trace_event_setup(entry, type, flags, pc);
2472 entry->array[0] = len;
2475 this_cpu_dec(trace_buffered_event_cnt);
2478 entry = __trace_buffer_lock_reserve(*current_rb,
2479 type, len, flags, pc);
2481 * If tracing is off, but we have triggers enabled,
2482 * we still need to look at the event data. Use the temp_buffer
2483 * to store the trace event for the trigger to use. It is recursion
2484 * safe and will not be recorded anywhere.
2486 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2487 *current_rb = temp_buffer;
2488 entry = __trace_buffer_lock_reserve(*current_rb,
2489 type, len, flags, pc);
2493 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2495 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2496 static DEFINE_MUTEX(tracepoint_printk_mutex);
2498 static void output_printk(struct trace_event_buffer *fbuffer)
2500 struct trace_event_call *event_call;
2501 struct trace_event *event;
2502 unsigned long flags;
2503 struct trace_iterator *iter = tracepoint_print_iter;
2505 /* We should never get here if iter is NULL */
2506 if (WARN_ON_ONCE(!iter))
2509 event_call = fbuffer->trace_file->event_call;
2510 if (!event_call || !event_call->event.funcs ||
2511 !event_call->event.funcs->trace)
2514 event = &fbuffer->trace_file->event_call->event;
2516 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2517 trace_seq_init(&iter->seq);
2518 iter->ent = fbuffer->entry;
2519 event_call->event.funcs->trace(iter, 0, event);
2520 trace_seq_putc(&iter->seq, 0);
2521 printk("%s", iter->seq.buffer);
2523 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2526 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2527 void __user *buffer, size_t *lenp,
2530 int save_tracepoint_printk;
2533 mutex_lock(&tracepoint_printk_mutex);
2534 save_tracepoint_printk = tracepoint_printk;
2536 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2539 * This will force exiting early, as tracepoint_printk
2540 * is always zero when tracepoint_print_iter is not allocated.
2542 if (!tracepoint_print_iter)
2543 tracepoint_printk = 0;
2545 if (save_tracepoint_printk == tracepoint_printk)
2548 if (tracepoint_printk)
2549 static_key_enable(&tracepoint_printk_key.key);
2551 static_key_disable(&tracepoint_printk_key.key);
2554 mutex_unlock(&tracepoint_printk_mutex);
2559 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2561 if (static_key_false(&tracepoint_printk_key.key))
2562 output_printk(fbuffer);
2564 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2565 fbuffer->event, fbuffer->entry,
2566 fbuffer->flags, fbuffer->pc);
2568 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2573 * trace_buffer_unlock_commit_regs()
2574 * trace_event_buffer_commit()
2575 * trace_event_raw_event_xxx()
2577 # define STACK_SKIP 3
2579 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2580 struct ring_buffer *buffer,
2581 struct ring_buffer_event *event,
2582 unsigned long flags, int pc,
2583 struct pt_regs *regs)
2585 __buffer_unlock_commit(buffer, event);
2588 * If regs is not set, then skip the necessary functions.
2589 * Note, we can still get here via blktrace, wakeup tracer
2590 * and mmiotrace, but that's ok if they lose a function or
2591 * two. They are not that meaningful.
2593 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2594 ftrace_trace_userstack(buffer, flags, pc);
2598 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2601 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2602 struct ring_buffer_event *event)
2604 __buffer_unlock_commit(buffer, event);
2608 trace_process_export(struct trace_export *export,
2609 struct ring_buffer_event *event)
2611 struct trace_entry *entry;
2612 unsigned int size = 0;
2614 entry = ring_buffer_event_data(event);
2615 size = ring_buffer_event_length(event);
2616 export->write(export, entry, size);
2619 static DEFINE_MUTEX(ftrace_export_lock);
2621 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2623 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2625 static inline void ftrace_exports_enable(void)
2627 static_branch_enable(&ftrace_exports_enabled);
2630 static inline void ftrace_exports_disable(void)
2632 static_branch_disable(&ftrace_exports_enabled);
2635 static void ftrace_exports(struct ring_buffer_event *event)
2637 struct trace_export *export;
2639 preempt_disable_notrace();
2641 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2643 trace_process_export(export, event);
2644 export = rcu_dereference_raw_notrace(export->next);
2647 preempt_enable_notrace();
2651 add_trace_export(struct trace_export **list, struct trace_export *export)
2653 rcu_assign_pointer(export->next, *list);
2655 * We are adding the export to the list, but another
2656 * CPU might be walking that list. We need to make sure
2657 * the export->next pointer is valid before another CPU sees
2658 * the export pointer inserted into the list.
2660 rcu_assign_pointer(*list, export);
2664 rm_trace_export(struct trace_export **list, struct trace_export *export)
2666 struct trace_export **p;
2668 for (p = list; *p != NULL; p = &(*p)->next)
2675 rcu_assign_pointer(*p, (*p)->next);
2681 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2684 ftrace_exports_enable();
2686 add_trace_export(list, export);
2690 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2694 ret = rm_trace_export(list, export);
2696 ftrace_exports_disable();
2701 int register_ftrace_export(struct trace_export *export)
2703 if (WARN_ON_ONCE(!export->write))
2706 mutex_lock(&ftrace_export_lock);
2708 add_ftrace_export(&ftrace_exports_list, export);
2710 mutex_unlock(&ftrace_export_lock);
2714 EXPORT_SYMBOL_GPL(register_ftrace_export);
2716 int unregister_ftrace_export(struct trace_export *export)
2720 mutex_lock(&ftrace_export_lock);
2722 ret = rm_ftrace_export(&ftrace_exports_list, export);
2724 mutex_unlock(&ftrace_export_lock);
2728 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
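/*
 * Example sketch: a module can mirror ftrace ring buffer events to another
 * transport by registering a trace_export whose write() callback receives
 * the raw entry and its length from trace_process_export() above.  This
 * assumes the two-field struct trace_export from <linux/trace.h>; the
 * example_* names are made up (see drivers/hwtracing/stm/ftrace.c for a
 * real in-tree user).
 */
#if 0	/* illustration only */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* Hand the binary trace entry to the hypothetical transport. */
	example_transport_send(entry, size);
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}

static void __exit example_export_exit(void)
{
	unregister_ftrace_export(&example_export);
}
#endif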
2731 trace_function(struct trace_array *tr,
2732 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2735 struct trace_event_call *call = &event_function;
2736 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2737 struct ring_buffer_event *event;
2738 struct ftrace_entry *entry;
2740 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2744 entry = ring_buffer_event_data(event);
2746 entry->parent_ip = parent_ip;
2748 if (!call_filter_check_discard(call, entry, buffer, event)) {
2749 if (static_branch_unlikely(&ftrace_exports_enabled))
2750 ftrace_exports(event);
2751 __buffer_unlock_commit(buffer, event);
2755 #ifdef CONFIG_STACKTRACE
2757 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2758 #define FTRACE_KSTACK_NESTING 4
2760 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2762 struct ftrace_stack {
2763 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2767 struct ftrace_stacks {
2768 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2771 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2772 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2774 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2775 unsigned long flags,
2776 int skip, int pc, struct pt_regs *regs)
2778 struct trace_event_call *call = &event_kernel_stack;
2779 struct ring_buffer_event *event;
2780 unsigned int size, nr_entries;
2781 struct ftrace_stack *fstack;
2782 struct stack_entry *entry;
2786 * Add one, for this function and the call to save_stack_trace().
2787 * If regs is set, then these functions will not be in the way.
2789 #ifndef CONFIG_UNWINDER_ORC
2795 * Since events can happen in NMIs there's no safe way to
2796 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2797 * or NMI comes in, it will just have to use the default
2798 * FTRACE_STACK_SIZE.
2800 preempt_disable_notrace();
2802 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2804 /* This should never happen. If it does, yell once and skip */
2805 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2809 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2810 * interrupt will either see the value pre increment or post
2811 * increment. If the interrupt happens pre increment it will have
2812 * restored the counter when it returns. We just need a barrier to
2813 * keep gcc from moving things around.
2817 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2818 size = ARRAY_SIZE(fstack->calls);
2821 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2824 nr_entries = stack_trace_save(fstack->calls, size, skip);
2827 size = nr_entries * sizeof(unsigned long);
2828 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2829 sizeof(*entry) + size, flags, pc);
2832 entry = ring_buffer_event_data(event);
2834 memcpy(&entry->caller, fstack->calls, size);
2835 entry->size = nr_entries;
2837 if (!call_filter_check_discard(call, entry, buffer, event))
2838 __buffer_unlock_commit(buffer, event);
2841 /* Again, don't let gcc optimize things here */
2843 __this_cpu_dec(ftrace_stack_reserve);
2844 preempt_enable_notrace();
2848 static inline void ftrace_trace_stack(struct trace_array *tr,
2849 struct ring_buffer *buffer,
2850 unsigned long flags,
2851 int skip, int pc, struct pt_regs *regs)
2853 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2856 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2859 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2862 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2864 if (rcu_is_watching()) {
2865 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2870 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2871 * but if the above rcu_is_watching() failed, then the NMI
2872 * triggered someplace critical, and rcu_irq_enter() should
2873 * not be called from NMI.
2875 if (unlikely(in_nmi()))
2878 rcu_irq_enter_irqson();
2879 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2880 rcu_irq_exit_irqson();
2884 * trace_dump_stack - record a stack back trace in the trace buffer
2885 * @skip: Number of functions to skip (helper handlers)
2887 void trace_dump_stack(int skip)
2889 unsigned long flags;
2891 if (tracing_disabled || tracing_selftest_running)
2894 local_save_flags(flags);
2896 #ifndef CONFIG_UNWINDER_ORC
2897 /* Skip 1 to skip this function. */
2900 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2901 flags, skip, preempt_count(), NULL);
2903 EXPORT_SYMBOL_GPL(trace_dump_stack);
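/*
 * Example sketch: trace_dump_stack() is exported so code being debugged can
 * drop a kernel stack back trace into the trace buffer rather than the
 * console.  A skip of 0 records from the caller itself; the example_* names
 * are made up for illustration.
 */
#if 0	/* illustration only */
static void example_driver_handler(struct example_dev *dev)
{
	if (unlikely(dev->saw_bad_state))
		trace_dump_stack(0);	/* record who got us here */
}
#endif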
2905 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
2906 static DEFINE_PER_CPU(int, user_stack_count);
2909 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2911 struct trace_event_call *call = &event_user_stack;
2912 struct ring_buffer_event *event;
2913 struct userstack_entry *entry;
2915 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2919 * NMIs cannot handle page faults, even with fixups.
2920 * Saving the user stack can (and often does) fault.
2922 if (unlikely(in_nmi()))
2926 * prevent recursion, since the user stack tracing may
2927 * trigger other kernel events.
2930 if (__this_cpu_read(user_stack_count))
2933 __this_cpu_inc(user_stack_count);
2935 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2936 sizeof(*entry), flags, pc);
2938 goto out_drop_count;
2939 entry = ring_buffer_event_data(event);
2941 entry->tgid = current->tgid;
2942 memset(&entry->caller, 0, sizeof(entry->caller));
2944 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
2945 if (!call_filter_check_discard(call, entry, buffer, event))
2946 __buffer_unlock_commit(buffer, event);
2949 __this_cpu_dec(user_stack_count);
2953 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
2954 static void ftrace_trace_userstack(struct ring_buffer *buffer,
2955 unsigned long flags, int pc)
2958 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
2960 #endif /* CONFIG_STACKTRACE */
2962 /* created for use with alloc_percpu */
2963 struct trace_buffer_struct {
2965 char buffer[4][TRACE_BUF_SIZE];
2968 static struct trace_buffer_struct *trace_percpu_buffer;
2971 * This allows for lockless recording. If we're nested too deeply, then
2972 * this returns NULL.
2974 static char *get_trace_buf(void)
2976 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2978 if (!buffer || buffer->nesting >= 4)
2983 /* Interrupts must see nesting incremented before we use the buffer */
2985 return &buffer->buffer[buffer->nesting][0];
2988 static void put_trace_buf(void)
2990 /* Don't let the decrement of nesting leak before this */
2992 this_cpu_dec(trace_percpu_buffer->nesting);
2995 static int alloc_percpu_trace_buffer(void)
2997 struct trace_buffer_struct *buffers;
2999 buffers = alloc_percpu(struct trace_buffer_struct);
3000 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3003 trace_percpu_buffer = buffers;
3007 static int buffers_allocated;
3009 void trace_printk_init_buffers(void)
3011 if (buffers_allocated)
3014 if (alloc_percpu_trace_buffer())
3017 /* trace_printk() is for debug use only. Don't use it in production. */
3020 pr_warn("**********************************************************\n");
3021 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3023 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3025 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3026 pr_warn("** unsafe for production use. **\n");
3028 pr_warn("** If you see this message and you are not debugging **\n");
3029 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3031 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3032 pr_warn("**********************************************************\n");
3034 /* Expand the buffers to set size */
3035 tracing_update_buffers();
3037 buffers_allocated = 1;
3040 * trace_printk_init_buffers() can be called by modules.
3041 * If that happens, then we need to start cmdline recording
3042 * directly here. If the global_trace.buffer is already
3043 * allocated here, then this was called by module code.
3045 if (global_trace.trace_buffer.buffer)
3046 tracing_start_cmdline_record();
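/*
 * Example sketch: building a trace_printk() call into the kernel or a module
 * is what triggers this init path (and the banner above).  Typical debug
 * usage looks like the function below; the example_* names are made up.
 * Output is read from the tracefs "trace" or "trace_pipe" files.
 */
#if 0	/* illustration only */
static int example_handle_request(struct example_req *req)
{
	trace_printk("handling req %p, len=%u\n", req, req->len);
	return 0;
}
#endif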
3049 void trace_printk_start_comm(void)
3051 /* Start tracing comms if trace printk is set */
3052 if (!buffers_allocated)
3054 tracing_start_cmdline_record();
3057 static void trace_printk_start_stop_comm(int enabled)
3059 if (!buffers_allocated)
3063 tracing_start_cmdline_record();
3065 tracing_stop_cmdline_record();
3069 * trace_vbprintk - write binary msg to tracing buffer
3072 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3074 struct trace_event_call *call = &event_bprint;
3075 struct ring_buffer_event *event;
3076 struct ring_buffer *buffer;
3077 struct trace_array *tr = &global_trace;
3078 struct bprint_entry *entry;
3079 unsigned long flags;
3081 int len = 0, size, pc;
3083 if (unlikely(tracing_selftest_running || tracing_disabled))
3086 /* Don't pollute graph traces with trace_vprintk internals */
3087 pause_graph_tracing();
3089 pc = preempt_count();
3090 preempt_disable_notrace();
3092 tbuffer = get_trace_buf();
3098 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3100 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3103 local_save_flags(flags);
3104 size = sizeof(*entry) + sizeof(u32) * len;
3105 buffer = tr->trace_buffer.buffer;
3106 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3110 entry = ring_buffer_event_data(event);
3114 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3115 if (!call_filter_check_discard(call, entry, buffer, event)) {
3116 __buffer_unlock_commit(buffer, event);
3117 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3124 preempt_enable_notrace();
3125 unpause_graph_tracing();
3129 EXPORT_SYMBOL_GPL(trace_vbprintk);
3133 __trace_array_vprintk(struct ring_buffer *buffer,
3134 unsigned long ip, const char *fmt, va_list args)
3136 struct trace_event_call *call = &event_print;
3137 struct ring_buffer_event *event;
3138 int len = 0, size, pc;
3139 struct print_entry *entry;
3140 unsigned long flags;
3143 if (tracing_disabled || tracing_selftest_running)
3146 /* Don't pollute graph traces with trace_vprintk internals */
3147 pause_graph_tracing();
3149 pc = preempt_count();
3150 preempt_disable_notrace();
3153 tbuffer = get_trace_buf();
3159 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3161 local_save_flags(flags);
3162 size = sizeof(*entry) + len + 1;
3163 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3167 entry = ring_buffer_event_data(event);
3170 memcpy(&entry->buf, tbuffer, len + 1);
3171 if (!call_filter_check_discard(call, entry, buffer, event)) {
3172 __buffer_unlock_commit(buffer, event);
3173 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3180 preempt_enable_notrace();
3181 unpause_graph_tracing();
3187 int trace_array_vprintk(struct trace_array *tr,
3188 unsigned long ip, const char *fmt, va_list args)
3190 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3194 int trace_array_printk(struct trace_array *tr,
3195 unsigned long ip, const char *fmt, ...)
3200 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3204 ret = trace_array_vprintk(tr, ip, fmt, ap);
3210 int trace_array_printk_buf(struct ring_buffer *buffer,
3211 unsigned long ip, const char *fmt, ...)
3216 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3220 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3226 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3228 return trace_array_vprintk(&global_trace, ip, fmt, args);
3230 EXPORT_SYMBOL_GPL(trace_vprintk);
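/*
 * Example sketch: trace_vprintk() is the va_list variant behind the
 * trace_printk() slow path.  Code that already has its own printf-like debug
 * helper can feed the trace buffer the same way; the wrapper below is made
 * up for illustration, while _THIS_IP_ and the __printf() annotation are
 * standard kernel facilities.
 */
#if 0	/* illustration only */
static __printf(1, 2) int example_trace_printf(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vprintk(_THIS_IP_, fmt, ap);
	va_end(ap);

	return ret;
}
#endif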
3232 static void trace_iterator_increment(struct trace_iterator *iter)
3234 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3238 ring_buffer_read(buf_iter, NULL);
3241 static struct trace_entry *
3242 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3243 unsigned long *lost_events)
3245 struct ring_buffer_event *event;
3246 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3249 event = ring_buffer_iter_peek(buf_iter, ts);
3251 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3255 iter->ent_size = ring_buffer_event_length(event);
3256 return ring_buffer_event_data(event);
3262 static struct trace_entry *
3263 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3264 unsigned long *missing_events, u64 *ent_ts)
3266 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3267 struct trace_entry *ent, *next = NULL;
3268 unsigned long lost_events = 0, next_lost = 0;
3269 int cpu_file = iter->cpu_file;
3270 u64 next_ts = 0, ts;
3276 * If we are in a per_cpu trace file, don't bother iterating over
3277 * all CPUs; peek directly at the one CPU.
3279 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3280 if (ring_buffer_empty_cpu(buffer, cpu_file))
3282 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3284 *ent_cpu = cpu_file;
3289 for_each_tracing_cpu(cpu) {
3291 if (ring_buffer_empty_cpu(buffer, cpu))
3294 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3297 * Pick the entry with the smallest timestamp:
3299 if (ent && (!next || ts < next_ts)) {
3303 next_lost = lost_events;
3304 next_size = iter->ent_size;
3308 iter->ent_size = next_size;
3311 *ent_cpu = next_cpu;
3317 *missing_events = next_lost;
3322 /* Find the next real entry, without updating the iterator itself */
3323 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3324 int *ent_cpu, u64 *ent_ts)
3326 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3329 /* Find the next real entry, and increment the iterator to the next entry */
3330 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3332 iter->ent = __find_next_entry(iter, &iter->cpu,
3333 &iter->lost_events, &iter->ts);
3336 trace_iterator_increment(iter);
3338 return iter->ent ? iter : NULL;
3341 static void trace_consume(struct trace_iterator *iter)
3343 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3344 &iter->lost_events);
3347 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3349 struct trace_iterator *iter = m->private;
3353 WARN_ON_ONCE(iter->leftover);
3357 /* can't go backwards */
3362 ent = trace_find_next_entry_inc(iter);
3366 while (ent && iter->idx < i)
3367 ent = trace_find_next_entry_inc(iter);
3374 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3376 struct ring_buffer_event *event;
3377 struct ring_buffer_iter *buf_iter;
3378 unsigned long entries = 0;
3381 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3383 buf_iter = trace_buffer_iter(iter, cpu);
3387 ring_buffer_iter_reset(buf_iter);
3390 * We could have the case with the max latency tracers
3391 * that a reset never took place on a cpu. This is evident
3392 * from the timestamp being before the start of the buffer.
3394 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3395 if (ts >= iter->trace_buffer->time_start)
3398 ring_buffer_read(buf_iter, NULL);
3401 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3405 * The current tracer is copied to avoid global locking all around.
3408 static void *s_start(struct seq_file *m, loff_t *pos)
3410 struct trace_iterator *iter = m->private;
3411 struct trace_array *tr = iter->tr;
3412 int cpu_file = iter->cpu_file;
3418 * copy the tracer to avoid using a global lock all around.
3419 * iter->trace is a copy of current_trace, the pointer to the
3420 * name may be used instead of a strcmp(), as iter->trace->name
3421 * will point to the same string as current_trace->name.
3423 mutex_lock(&trace_types_lock);
3424 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3425 *iter->trace = *tr->current_trace;
3426 mutex_unlock(&trace_types_lock);
3428 #ifdef CONFIG_TRACER_MAX_TRACE
3429 if (iter->snapshot && iter->trace->use_max_tr)
3430 return ERR_PTR(-EBUSY);
3433 if (!iter->snapshot)
3434 atomic_inc(&trace_record_taskinfo_disabled);
3436 if (*pos != iter->pos) {
3441 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3442 for_each_tracing_cpu(cpu)
3443 tracing_iter_reset(iter, cpu);
3445 tracing_iter_reset(iter, cpu_file);
3448 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3453 * If we overflowed the seq_file before, then we want
3454 * to just reuse the trace_seq buffer again.
3460 p = s_next(m, p, &l);
3464 trace_event_read_lock();
3465 trace_access_lock(cpu_file);
3469 static void s_stop(struct seq_file *m, void *p)
3471 struct trace_iterator *iter = m->private;
3473 #ifdef CONFIG_TRACER_MAX_TRACE
3474 if (iter->snapshot && iter->trace->use_max_tr)
3478 if (!iter->snapshot)
3479 atomic_dec(&trace_record_taskinfo_disabled);
3481 trace_access_unlock(iter->cpu_file);
3482 trace_event_read_unlock();
3486 get_total_entries(struct trace_buffer *buf,
3487 unsigned long *total, unsigned long *entries)
3489 unsigned long count;
3495 for_each_tracing_cpu(cpu) {
3496 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3498 * If this buffer has skipped entries, then we hold all
3499 * entries for the trace and we need to ignore the
3500 * ones before the time stamp.
3502 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3503 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3504 /* total is the same as the entries */
3508 ring_buffer_overrun_cpu(buf->buffer, cpu);
3513 static void print_lat_help_header(struct seq_file *m)
3515 seq_puts(m, "# _------=> CPU# \n"
3516 "# / _-----=> irqs-off \n"
3517 "# | / _----=> need-resched \n"
3518 "# || / _---=> hardirq/softirq \n"
3519 "# ||| / _--=> preempt-depth \n"
3521 "# cmd pid ||||| time | caller \n"
3522 "# \\ / ||||| \\ | / \n");
3525 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3527 unsigned long total;
3528 unsigned long entries;
3530 get_total_entries(buf, &total, &entries);
3531 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3532 entries, total, num_online_cpus());
3536 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3539 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3541 print_event_info(buf, m);
3543 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3544 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3547 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3550 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3551 const char tgid_space[] = " ";
3552 const char space[] = " ";
3554 print_event_info(buf, m);
3556 seq_printf(m, "# %s _-----=> irqs-off\n",
3557 tgid ? tgid_space : space);
3558 seq_printf(m, "# %s / _----=> need-resched\n",
3559 tgid ? tgid_space : space);
3560 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3561 tgid ? tgid_space : space);
3562 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3563 tgid ? tgid_space : space);
3564 seq_printf(m, "# %s||| / delay\n",
3565 tgid ? tgid_space : space);
3566 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
3567 tgid ? " TGID " : space);
3568 seq_printf(m, "# | | %s | |||| | |\n",
3569 tgid ? " | " : space);
3573 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3575 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3576 struct trace_buffer *buf = iter->trace_buffer;
3577 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3578 struct tracer *type = iter->trace;
3579 unsigned long entries;
3580 unsigned long total;
3581 const char *name = "preemption";
3585 get_total_entries(buf, &total, &entries);
3587 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3589 seq_puts(m, "# -----------------------------------"
3590 "---------------------------------\n");
3591 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3592 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3593 nsecs_to_usecs(data->saved_latency),
3597 #if defined(CONFIG_PREEMPT_NONE)
3599 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3601 #elif defined(CONFIG_PREEMPT)
3606 /* These are reserved for later use */
3609 seq_printf(m, " #P:%d)\n", num_online_cpus());
3613 seq_puts(m, "# -----------------\n");
3614 seq_printf(m, "# | task: %.16s-%d "
3615 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3616 data->comm, data->pid,
3617 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3618 data->policy, data->rt_priority);
3619 seq_puts(m, "# -----------------\n");
3621 if (data->critical_start) {
3622 seq_puts(m, "# => started at: ");
3623 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3624 trace_print_seq(m, &iter->seq);
3625 seq_puts(m, "\n# => ended at: ");
3626 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3627 trace_print_seq(m, &iter->seq);
3628 seq_puts(m, "\n#\n");
3634 static void test_cpu_buff_start(struct trace_iterator *iter)
3636 struct trace_seq *s = &iter->seq;
3637 struct trace_array *tr = iter->tr;
3639 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3642 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3645 if (cpumask_available(iter->started) &&
3646 cpumask_test_cpu(iter->cpu, iter->started))
3649 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3652 if (cpumask_available(iter->started))
3653 cpumask_set_cpu(iter->cpu, iter->started);
3655 /* Don't print the 'buffer started' message for the first entry of the trace */
3657 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3661 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3663 struct trace_array *tr = iter->tr;
3664 struct trace_seq *s = &iter->seq;
3665 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3666 struct trace_entry *entry;
3667 struct trace_event *event;
3671 test_cpu_buff_start(iter);
3673 event = ftrace_find_event(entry->type);
3675 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3676 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3677 trace_print_lat_context(iter);
3679 trace_print_context(iter);
3682 if (trace_seq_has_overflowed(s))
3683 return TRACE_TYPE_PARTIAL_LINE;
3686 return event->funcs->trace(iter, sym_flags, event);
3688 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3690 return trace_handle_return(s);
3693 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3695 struct trace_array *tr = iter->tr;
3696 struct trace_seq *s = &iter->seq;
3697 struct trace_entry *entry;
3698 struct trace_event *event;
3702 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3703 trace_seq_printf(s, "%d %d %llu ",
3704 entry->pid, iter->cpu, iter->ts);
3706 if (trace_seq_has_overflowed(s))
3707 return TRACE_TYPE_PARTIAL_LINE;
3709 event = ftrace_find_event(entry->type);
3711 return event->funcs->raw(iter, 0, event);
3713 trace_seq_printf(s, "%d ?\n", entry->type);
3715 return trace_handle_return(s);
3718 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3720 struct trace_array *tr = iter->tr;
3721 struct trace_seq *s = &iter->seq;
3722 unsigned char newline = '\n';
3723 struct trace_entry *entry;
3724 struct trace_event *event;
3728 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3729 SEQ_PUT_HEX_FIELD(s, entry->pid);
3730 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3731 SEQ_PUT_HEX_FIELD(s, iter->ts);
3732 if (trace_seq_has_overflowed(s))
3733 return TRACE_TYPE_PARTIAL_LINE;
3736 event = ftrace_find_event(entry->type);
3738 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3739 if (ret != TRACE_TYPE_HANDLED)
3743 SEQ_PUT_FIELD(s, newline);
3745 return trace_handle_return(s);
3748 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3750 struct trace_array *tr = iter->tr;
3751 struct trace_seq *s = &iter->seq;
3752 struct trace_entry *entry;
3753 struct trace_event *event;
3757 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3758 SEQ_PUT_FIELD(s, entry->pid);
3759 SEQ_PUT_FIELD(s, iter->cpu);
3760 SEQ_PUT_FIELD(s, iter->ts);
3761 if (trace_seq_has_overflowed(s))
3762 return TRACE_TYPE_PARTIAL_LINE;
3765 event = ftrace_find_event(entry->type);
3766 return event ? event->funcs->binary(iter, 0, event) :
3770 int trace_empty(struct trace_iterator *iter)
3772 struct ring_buffer_iter *buf_iter;
3775 /* If we are looking at one CPU buffer, only check that one */
3776 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3777 cpu = iter->cpu_file;
3778 buf_iter = trace_buffer_iter(iter, cpu);
3780 if (!ring_buffer_iter_empty(buf_iter))
3783 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3789 for_each_tracing_cpu(cpu) {
3790 buf_iter = trace_buffer_iter(iter, cpu);
3792 if (!ring_buffer_iter_empty(buf_iter))
3795 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3803 /* Called with trace_event_read_lock() held. */
3804 enum print_line_t print_trace_line(struct trace_iterator *iter)
3806 struct trace_array *tr = iter->tr;
3807 unsigned long trace_flags = tr->trace_flags;
3808 enum print_line_t ret;
3810 if (iter->lost_events) {
3811 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3812 iter->cpu, iter->lost_events);
3813 if (trace_seq_has_overflowed(&iter->seq))
3814 return TRACE_TYPE_PARTIAL_LINE;
3817 if (iter->trace && iter->trace->print_line) {
3818 ret = iter->trace->print_line(iter);
3819 if (ret != TRACE_TYPE_UNHANDLED)
3823 if (iter->ent->type == TRACE_BPUTS &&
3824 trace_flags & TRACE_ITER_PRINTK &&
3825 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3826 return trace_print_bputs_msg_only(iter);
3828 if (iter->ent->type == TRACE_BPRINT &&
3829 trace_flags & TRACE_ITER_PRINTK &&
3830 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3831 return trace_print_bprintk_msg_only(iter);
3833 if (iter->ent->type == TRACE_PRINT &&
3834 trace_flags & TRACE_ITER_PRINTK &&
3835 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3836 return trace_print_printk_msg_only(iter);
3838 if (trace_flags & TRACE_ITER_BIN)
3839 return print_bin_fmt(iter);
3841 if (trace_flags & TRACE_ITER_HEX)
3842 return print_hex_fmt(iter);
3844 if (trace_flags & TRACE_ITER_RAW)
3845 return print_raw_fmt(iter);
3847 return print_trace_fmt(iter);
3850 void trace_latency_header(struct seq_file *m)
3852 struct trace_iterator *iter = m->private;
3853 struct trace_array *tr = iter->tr;
3855 /* print nothing if the buffers are empty */
3856 if (trace_empty(iter))
3859 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3860 print_trace_header(m, iter);
3862 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3863 print_lat_help_header(m);
3866 void trace_default_header(struct seq_file *m)
3868 struct trace_iterator *iter = m->private;
3869 struct trace_array *tr = iter->tr;
3870 unsigned long trace_flags = tr->trace_flags;
3872 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3875 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3876 /* print nothing if the buffers are empty */
3877 if (trace_empty(iter))
3879 print_trace_header(m, iter);
3880 if (!(trace_flags & TRACE_ITER_VERBOSE))
3881 print_lat_help_header(m);
3883 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3884 if (trace_flags & TRACE_ITER_IRQ_INFO)
3885 print_func_help_header_irq(iter->trace_buffer,
3888 print_func_help_header(iter->trace_buffer, m,
3894 static void test_ftrace_alive(struct seq_file *m)
3896 if (!ftrace_is_dead())
3898 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3899 "# MAY BE MISSING FUNCTION EVENTS\n");
3902 #ifdef CONFIG_TRACER_MAX_TRACE
3903 static void show_snapshot_main_help(struct seq_file *m)
3905 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3906 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3907 "# Takes a snapshot of the main buffer.\n"
3908 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3909 "# (Doesn't have to be '2' works with any number that\n"
3910 "# is not a '0' or '1')\n");
3913 static void show_snapshot_percpu_help(struct seq_file *m)
3915 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3916 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3917 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3918 "# Takes a snapshot of the main buffer for this cpu.\n");
3920 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3921 "# Must use main snapshot file to allocate.\n");
3923 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3924 "# (Doesn't have to be '2' works with any number that\n"
3925 "# is not a '0' or '1')\n");
3928 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3930 if (iter->tr->allocated_snapshot)
3931 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3933 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3935 seq_puts(m, "# Snapshot commands:\n");
3936 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3937 show_snapshot_main_help(m);
3939 show_snapshot_percpu_help(m);
3942 /* Should never be called */
3943 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3946 static int s_show(struct seq_file *m, void *v)
3948 struct trace_iterator *iter = v;
3951 if (iter->ent == NULL) {
3953 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3955 test_ftrace_alive(m);
3957 if (iter->snapshot && trace_empty(iter))
3958 print_snapshot_help(m, iter);
3959 else if (iter->trace && iter->trace->print_header)
3960 iter->trace->print_header(m);
3962 trace_default_header(m);
3964 } else if (iter->leftover) {
3966 * If we filled the seq_file buffer earlier, we
3967 * want to just show it now.
3969 ret = trace_print_seq(m, &iter->seq);
3971 /* ret should this time be zero, but you never know */
3972 iter->leftover = ret;
3975 print_trace_line(iter);
3976 ret = trace_print_seq(m, &iter->seq);
3978 * If we overflow the seq_file buffer, then it will
3979 * ask us for this data again at start up.
3981 * ret is 0 if seq_file write succeeded.
3984 iter->leftover = ret;
3991 * Should be used after trace_array_get(), trace_types_lock
3992 * ensures that i_cdev was already initialized.
3994 static inline int tracing_get_cpu(struct inode *inode)
3996 if (inode->i_cdev) /* See trace_create_cpu_file() */
3997 return (long)inode->i_cdev - 1;
3998 return RING_BUFFER_ALL_CPUS;
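/*
 * Example sketch: the counterpart in trace_create_cpu_file() stores the cpu
 * number biased by one in inode->i_cdev, so that zero can stand for "all
 * CPUs".  Roughly (paraphrased, not verbatim):
 */
#if 0	/* illustration only */
	if (dentry)	/* See tracing_get_cpu() */
		d_inode(dentry)->i_cdev = (void *)(long)(cpu + 1);
#endif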
4001 static const struct seq_operations tracer_seq_ops = {
4008 static struct trace_iterator *
4009 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4011 struct trace_array *tr = inode->i_private;
4012 struct trace_iterator *iter;
4015 if (tracing_disabled)
4016 return ERR_PTR(-ENODEV);
4018 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4020 return ERR_PTR(-ENOMEM);
4022 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4024 if (!iter->buffer_iter)
4028 * We make a copy of the current tracer to avoid concurrent
4029 * changes on it while we are reading.
4031 mutex_lock(&trace_types_lock);
4032 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4036 *iter->trace = *tr->current_trace;
4038 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4043 #ifdef CONFIG_TRACER_MAX_TRACE
4044 /* Currently only the top directory has a snapshot */
4045 if (tr->current_trace->print_max || snapshot)
4046 iter->trace_buffer = &tr->max_buffer;
4049 iter->trace_buffer = &tr->trace_buffer;
4050 iter->snapshot = snapshot;
4052 iter->cpu_file = tracing_get_cpu(inode);
4053 mutex_init(&iter->mutex);
4055 /* Notify the tracer early; before we stop tracing. */
4056 if (iter->trace && iter->trace->open)
4057 iter->trace->open(iter);
4059 /* Annotate start of buffers if we had overruns */
4060 if (ring_buffer_overruns(iter->trace_buffer->buffer))
4061 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4063 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4064 if (trace_clocks[tr->clock_id].in_ns)
4065 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4067 /* stop the trace while dumping if we are not opening "snapshot" */
4068 if (!iter->snapshot)
4069 tracing_stop_tr(tr);
4071 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4072 for_each_tracing_cpu(cpu) {
4073 iter->buffer_iter[cpu] =
4074 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4077 ring_buffer_read_prepare_sync();
4078 for_each_tracing_cpu(cpu) {
4079 ring_buffer_read_start(iter->buffer_iter[cpu]);
4080 tracing_iter_reset(iter, cpu);
4083 cpu = iter->cpu_file;
4084 iter->buffer_iter[cpu] =
4085 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4087 ring_buffer_read_prepare_sync();
4088 ring_buffer_read_start(iter->buffer_iter[cpu]);
4089 tracing_iter_reset(iter, cpu);
4092 mutex_unlock(&trace_types_lock);
4097 mutex_unlock(&trace_types_lock);
4099 kfree(iter->buffer_iter);
4101 seq_release_private(inode, file);
4102 return ERR_PTR(-ENOMEM);
4105 int tracing_open_generic(struct inode *inode, struct file *filp)
4107 if (tracing_disabled)
4110 filp->private_data = inode->i_private;
4114 bool tracing_is_disabled(void)
4116 return (tracing_disabled) ? true : false;
4120 * Open and update trace_array ref count.
4121 * Must have the current trace_array passed to it.
4123 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4125 struct trace_array *tr = inode->i_private;
4127 if (tracing_disabled)
4130 if (trace_array_get(tr) < 0)
4133 filp->private_data = inode->i_private;
4138 static int tracing_release(struct inode *inode, struct file *file)
4140 struct trace_array *tr = inode->i_private;
4141 struct seq_file *m = file->private_data;
4142 struct trace_iterator *iter;
4145 if (!(file->f_mode & FMODE_READ)) {
4146 trace_array_put(tr);
4150 /* Writes do not use seq_file */
4152 mutex_lock(&trace_types_lock);
4154 for_each_tracing_cpu(cpu) {
4155 if (iter->buffer_iter[cpu])
4156 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4159 if (iter->trace && iter->trace->close)
4160 iter->trace->close(iter);
4162 if (!iter->snapshot)
4163 /* reenable tracing if it was previously enabled */
4164 tracing_start_tr(tr);
4166 __trace_array_put(tr);
4168 mutex_unlock(&trace_types_lock);
4170 mutex_destroy(&iter->mutex);
4171 free_cpumask_var(iter->started);
4173 kfree(iter->buffer_iter);
4174 seq_release_private(inode, file);
4179 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4181 struct trace_array *tr = inode->i_private;
4183 trace_array_put(tr);
4187 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4189 struct trace_array *tr = inode->i_private;
4191 trace_array_put(tr);
4193 return single_release(inode, file);
4196 static int tracing_open(struct inode *inode, struct file *file)
4198 struct trace_array *tr = inode->i_private;
4199 struct trace_iterator *iter;
4202 if (trace_array_get(tr) < 0)
4205 /* If this file was open for write, then erase contents */
4206 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4207 int cpu = tracing_get_cpu(inode);
4208 struct trace_buffer *trace_buf = &tr->trace_buffer;
4210 #ifdef CONFIG_TRACER_MAX_TRACE
4211 if (tr->current_trace->print_max)
4212 trace_buf = &tr->max_buffer;
4215 if (cpu == RING_BUFFER_ALL_CPUS)
4216 tracing_reset_online_cpus(trace_buf);
4218 tracing_reset(trace_buf, cpu);
4221 if (file->f_mode & FMODE_READ) {
4222 iter = __tracing_open(inode, file, false);
4224 ret = PTR_ERR(iter);
4225 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4226 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4230 trace_array_put(tr);
4236 * Some tracers are not suitable for instance buffers.
4237 * A tracer is always available for the global array (toplevel)
4238 * or if it explicitly states that it is.
4241 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4243 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4246 /* Find the next tracer that this trace array may use */
4247 static struct tracer *
4248 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4250 while (t && !trace_ok_for_array(t, tr))
4257 t_next(struct seq_file *m, void *v, loff_t *pos)
4259 struct trace_array *tr = m->private;
4260 struct tracer *t = v;
4265 t = get_tracer_for_array(tr, t->next);
4270 static void *t_start(struct seq_file *m, loff_t *pos)
4272 struct trace_array *tr = m->private;
4276 mutex_lock(&trace_types_lock);
4278 t = get_tracer_for_array(tr, trace_types);
4279 for (; t && l < *pos; t = t_next(m, t, &l))
4285 static void t_stop(struct seq_file *m, void *p)
4287 mutex_unlock(&trace_types_lock);
4290 static int t_show(struct seq_file *m, void *v)
4292 struct tracer *t = v;
4297 seq_puts(m, t->name);
4306 static const struct seq_operations show_traces_seq_ops = {
4313 static int show_traces_open(struct inode *inode, struct file *file)
4315 struct trace_array *tr = inode->i_private;
4319 if (tracing_disabled)
4322 ret = seq_open(file, &show_traces_seq_ops);
4326 m = file->private_data;
4333 tracing_write_stub(struct file *filp, const char __user *ubuf,
4334 size_t count, loff_t *ppos)
4339 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4343 if (file->f_mode & FMODE_READ)
4344 ret = seq_lseek(file, offset, whence);
4346 file->f_pos = ret = 0;
4351 static const struct file_operations tracing_fops = {
4352 .open = tracing_open,
4354 .write = tracing_write_stub,
4355 .llseek = tracing_lseek,
4356 .release = tracing_release,
4359 static const struct file_operations show_traces_fops = {
4360 .open = show_traces_open,
4362 .release = seq_release,
4363 .llseek = seq_lseek,
4367 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4368 size_t count, loff_t *ppos)
4370 struct trace_array *tr = file_inode(filp)->i_private;
4374 len = snprintf(NULL, 0, "%*pb\n",
4375 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4376 mask_str = kmalloc(len, GFP_KERNEL);
4380 len = snprintf(mask_str, len, "%*pb\n",
4381 cpumask_pr_args(tr->tracing_cpumask));
4386 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4395 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4396 size_t count, loff_t *ppos)
4398 struct trace_array *tr = file_inode(filp)->i_private;
4399 cpumask_var_t tracing_cpumask_new;
4402 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4405 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4409 local_irq_disable();
4410 arch_spin_lock(&tr->max_lock);
4411 for_each_tracing_cpu(cpu) {
4413 * Increase/decrease the disabled counter if we are
4414 * about to flip a bit in the cpumask:
4416 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4417 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4418 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4419 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4421 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4422 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4423 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4424 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4427 arch_spin_unlock(&tr->max_lock);
4430 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4431 free_cpumask_var(tracing_cpumask_new);
4436 free_cpumask_var(tracing_cpumask_new);
4441 static const struct file_operations tracing_cpumask_fops = {
4442 .open = tracing_open_generic_tr,
4443 .read = tracing_cpumask_read,
4444 .write = tracing_cpumask_write,
4445 .release = tracing_release_generic_tr,
4446 .llseek = generic_file_llseek,
4449 static int tracing_trace_options_show(struct seq_file *m, void *v)
4451 struct tracer_opt *trace_opts;
4452 struct trace_array *tr = m->private;
4456 mutex_lock(&trace_types_lock);
4457 tracer_flags = tr->current_trace->flags->val;
4458 trace_opts = tr->current_trace->flags->opts;
4460 for (i = 0; trace_options[i]; i++) {
4461 if (tr->trace_flags & (1 << i))
4462 seq_printf(m, "%s\n", trace_options[i]);
4464 seq_printf(m, "no%s\n", trace_options[i]);
4467 for (i = 0; trace_opts[i].name; i++) {
4468 if (tracer_flags & trace_opts[i].bit)
4469 seq_printf(m, "%s\n", trace_opts[i].name);
4471 seq_printf(m, "no%s\n", trace_opts[i].name);
4473 mutex_unlock(&trace_types_lock);
4478 static int __set_tracer_option(struct trace_array *tr,
4479 struct tracer_flags *tracer_flags,
4480 struct tracer_opt *opts, int neg)
4482 struct tracer *trace = tracer_flags->trace;
4485 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4490 tracer_flags->val &= ~opts->bit;
4492 tracer_flags->val |= opts->bit;
4496 /* Try to assign a tracer specific option */
4497 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4499 struct tracer *trace = tr->current_trace;
4500 struct tracer_flags *tracer_flags = trace->flags;
4501 struct tracer_opt *opts = NULL;
4504 for (i = 0; tracer_flags->opts[i].name; i++) {
4505 opts = &tracer_flags->opts[i];
4507 if (strcmp(cmp, opts->name) == 0)
4508 return __set_tracer_option(tr, trace->flags, opts, neg);
4514 /* Some tracers require overwrite to stay enabled */
4515 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4517 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4523 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4525 /* do nothing if flag is already set */
4526 if (!!(tr->trace_flags & mask) == !!enabled)
4529 /* Give the tracer a chance to approve the change */
4530 if (tr->current_trace->flag_changed)
4531 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4535 tr->trace_flags |= mask;
4537 tr->trace_flags &= ~mask;
4539 if (mask == TRACE_ITER_RECORD_CMD)
4540 trace_event_enable_cmd_record(enabled);
4542 if (mask == TRACE_ITER_RECORD_TGID) {
4544 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4548 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4552 trace_event_enable_tgid_record(enabled);
4555 if (mask == TRACE_ITER_EVENT_FORK)
4556 trace_event_follow_fork(tr, enabled);
4558 if (mask == TRACE_ITER_FUNC_FORK)
4559 ftrace_pid_follow_fork(tr, enabled);
4561 if (mask == TRACE_ITER_OVERWRITE) {
4562 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4563 #ifdef CONFIG_TRACER_MAX_TRACE
4564 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4568 if (mask == TRACE_ITER_PRINTK) {
4569 trace_printk_start_stop_comm(enabled);
4570 trace_printk_control(enabled);
4576 static int trace_set_options(struct trace_array *tr, char *option)
4581 size_t orig_len = strlen(option);
4584 cmp = strstrip(option);
4586 len = str_has_prefix(cmp, "no");
4592 mutex_lock(&trace_types_lock);
4594 ret = match_string(trace_options, -1, cmp);
4595 /* If no option could be set, test the specific tracer options */
4597 ret = set_tracer_option(tr, cmp, neg);
4599 ret = set_tracer_flag(tr, 1 << ret, !neg);
4601 mutex_unlock(&trace_types_lock);
4604 * If the first trailing whitespace is replaced with '\0' by strstrip,
4605 * turn it back into a space.
4607 if (orig_len > strlen(option))
4608 option[strlen(option)] = ' ';
4613 static void __init apply_trace_boot_options(void)
4615 char *buf = trace_boot_options_buf;
4619 option = strsep(&buf, ",");
4625 trace_set_options(&global_trace, option);
4627 /* Put back the comma to allow this to be called again */
4634 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4635 size_t cnt, loff_t *ppos)
4637 struct seq_file *m = filp->private_data;
4638 struct trace_array *tr = m->private;
4642 if (cnt >= sizeof(buf))
4645 if (copy_from_user(buf, ubuf, cnt))
4650 ret = trace_set_options(tr, buf);
4659 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4661 struct trace_array *tr = inode->i_private;
4664 if (tracing_disabled)
4667 if (trace_array_get(tr) < 0)
4670 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4672 trace_array_put(tr);
4677 static const struct file_operations tracing_iter_fops = {
4678 .open = tracing_trace_options_open,
4680 .llseek = seq_lseek,
4681 .release = tracing_single_release_tr,
4682 .write = tracing_trace_options_write,
4685 static const char readme_msg[] =
4686 "tracing mini-HOWTO:\n\n"
4687 "# echo 0 > tracing_on : quick way to disable tracing\n"
4688 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4689 " Important files:\n"
4690 " trace\t\t\t- The static contents of the buffer\n"
4691 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4692 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4693 " current_tracer\t- function and latency tracers\n"
4694 " available_tracers\t- list of configured tracers for current_tracer\n"
4695 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4696 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4697 " trace_clock\t\t-change the clock used to order events\n"
4698 " local: Per cpu clock but may not be synced across CPUs\n"
4699 " global: Synced across CPUs but slows tracing down.\n"
4700 " counter: Not a clock, but just an increment\n"
4701 " uptime: Jiffy counter from time of boot\n"
4702 " perf: Same clock that perf events use\n"
4703 #ifdef CONFIG_X86_64
4704 " x86-tsc: TSC cycle counter\n"
4706 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4707 " delta: Delta difference against a buffer-wide timestamp\n"
4708 " absolute: Absolute (standalone) timestamp\n"
4709 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4710 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4711 " tracing_cpumask\t- Limit which CPUs to trace\n"
4712 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4713 "\t\t\t Remove sub-buffer with rmdir\n"
4714 " trace_options\t\t- Set format or modify how tracing happens\n"
4715 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4716 "\t\t\t option name\n"
4717 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4718 #ifdef CONFIG_DYNAMIC_FTRACE
4719 "\n available_filter_functions - list of functions that can be filtered on\n"
4720 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4721 "\t\t\t functions\n"
4722 "\t accepts: func_full_name or glob-matching-pattern\n"
4723 "\t modules: Can select a group via module\n"
4724 "\t Format: :mod:<module-name>\n"
4725 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4726 "\t triggers: a command to perform when function is hit\n"
4727 "\t Format: <function>:<trigger>[:count]\n"
4728 "\t trigger: traceon, traceoff\n"
4729 "\t\t enable_event:<system>:<event>\n"
4730 "\t\t disable_event:<system>:<event>\n"
4731 #ifdef CONFIG_STACKTRACE
4734 #ifdef CONFIG_TRACER_SNAPSHOT
4739 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4740 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4741 "\t The first one will disable tracing every time do_fault is hit\n"
4742 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4743 "\t The first time do trap is hit and it disables tracing, the\n"
4744 "\t counter will decrement to 2. If tracing is already disabled,\n"
4745 "\t the counter will not decrement. It only decrements when the\n"
4746 "\t trigger did work\n"
4747 "\t To remove trigger without count:\n"
4748 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4749 "\t To remove trigger with a count:\n"
4750 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4751 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4752 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4753 "\t modules: Can select a group via module command :mod:\n"
4754 "\t Does not accept triggers\n"
4755 #endif /* CONFIG_DYNAMIC_FTRACE */
4756 #ifdef CONFIG_FUNCTION_TRACER
4757 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4760 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4761 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4762 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4763 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4765 #ifdef CONFIG_TRACER_SNAPSHOT
4766 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4767 "\t\t\t snapshot buffer. Read the contents for more\n"
4768 "\t\t\t information\n"
4770 #ifdef CONFIG_STACK_TRACER
4771 " stack_trace\t\t- Shows the max stack trace when active\n"
4772 " stack_max_size\t- Shows current max stack size that was traced\n"
4773 "\t\t\t Write into this file to reset the max size (trigger a\n"
4774 "\t\t\t new trace)\n"
4775 #ifdef CONFIG_DYNAMIC_FTRACE
4776 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4779 #endif /* CONFIG_STACK_TRACER */
4780 #ifdef CONFIG_DYNAMIC_EVENTS
4781 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4782 "\t\t\t Write into this file to define/undefine new trace events.\n"
4784 #ifdef CONFIG_KPROBE_EVENTS
4785 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4786 "\t\t\t Write into this file to define/undefine new trace events.\n"
4788 #ifdef CONFIG_UPROBE_EVENTS
4789 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4790 "\t\t\t Write into this file to define/undefine new trace events.\n"
4792 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4793 "\t accepts: event-definitions (one definition per line)\n"
4794 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4795 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4796 #ifdef CONFIG_HIST_TRIGGERS
4797 "\t s:[synthetic/]<event> <field> [<field>]\n"
4799 "\t -:[<group>/]<event>\n"
4800 #ifdef CONFIG_KPROBE_EVENTS
4801 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4802 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4804 #ifdef CONFIG_UPROBE_EVENTS
4805 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4807 "\t args: <name>=fetcharg[:type]\n"
4808 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4809 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4810 "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n"
4812 "\t $stack<index>, $stack, $retval, $comm\n"
4814 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4815 "\t b<bit-width>@<bit-offset>/<container-size>,\n"
4816 "\t <type>\\[<array-size>\\]\n"
4817 #ifdef CONFIG_HIST_TRIGGERS
4818 "\t field: <stype> <name>;\n"
4819 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4820 "\t [unsigned] char/int/long\n"
4823 " events/\t\t- Directory containing all trace event subsystems:\n"
4824 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4825 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4826 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4828 " filter\t\t- If set, only events passing filter are traced\n"
4829 " events/<system>/<event>/\t- Directory containing control files for\n"
4831 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4832 " filter\t\t- If set, only events passing filter are traced\n"
4833 " trigger\t\t- If set, a command to perform when event is hit\n"
4834 "\t Format: <trigger>[:count][if <filter>]\n"
4835 "\t trigger: traceon, traceoff\n"
4836 "\t enable_event:<system>:<event>\n"
4837 "\t disable_event:<system>:<event>\n"
4838 #ifdef CONFIG_HIST_TRIGGERS
4839 "\t enable_hist:<system>:<event>\n"
4840 "\t disable_hist:<system>:<event>\n"
4842 #ifdef CONFIG_STACKTRACE
4845 #ifdef CONFIG_TRACER_SNAPSHOT
4848 #ifdef CONFIG_HIST_TRIGGERS
4849 "\t\t hist (see below)\n"
4851 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4852 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4853 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4854 "\t events/block/block_unplug/trigger\n"
4855 "\t The first disables tracing every time block_unplug is hit.\n"
4856 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4857 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4858 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4859 "\t Like function triggers, the counter is only decremented if it\n"
4860 "\t enabled or disabled tracing.\n"
4861 "\t To remove a trigger without a count:\n"
4862 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4863 "\t To remove a trigger with a count:\n"
4864 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4865 "\t Filters can be ignored when removing a trigger.\n"
4866 #ifdef CONFIG_HIST_TRIGGERS
4867 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4868 "\t Format: hist:keys=<field1[,field2,...]>\n"
4869 "\t [:values=<field1[,field2,...]>]\n"
4870 "\t [:sort=<field1[,field2,...]>]\n"
4871 "\t [:size=#entries]\n"
4872 "\t [:pause][:continue][:clear]\n"
4873 "\t [:name=histname1]\n"
4874 "\t [:<handler>.<action>]\n"
4875 "\t [if <filter>]\n\n"
4876 "\t When a matching event is hit, an entry is added to a hash\n"
4877 "\t table using the key(s) and value(s) named, and the value of a\n"
4878 "\t sum called 'hitcount' is incremented. Keys and values\n"
4879 "\t correspond to fields in the event's format description. Keys\n"
4880 "\t can be any field, or the special string 'stacktrace'.\n"
4881 "\t Compound keys consisting of up to two fields can be specified\n"
4882 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4883 "\t fields. Sort keys consisting of up to two fields can be\n"
4884 "\t specified using the 'sort' keyword. The sort direction can\n"
4885 "\t be modified by appending '.descending' or '.ascending' to a\n"
4886 "\t sort field. The 'size' parameter can be used to specify more\n"
4887 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4888 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4889 "\t its histogram data will be shared with other triggers of the\n"
4890 "\t same name, and trigger hits will update this common data.\n\n"
4891 "\t Reading the 'hist' file for the event will dump the hash\n"
4892 "\t table in its entirety to stdout. If there are multiple hist\n"
4893 "\t triggers attached to an event, there will be a table for each\n"
4894 "\t trigger in the output. The table displayed for a named\n"
4895 "\t trigger will be the same as any other instance having the\n"
4896 "\t same name. The default format used to display a given field\n"
4897 "\t can be modified by appending any of the following modifiers\n"
4898 "\t to the field name, as applicable:\n\n"
4899 "\t .hex display a number as a hex value\n"
4900 "\t .sym display an address as a symbol\n"
4901 "\t .sym-offset display an address as a symbol and offset\n"
4902 "\t .execname display a common_pid as a program name\n"
4903 "\t .syscall display a syscall id as a syscall name\n"
4904 "\t .log2 display log2 value rather than raw number\n"
4905 "\t .usecs display a common_timestamp in microseconds\n\n"
4906 "\t The 'pause' parameter can be used to pause an existing hist\n"
4907 "\t trigger or to start a hist trigger but not log any events\n"
4908 "\t until told to do so. 'continue' can be used to start or\n"
4909 "\t restart a paused hist trigger.\n\n"
4910 "\t The 'clear' parameter will clear the contents of a running\n"
4911 "\t hist trigger and leave its current paused/active state\n"
4913 "\t The enable_hist and disable_hist triggers can be used to\n"
4914 "\t have one event conditionally start and stop another event's\n"
4915 "\t already-attached hist trigger. The syntax is analogous to\n"
4916 "\t the enable_event and disable_event triggers.\n\n"
4917 "\t Hist trigger handlers and actions are executed whenever a\n"
4918 "\t a histogram entry is added or updated. They take the form:\n\n"
4919 "\t <handler>.<action>\n\n"
4920 "\t The available handlers are:\n\n"
4921 "\t onmatch(matching.event) - invoke on addition or update\n"
4922 "\t onmax(var) - invoke if var exceeds current max\n"
4923 "\t onchange(var) - invoke action if var changes\n\n"
4924 "\t The available actions are:\n\n"
4925 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
4926 "\t save(field,...) - save current event fields\n"
4927 #ifdef CONFIG_TRACER_SNAPSHOT
4928 "\t snapshot() - snapshot the trace buffer\n"
4934 tracing_readme_read(struct file *filp, char __user *ubuf,
4935 size_t cnt, loff_t *ppos)
4937 return simple_read_from_buffer(ubuf, cnt, ppos,
4938 readme_msg, strlen(readme_msg));
4941 static const struct file_operations tracing_readme_fops = {
4942 .open = tracing_open_generic,
4943 .read = tracing_readme_read,
4944 .llseek = generic_file_llseek,
4947 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4951 if (*pos || m->count)
4956 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4957 if (trace_find_tgid(*ptr))
4964 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4974 v = saved_tgids_next(m, v, &l);
4982 static void saved_tgids_stop(struct seq_file *m, void *v)
4986 static int saved_tgids_show(struct seq_file *m, void *v)
4988 int pid = (int *)v - tgid_map;
4990 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4994 static const struct seq_operations tracing_saved_tgids_seq_ops = {
4995 .start = saved_tgids_start,
4996 .stop = saved_tgids_stop,
4997 .next = saved_tgids_next,
4998 .show = saved_tgids_show,
5001 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5003 if (tracing_disabled)
5006 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5010 static const struct file_operations tracing_saved_tgids_fops = {
5011 .open = tracing_saved_tgids_open,
5013 .llseek = seq_lseek,
5014 .release = seq_release,
5017 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5019 unsigned int *ptr = v;
5021 if (*pos || m->count)
5026 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5028 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5037 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5043 arch_spin_lock(&trace_cmdline_lock);
5045 v = &savedcmd->map_cmdline_to_pid[0];
5047 v = saved_cmdlines_next(m, v, &l);
5055 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5057 arch_spin_unlock(&trace_cmdline_lock);
5061 static int saved_cmdlines_show(struct seq_file *m, void *v)
5063 char buf[TASK_COMM_LEN];
5064 unsigned int *pid = v;
5066 __trace_find_cmdline(*pid, buf);
5067 seq_printf(m, "%d %s\n", *pid, buf);
5071 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5072 .start = saved_cmdlines_start,
5073 .next = saved_cmdlines_next,
5074 .stop = saved_cmdlines_stop,
5075 .show = saved_cmdlines_show,
5078 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5080 if (tracing_disabled)
5083 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5086 static const struct file_operations tracing_saved_cmdlines_fops = {
5087 .open = tracing_saved_cmdlines_open,
5089 .llseek = seq_lseek,
5090 .release = seq_release,
5094 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5095 size_t cnt, loff_t *ppos)
5100 arch_spin_lock(&trace_cmdline_lock);
5101 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5102 arch_spin_unlock(&trace_cmdline_lock);
5104 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5107 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5109 kfree(s->saved_cmdlines);
5110 kfree(s->map_cmdline_to_pid);
5114 static int tracing_resize_saved_cmdlines(unsigned int val)
5116 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5118 s = kmalloc(sizeof(*s), GFP_KERNEL);
5122 if (allocate_cmdlines_buffer(val, s) < 0) {
5127 arch_spin_lock(&trace_cmdline_lock);
5128 savedcmd_temp = savedcmd;
5130 arch_spin_unlock(&trace_cmdline_lock);
5131 free_saved_cmdlines_buffer(savedcmd_temp);
5137 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5138 size_t cnt, loff_t *ppos)
5143 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5147 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5148 if (!val || val > PID_MAX_DEFAULT)
5151 ret = tracing_resize_saved_cmdlines((unsigned int)val);
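	/*
	 * Illustrative use from the shell (assuming tracefs is mounted at
	 * /sys/kernel/tracing): "echo 1024 > saved_cmdlines_size" grows the
	 * pid->comm cache so later reads of saved_cmdlines (and comm
	 * resolution in the "trace" output) can cover more tasks.
	 */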
5160 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5161 .open = tracing_open_generic,
5162 .read = tracing_saved_cmdlines_size_read,
5163 .write = tracing_saved_cmdlines_size_write,
5166 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5167 static union trace_eval_map_item *
5168 update_eval_map(union trace_eval_map_item *ptr)
5170 if (!ptr->map.eval_string) {
5171 if (ptr->tail.next) {
5172 ptr = ptr->tail.next;
5173 /* Set ptr to the next real item (skip head) */
5181 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5183 union trace_eval_map_item *ptr = v;
5186 * Paranoid! If ptr points to end, we don't want to increment past it.
5187 * This really should never happen.
5189 ptr = update_eval_map(ptr);
5190 if (WARN_ON_ONCE(!ptr))
5197 ptr = update_eval_map(ptr);
5202 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5204 union trace_eval_map_item *v;
5207 mutex_lock(&trace_eval_mutex);
5209 v = trace_eval_maps;
5213 while (v && l < *pos) {
5214 v = eval_map_next(m, v, &l);
5220 static void eval_map_stop(struct seq_file *m, void *v)
5222 mutex_unlock(&trace_eval_mutex);
5225 static int eval_map_show(struct seq_file *m, void *v)
5227 union trace_eval_map_item *ptr = v;
5229 seq_printf(m, "%s %ld (%s)\n",
5230 ptr->map.eval_string, ptr->map.eval_value,
5236 static const struct seq_operations tracing_eval_map_seq_ops = {
5237 .start = eval_map_start,
5238 .next = eval_map_next,
5239 .stop = eval_map_stop,
5240 .show = eval_map_show,
5243 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5245 if (tracing_disabled)
5248 return seq_open(filp, &tracing_eval_map_seq_ops);
5251 static const struct file_operations tracing_eval_map_fops = {
5252 .open = tracing_eval_map_open,
5254 .llseek = seq_lseek,
5255 .release = seq_release,
5258 static inline union trace_eval_map_item *
5259 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5261 /* Return tail of array given the head */
5262 return ptr + ptr->head.length + 1;
5266 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5269 struct trace_eval_map **stop;
5270 struct trace_eval_map **map;
5271 union trace_eval_map_item *map_array;
5272 union trace_eval_map_item *ptr;
5277 * The trace_eval_maps contains the map plus a head and tail item,
5278 * where the head holds the module and length of array, and the
5279 * tail holds a pointer to the next list.
5281 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5283 pr_warn("Unable to allocate trace eval mapping\n");
5287 mutex_lock(&trace_eval_mutex);
5289 if (!trace_eval_maps)
5290 trace_eval_maps = map_array;
5292 ptr = trace_eval_maps;
5294 ptr = trace_eval_jmp_to_tail(ptr);
5295 if (!ptr->tail.next)
5297 ptr = ptr->tail.next;
5300 ptr->tail.next = map_array;
5302 map_array->head.mod = mod;
5303 map_array->head.length = len;
5306 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5307 map_array->map = **map;
5310 memset(map_array, 0, sizeof(*map_array));
5312 mutex_unlock(&trace_eval_mutex);
5315 static void trace_create_eval_file(struct dentry *d_tracer)
5317 trace_create_file("eval_map", 0444, d_tracer,
5318 NULL, &tracing_eval_map_fops);
5321 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5322 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5323 static inline void trace_insert_eval_map_file(struct module *mod,
5324 struct trace_eval_map **start, int len) { }
5325 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5327 static void trace_insert_eval_map(struct module *mod,
5328 struct trace_eval_map **start, int len)
5330 struct trace_eval_map **map;
5337 trace_event_eval_update(map, len);
5339 trace_insert_eval_map_file(mod, start, len);
5343 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5344 size_t cnt, loff_t *ppos)
5346 struct trace_array *tr = filp->private_data;
5347 char buf[MAX_TRACER_SIZE+2];
5350 mutex_lock(&trace_types_lock);
5351 r = sprintf(buf, "%s\n", tr->current_trace->name);
5352 mutex_unlock(&trace_types_lock);
5354 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5357 int tracer_init(struct tracer *t, struct trace_array *tr)
5359 tracing_reset_online_cpus(&tr->trace_buffer);
5363 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5367 for_each_tracing_cpu(cpu)
5368 per_cpu_ptr(buf->data, cpu)->entries = val;
5371 #ifdef CONFIG_TRACER_MAX_TRACE
5372 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5373 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5374 struct trace_buffer *size_buf, int cpu_id)
5378 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5379 for_each_tracing_cpu(cpu) {
5380 ret = ring_buffer_resize(trace_buf->buffer,
5381 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5384 per_cpu_ptr(trace_buf->data, cpu)->entries =
5385 per_cpu_ptr(size_buf->data, cpu)->entries;
5388 ret = ring_buffer_resize(trace_buf->buffer,
5389 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5391 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5392 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5397 #endif /* CONFIG_TRACER_MAX_TRACE */
5399 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5400 unsigned long size, int cpu)
5405 * If kernel or user changes the size of the ring buffer
5406 * we use the size that was given, and we can forget about
5407 * expanding it later.
5409 ring_buffer_expanded = true;
5411 /* May be called before buffers are initialized */
5412 if (!tr->trace_buffer.buffer)
5415 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5419 #ifdef CONFIG_TRACER_MAX_TRACE
5420 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5421 !tr->current_trace->use_max_tr)
5424 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5426 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5427 &tr->trace_buffer, cpu);
5430 * AARGH! We are left with different
5431 * size max buffer!!!!
5432 * The max buffer is our "snapshot" buffer.
5433 * When a tracer needs a snapshot (one of the
5434 * latency tracers), it swaps the max buffer
5435 * with the saved snapshot. We succeeded in updating
5436 * the size of the main buffer, but failed to
5437 * update the size of the max buffer. But when we tried
5438 * to reset the main buffer to the original size, we
5439 * failed there too. This is very unlikely to
5440 * happen, but if it does, warn and kill all
5444 tracing_disabled = 1;
5449 if (cpu == RING_BUFFER_ALL_CPUS)
5450 set_buffer_entries(&tr->max_buffer, size);
5452 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5455 #endif /* CONFIG_TRACER_MAX_TRACE */
5457 if (cpu == RING_BUFFER_ALL_CPUS)
5458 set_buffer_entries(&tr->trace_buffer, size);
5460 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5465 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5466 unsigned long size, int cpu_id)
5470 mutex_lock(&trace_types_lock);
5472 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5473 /* make sure this cpu is enabled in the mask */
5474 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5480 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5485 mutex_unlock(&trace_types_lock);
5492 * tracing_update_buffers - used by tracing facility to expand ring buffers
5494 * To save memory when tracing is never used on a system that has it
5495 * configured in, the ring buffers are set to a minimum size. Once a
5496 * user starts to use the tracing facility, they need to grow to
5497 * their default size.
5499 * This function is to be called when a tracer is about to be used.
5501 int tracing_update_buffers(void)
5505 mutex_lock(&trace_types_lock);
5506 if (!ring_buffer_expanded)
5507 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5508 RING_BUFFER_ALL_CPUS);
5509 mutex_unlock(&trace_types_lock);
5514 struct trace_option_dentry;
5517 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5520 * Used to clear out the tracer before deletion of an instance.
5521 * Must have trace_types_lock held.
5523 static void tracing_set_nop(struct trace_array *tr)
5525 if (tr->current_trace == &nop_trace)
5528 tr->current_trace->enabled--;
5530 if (tr->current_trace->reset)
5531 tr->current_trace->reset(tr);
5533 tr->current_trace = &nop_trace;
5536 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5538 /* Only enable if the directory has been created already. */
5542 create_trace_option_files(tr, t);
5545 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5548 #ifdef CONFIG_TRACER_MAX_TRACE
5553 mutex_lock(&trace_types_lock);
5555 if (!ring_buffer_expanded) {
5556 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5557 RING_BUFFER_ALL_CPUS);
5563 for (t = trace_types; t; t = t->next) {
5564 if (strcmp(t->name, buf) == 0)
5571 if (t == tr->current_trace)
5574 #ifdef CONFIG_TRACER_SNAPSHOT
5575 if (t->use_max_tr) {
5576 arch_spin_lock(&tr->max_lock);
5577 if (tr->cond_snapshot)
5579 arch_spin_unlock(&tr->max_lock);
5584 /* Some tracers won't work on kernel command line */
5585 if (system_state < SYSTEM_RUNNING && t->noboot) {
5586 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5591 /* Some tracers are only allowed for the top level buffer */
5592 if (!trace_ok_for_array(t, tr)) {
5597 /* If trace pipe files are being read, we can't change the tracer */
5598 if (tr->current_trace->ref) {
5603 trace_branch_disable();
5605 tr->current_trace->enabled--;
5607 if (tr->current_trace->reset)
5608 tr->current_trace->reset(tr);
5610 /* Current trace needs to be nop_trace before synchronize_rcu */
5611 tr->current_trace = &nop_trace;
5613 #ifdef CONFIG_TRACER_MAX_TRACE
5614 had_max_tr = tr->allocated_snapshot;
5616 if (had_max_tr && !t->use_max_tr) {
5618 * We need to make sure that the update_max_tr sees that
5619 * current_trace changed to nop_trace to keep it from
5620 * swapping the buffers after we resize it.
5621 * The update_max_tr is called from interrupts disabled
5622 * so a synchronize_rcu() is sufficient.
5629 #ifdef CONFIG_TRACER_MAX_TRACE
5630 if (t->use_max_tr && !had_max_tr) {
5631 ret = tracing_alloc_snapshot_instance(tr);
5638 ret = tracer_init(t, tr);
5643 tr->current_trace = t;
5644 tr->current_trace->enabled++;
5645 trace_branch_enable(tr);
5647 mutex_unlock(&trace_types_lock);
5653 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5654 size_t cnt, loff_t *ppos)
5656 struct trace_array *tr = filp->private_data;
5657 char buf[MAX_TRACER_SIZE+1];
5664 if (cnt > MAX_TRACER_SIZE)
5665 cnt = MAX_TRACER_SIZE;
5667 if (copy_from_user(buf, ubuf, cnt))
5672 /* strip ending whitespace. */
5673 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5676 err = tracing_set_tracer(tr, buf);
5686 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5687 size_t cnt, loff_t *ppos)
5692 r = snprintf(buf, sizeof(buf), "%ld\n",
5693 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
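	/*
	 * Hedged unit note: the value behind *ptr is kept in nanoseconds
	 * internally, while these files present and accept microseconds
	 * (with -1 meaning "unset"), hence the nsecs_to_usecs() above.
	 */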
5694 if (r > sizeof(buf))
5696 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5700 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5701 size_t cnt, loff_t *ppos)
5706 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5716 tracing_thresh_read(struct file *filp, char __user *ubuf,
5717 size_t cnt, loff_t *ppos)
5719 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5723 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5724 size_t cnt, loff_t *ppos)
5726 struct trace_array *tr = filp->private_data;
5729 mutex_lock(&trace_types_lock);
5730 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5734 if (tr->current_trace->update_thresh) {
5735 ret = tr->current_trace->update_thresh(tr);
5742 mutex_unlock(&trace_types_lock);
5747 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5750 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5751 size_t cnt, loff_t *ppos)
5753 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5757 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5758 size_t cnt, loff_t *ppos)
5760 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5765 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5767 struct trace_array *tr = inode->i_private;
5768 struct trace_iterator *iter;
5771 if (tracing_disabled)
5774 if (trace_array_get(tr) < 0)
5777 mutex_lock(&trace_types_lock);
5779 /* create a buffer to store the information to pass to userspace */
5780 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5783 __trace_array_put(tr);
5787 trace_seq_init(&iter->seq);
5788 iter->trace = tr->current_trace;
5790 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5795 /* trace pipe does not show start of buffer */
5796 cpumask_setall(iter->started);
5798 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5799 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5801 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5802 if (trace_clocks[tr->clock_id].in_ns)
5803 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5806 iter->trace_buffer = &tr->trace_buffer;
5807 iter->cpu_file = tracing_get_cpu(inode);
5808 mutex_init(&iter->mutex);
5809 filp->private_data = iter;
5811 if (iter->trace->pipe_open)
5812 iter->trace->pipe_open(iter);
5814 nonseekable_open(inode, filp);
5816 tr->current_trace->ref++;
5818 mutex_unlock(&trace_types_lock);
5823 __trace_array_put(tr);
5824 mutex_unlock(&trace_types_lock);
5828 static int tracing_release_pipe(struct inode *inode, struct file *file)
5830 struct trace_iterator *iter = file->private_data;
5831 struct trace_array *tr = inode->i_private;
5833 mutex_lock(&trace_types_lock);
5835 tr->current_trace->ref--;
5837 if (iter->trace->pipe_close)
5838 iter->trace->pipe_close(iter);
5840 mutex_unlock(&trace_types_lock);
5842 free_cpumask_var(iter->started);
5843 mutex_destroy(&iter->mutex);
5846 trace_array_put(tr);
5852 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5854 struct trace_array *tr = iter->tr;
5856 /* Iterators are static, they should be filled or empty */
5857 if (trace_buffer_iter(iter, iter->cpu_file))
5858 return EPOLLIN | EPOLLRDNORM;
5860 if (tr->trace_flags & TRACE_ITER_BLOCK)
5862 * Always select as readable when in blocking mode
5864 return EPOLLIN | EPOLLRDNORM;
5866 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5871 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5873 struct trace_iterator *iter = filp->private_data;
5875 return trace_poll(iter, filp, poll_table);
5878 /* Must be called with iter->mutex held. */
5879 static int tracing_wait_pipe(struct file *filp)
5881 struct trace_iterator *iter = filp->private_data;
5884 while (trace_empty(iter)) {
5886 if ((filp->f_flags & O_NONBLOCK)) {
5891 * We stop blocking once we have read something and tracing has been
5892 * disabled. We still block if tracing is disabled but we have never
5893 * read anything: this allows a user to cat this file and then
5894 * enable tracing. But after we have read something, we give an EOF
5895 * when tracing is disabled again.
5897 * iter->pos will be 0 if we haven't read anything.
5899 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5902 mutex_unlock(&iter->mutex);
5904 ret = wait_on_pipe(iter, 0);
5906 mutex_lock(&iter->mutex);
5919 tracing_read_pipe(struct file *filp, char __user *ubuf,
5920 size_t cnt, loff_t *ppos)
5922 struct trace_iterator *iter = filp->private_data;
5926 * Avoid more than one consumer on a single file descriptor.
5927 * This is just a matter of trace coherency; the ring buffer itself
5930 mutex_lock(&iter->mutex);
5932 /* return any leftover data */
5933 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5937 trace_seq_init(&iter->seq);
5939 if (iter->trace->read) {
5940 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5946 sret = tracing_wait_pipe(filp);
5950 /* stop when tracing is finished */
5951 if (trace_empty(iter)) {
5956 if (cnt >= PAGE_SIZE)
5957 cnt = PAGE_SIZE - 1;
5959 /* reset all but tr, trace, and overruns */
5960 memset(&iter->seq, 0,
5961 sizeof(struct trace_iterator) -
5962 offsetof(struct trace_iterator, seq));
5963 cpumask_clear(iter->started);
5966 trace_event_read_lock();
5967 trace_access_lock(iter->cpu_file);
5968 while (trace_find_next_entry_inc(iter) != NULL) {
5969 enum print_line_t ret;
5970 int save_len = iter->seq.seq.len;
5972 ret = print_trace_line(iter);
5973 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5974 /* don't print partial lines */
5975 iter->seq.seq.len = save_len;
5978 if (ret != TRACE_TYPE_NO_CONSUME)
5979 trace_consume(iter);
5981 if (trace_seq_used(&iter->seq) >= cnt)
5985 * Setting the full flag means we reached the trace_seq buffer
5986 * size, so we should have left via the partial-line condition above.
5987 * If we get here, one of the trace_seq_* functions is being misused.
5989 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5992 trace_access_unlock(iter->cpu_file);
5993 trace_event_read_unlock();
5995 /* Now copy what we have to the user */
5996 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5997 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5998 trace_seq_init(&iter->seq);
6001 * If there was nothing to send to user, in spite of consuming trace
6002 * entries, go back to wait for more entries.
6008 mutex_unlock(&iter->mutex);
6013 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6016 __free_page(spd->pages[idx]);
6019 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
6020 .confirm = generic_pipe_buf_confirm,
6021 .release = generic_pipe_buf_release,
6022 .steal = generic_pipe_buf_steal,
6023 .get = generic_pipe_buf_get,
6027 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6033 /* Seq buffer is page-sized, exactly what we need. */
6035 save_len = iter->seq.seq.len;
6036 ret = print_trace_line(iter);
6038 if (trace_seq_has_overflowed(&iter->seq)) {
6039 iter->seq.seq.len = save_len;
6044 * This should not be hit, because a partial line should
6045 * only be returned if the iter->seq overflowed. But check
6046 * it anyway to be safe.
6048 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6049 iter->seq.seq.len = save_len;
6053 count = trace_seq_used(&iter->seq) - save_len;
6056 iter->seq.seq.len = save_len;
6060 if (ret != TRACE_TYPE_NO_CONSUME)
6061 trace_consume(iter);
6063 if (!trace_find_next_entry_inc(iter)) {
6073 static ssize_t tracing_splice_read_pipe(struct file *filp,
6075 struct pipe_inode_info *pipe,
6079 struct page *pages_def[PIPE_DEF_BUFFERS];
6080 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6081 struct trace_iterator *iter = filp->private_data;
6082 struct splice_pipe_desc spd = {
6084 .partial = partial_def,
6085 .nr_pages = 0, /* This gets updated below. */
6086 .nr_pages_max = PIPE_DEF_BUFFERS,
6087 .ops = &tracing_pipe_buf_ops,
6088 .spd_release = tracing_spd_release_pipe,
6094 if (splice_grow_spd(pipe, &spd))
6097 mutex_lock(&iter->mutex);
6099 if (iter->trace->splice_read) {
6100 ret = iter->trace->splice_read(iter, filp,
6101 ppos, pipe, len, flags);
6106 ret = tracing_wait_pipe(filp);
6110 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6115 trace_event_read_lock();
6116 trace_access_lock(iter->cpu_file);
6118 /* Fill as many pages as possible. */
6119 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6120 spd.pages[i] = alloc_page(GFP_KERNEL);
6124 rem = tracing_fill_pipe_page(rem, iter);
6126 /* Copy the data into the page, so we can start over. */
6127 ret = trace_seq_to_buffer(&iter->seq,
6128 page_address(spd.pages[i]),
6129 trace_seq_used(&iter->seq));
6131 __free_page(spd.pages[i]);
6134 spd.partial[i].offset = 0;
6135 spd.partial[i].len = trace_seq_used(&iter->seq);
6137 trace_seq_init(&iter->seq);
6140 trace_access_unlock(iter->cpu_file);
6141 trace_event_read_unlock();
6142 mutex_unlock(&iter->mutex);
6147 ret = splice_to_pipe(pipe, &spd);
6151 splice_shrink_spd(&spd);
6155 mutex_unlock(&iter->mutex);
6160 tracing_entries_read(struct file *filp, char __user *ubuf,
6161 size_t cnt, loff_t *ppos)
6163 struct inode *inode = file_inode(filp);
6164 struct trace_array *tr = inode->i_private;
6165 int cpu = tracing_get_cpu(inode);
6170 mutex_lock(&trace_types_lock);
6172 if (cpu == RING_BUFFER_ALL_CPUS) {
6173 int cpu, buf_size_same;
6178 /* check if all cpu buffer sizes are the same */
6179 for_each_tracing_cpu(cpu) {
6180 /* fill in the size from first enabled cpu */
6182 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6183 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
6189 if (buf_size_same) {
6190 if (!ring_buffer_expanded)
6191 r = sprintf(buf, "%lu (expanded: %lu)\n",
6193 trace_buf_size >> 10);
6195 r = sprintf(buf, "%lu\n", size >> 10);
6197 r = sprintf(buf, "X\n");
6199 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6201 mutex_unlock(&trace_types_lock);
6203 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6208 tracing_entries_write(struct file *filp, const char __user *ubuf,
6209 size_t cnt, loff_t *ppos)
6211 struct inode *inode = file_inode(filp);
6212 struct trace_array *tr = inode->i_private;
6216 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6220 /* must have at least 1 entry */
6224 /* value is in KB */
6226 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
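	/*
	 * Illustrative use (values are KiB per cpu): "echo 4096 >
	 * buffer_size_kb" resizes every per-cpu buffer to 4 MiB, while
	 * writing to per_cpu/cpuN/buffer_size_kb resizes only that cpu;
	 * both paths end up here via tracing_get_cpu().
	 */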
6236 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6237 size_t cnt, loff_t *ppos)
6239 struct trace_array *tr = filp->private_data;
6242 unsigned long size = 0, expanded_size = 0;
6244 mutex_lock(&trace_types_lock);
6245 for_each_tracing_cpu(cpu) {
6246 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6247 if (!ring_buffer_expanded)
6248 expanded_size += trace_buf_size >> 10;
6250 if (ring_buffer_expanded)
6251 r = sprintf(buf, "%lu\n", size);
6253 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6254 mutex_unlock(&trace_types_lock);
6256 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6260 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6261 size_t cnt, loff_t *ppos)
6264 * There is no need to read what the user has written; this function
6265 * is just to make sure that there is no error when "echo" is used
6274 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6276 struct trace_array *tr = inode->i_private;
6278 /* disable tracing ? */
6279 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6280 tracer_tracing_off(tr);
6281 /* resize the ring buffer to 0 */
6282 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6284 trace_array_put(tr);
6290 tracing_mark_write(struct file *filp, const char __user *ubuf,
6291 size_t cnt, loff_t *fpos)
6293 struct trace_array *tr = filp->private_data;
6294 struct ring_buffer_event *event;
6295 enum event_trigger_type tt = ETT_NONE;
6296 struct ring_buffer *buffer;
6297 struct print_entry *entry;
6298 unsigned long irq_flags;
6299 const char faulted[] = "<faulted>";
6304 /* Used in tracing_mark_raw_write() as well */
6305 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
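	/*
	 * Hedged usage note: this implements the trace_marker file, so a
	 * write such as "echo hello > trace_marker" from user space becomes
	 * a TRACE_PRINT entry in the ring buffer, interleaved with kernel
	 * events in the trace output.
	 */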
6307 if (tracing_disabled)
6310 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6313 if (cnt > TRACE_BUF_SIZE)
6314 cnt = TRACE_BUF_SIZE;
6316 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6318 local_save_flags(irq_flags);
6319 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6321 /* If less than "<faulted>", then make sure we can still add that */
6322 if (cnt < FAULTED_SIZE)
6323 size += FAULTED_SIZE - cnt;
6325 buffer = tr->trace_buffer.buffer;
6326 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6327 irq_flags, preempt_count());
6328 if (unlikely(!event))
6329 /* Ring buffer disabled, return as if not open for write */
6332 entry = ring_buffer_event_data(event);
6333 entry->ip = _THIS_IP_;
6335 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6337 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6344 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6345 /* do not add \n before testing triggers, but add \0 */
6346 entry->buf[cnt] = '\0';
6347 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6350 if (entry->buf[cnt - 1] != '\n') {
6351 entry->buf[cnt] = '\n';
6352 entry->buf[cnt + 1] = '\0';
6354 entry->buf[cnt] = '\0';
6356 __buffer_unlock_commit(buffer, event);
6359 event_triggers_post_call(tr->trace_marker_file, tt);
6367 /* Limit it for now to 3K (including tag) */
6368 #define RAW_DATA_MAX_SIZE (1024*3)
6371 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6372 size_t cnt, loff_t *fpos)
6374 struct trace_array *tr = filp->private_data;
6375 struct ring_buffer_event *event;
6376 struct ring_buffer *buffer;
6377 struct raw_data_entry *entry;
6378 const char faulted[] = "<faulted>";
6379 unsigned long irq_flags;
6384 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
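	/*
	 * Hedged format note: writes to trace_marker_raw are expected to
	 * start with a 4-byte application-chosen tag (the "id" copied into
	 * the entry below), followed by arbitrary binary payload; tools can
	 * then match entries by that tag when post-processing the trace.
	 */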
6386 if (tracing_disabled)
6389 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6392 /* The marker must at least have a tag id */
6393 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6396 if (cnt > TRACE_BUF_SIZE)
6397 cnt = TRACE_BUF_SIZE;
6399 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6401 local_save_flags(irq_flags);
6402 size = sizeof(*entry) + cnt;
6403 if (cnt < FAULT_SIZE_ID)
6404 size += FAULT_SIZE_ID - cnt;
6406 buffer = tr->trace_buffer.buffer;
6407 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6408 irq_flags, preempt_count());
6410 /* Ring buffer disabled, return as if not open for write */
6413 entry = ring_buffer_event_data(event);
6415 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6418 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6423 __buffer_unlock_commit(buffer, event);
6431 static int tracing_clock_show(struct seq_file *m, void *v)
6433 struct trace_array *tr = m->private;
6436 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6438 "%s%s%s%s", i ? " " : "",
6439 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6440 i == tr->clock_id ? "]" : "");
6446 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6450 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6451 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6454 if (i == ARRAY_SIZE(trace_clocks))
6457 mutex_lock(&trace_types_lock);
6461 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6464 * New clock may not be consistent with the previous clock.
6465 * Reset the buffer so that it doesn't have incomparable timestamps.
6467 tracing_reset_online_cpus(&tr->trace_buffer);
6469 #ifdef CONFIG_TRACER_MAX_TRACE
6470 if (tr->max_buffer.buffer)
6471 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6472 tracing_reset_online_cpus(&tr->max_buffer);
6475 mutex_unlock(&trace_types_lock);
6480 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6481 size_t cnt, loff_t *fpos)
6483 struct seq_file *m = filp->private_data;
6484 struct trace_array *tr = m->private;
6486 const char *clockstr;
6489 if (cnt >= sizeof(buf))
6492 if (copy_from_user(buf, ubuf, cnt))
6497 clockstr = strstrip(buf);
6499 ret = tracing_set_clock(tr, clockstr);
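	/*
	 * e.g. "echo mono > trace_clock" (illustrative) switches new events
	 * to a monotonic clock; reading trace_clock lists the names that
	 * are actually available, with the current one shown in brackets.
	 */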
6508 static int tracing_clock_open(struct inode *inode, struct file *file)
6510 struct trace_array *tr = inode->i_private;
6513 if (tracing_disabled)
6516 if (trace_array_get(tr))
6519 ret = single_open(file, tracing_clock_show, inode->i_private);
6521 trace_array_put(tr);
6526 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6528 struct trace_array *tr = m->private;
6530 mutex_lock(&trace_types_lock);
6532 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6533 seq_puts(m, "delta [absolute]\n");
6535 seq_puts(m, "[delta] absolute\n");
6537 mutex_unlock(&trace_types_lock);
6542 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6544 struct trace_array *tr = inode->i_private;
6547 if (tracing_disabled)
6550 if (trace_array_get(tr))
6553 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6555 trace_array_put(tr);
6560 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6564 mutex_lock(&trace_types_lock);
6566 if (abs && tr->time_stamp_abs_ref++)
6570 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6575 if (--tr->time_stamp_abs_ref)
6579 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6581 #ifdef CONFIG_TRACER_MAX_TRACE
6582 if (tr->max_buffer.buffer)
6583 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6586 mutex_unlock(&trace_types_lock);
6591 struct ftrace_buffer_info {
6592 struct trace_iterator iter;
6594 unsigned int spare_cpu;
6598 #ifdef CONFIG_TRACER_SNAPSHOT
6599 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6601 struct trace_array *tr = inode->i_private;
6602 struct trace_iterator *iter;
6606 if (trace_array_get(tr) < 0)
6609 if (file->f_mode & FMODE_READ) {
6610 iter = __tracing_open(inode, file, true);
6612 ret = PTR_ERR(iter);
6614 /* Writes still need the seq_file to hold the private data */
6616 m = kzalloc(sizeof(*m), GFP_KERNEL);
6619 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6627 iter->trace_buffer = &tr->max_buffer;
6628 iter->cpu_file = tracing_get_cpu(inode);
6630 file->private_data = m;
6634 trace_array_put(tr);
6640 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6643 struct seq_file *m = filp->private_data;
6644 struct trace_iterator *iter = m->private;
6645 struct trace_array *tr = iter->tr;
6649 ret = tracing_update_buffers();
6653 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
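	/*
	 * Rough map of the accepted values (a hedged summary of the readme
	 * text): writing 0 frees the snapshot buffer, 1 allocates it if
	 * needed and swaps it with the live buffer, and any other value
	 * just clears the snapshot contents without freeing it.
	 */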
6657 mutex_lock(&trace_types_lock);
6659 if (tr->current_trace->use_max_tr) {
6664 arch_spin_lock(&tr->max_lock);
6665 if (tr->cond_snapshot)
6667 arch_spin_unlock(&tr->max_lock);
6673 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6677 if (tr->allocated_snapshot)
6681 /* Only allow per-cpu swap if the ring buffer supports it */
6682 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6683 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6688 if (!tr->allocated_snapshot) {
6689 ret = tracing_alloc_snapshot_instance(tr);
6693 local_irq_disable();
6694 /* Now, we're going to swap */
6695 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6696 update_max_tr(tr, current, smp_processor_id(), NULL);
6698 update_max_tr_single(tr, current, iter->cpu_file);
6702 if (tr->allocated_snapshot) {
6703 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6704 tracing_reset_online_cpus(&tr->max_buffer);
6706 tracing_reset(&tr->max_buffer, iter->cpu_file);
6716 mutex_unlock(&trace_types_lock);
6720 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6722 struct seq_file *m = file->private_data;
6725 ret = tracing_release(inode, file);
6727 if (file->f_mode & FMODE_READ)
6730 /* If write only, the seq_file is just a stub */
6738 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6739 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6740 size_t count, loff_t *ppos);
6741 static int tracing_buffers_release(struct inode *inode, struct file *file);
6742 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6743 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6745 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6747 struct ftrace_buffer_info *info;
6750 ret = tracing_buffers_open(inode, filp);
6754 info = filp->private_data;
6756 if (info->iter.trace->use_max_tr) {
6757 tracing_buffers_release(inode, filp);
6761 info->iter.snapshot = true;
6762 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6767 #endif /* CONFIG_TRACER_SNAPSHOT */
6770 static const struct file_operations tracing_thresh_fops = {
6771 .open = tracing_open_generic,
6772 .read = tracing_thresh_read,
6773 .write = tracing_thresh_write,
6774 .llseek = generic_file_llseek,
6777 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6778 static const struct file_operations tracing_max_lat_fops = {
6779 .open = tracing_open_generic,
6780 .read = tracing_max_lat_read,
6781 .write = tracing_max_lat_write,
6782 .llseek = generic_file_llseek,
6786 static const struct file_operations set_tracer_fops = {
6787 .open = tracing_open_generic,
6788 .read = tracing_set_trace_read,
6789 .write = tracing_set_trace_write,
6790 .llseek = generic_file_llseek,
6793 static const struct file_operations tracing_pipe_fops = {
6794 .open = tracing_open_pipe,
6795 .poll = tracing_poll_pipe,
6796 .read = tracing_read_pipe,
6797 .splice_read = tracing_splice_read_pipe,
6798 .release = tracing_release_pipe,
6799 .llseek = no_llseek,
6802 static const struct file_operations tracing_entries_fops = {
6803 .open = tracing_open_generic_tr,
6804 .read = tracing_entries_read,
6805 .write = tracing_entries_write,
6806 .llseek = generic_file_llseek,
6807 .release = tracing_release_generic_tr,
6810 static const struct file_operations tracing_total_entries_fops = {
6811 .open = tracing_open_generic_tr,
6812 .read = tracing_total_entries_read,
6813 .llseek = generic_file_llseek,
6814 .release = tracing_release_generic_tr,
6817 static const struct file_operations tracing_free_buffer_fops = {
6818 .open = tracing_open_generic_tr,
6819 .write = tracing_free_buffer_write,
6820 .release = tracing_free_buffer_release,
6823 static const struct file_operations tracing_mark_fops = {
6824 .open = tracing_open_generic_tr,
6825 .write = tracing_mark_write,
6826 .llseek = generic_file_llseek,
6827 .release = tracing_release_generic_tr,
6830 static const struct file_operations tracing_mark_raw_fops = {
6831 .open = tracing_open_generic_tr,
6832 .write = tracing_mark_raw_write,
6833 .llseek = generic_file_llseek,
6834 .release = tracing_release_generic_tr,
6837 static const struct file_operations trace_clock_fops = {
6838 .open = tracing_clock_open,
6840 .llseek = seq_lseek,
6841 .release = tracing_single_release_tr,
6842 .write = tracing_clock_write,
6845 static const struct file_operations trace_time_stamp_mode_fops = {
6846 .open = tracing_time_stamp_mode_open,
6848 .llseek = seq_lseek,
6849 .release = tracing_single_release_tr,
6852 #ifdef CONFIG_TRACER_SNAPSHOT
6853 static const struct file_operations snapshot_fops = {
6854 .open = tracing_snapshot_open,
6856 .write = tracing_snapshot_write,
6857 .llseek = tracing_lseek,
6858 .release = tracing_snapshot_release,
6861 static const struct file_operations snapshot_raw_fops = {
6862 .open = snapshot_raw_open,
6863 .read = tracing_buffers_read,
6864 .release = tracing_buffers_release,
6865 .splice_read = tracing_buffers_splice_read,
6866 .llseek = no_llseek,
6869 #endif /* CONFIG_TRACER_SNAPSHOT */
6871 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6873 struct trace_array *tr = inode->i_private;
6874 struct ftrace_buffer_info *info;
6877 if (tracing_disabled)
6880 if (trace_array_get(tr) < 0)
6883 info = kzalloc(sizeof(*info), GFP_KERNEL);
6885 trace_array_put(tr);
6889 mutex_lock(&trace_types_lock);
6892 info->iter.cpu_file = tracing_get_cpu(inode);
6893 info->iter.trace = tr->current_trace;
6894 info->iter.trace_buffer = &tr->trace_buffer;
6896 /* Force reading ring buffer for first read */
6897 info->read = (unsigned int)-1;
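	/*
	 * Design note (hedged): this backs per_cpu/cpuN/trace_pipe_raw,
	 * which hands raw ring-buffer pages to user space; consumers such
	 * as trace-cmd decode them using the binary layouts exported under
	 * events/*/format rather than the text produced by trace_pipe.
	 */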
6899 filp->private_data = info;
6901 tr->current_trace->ref++;
6903 mutex_unlock(&trace_types_lock);
6905 ret = nonseekable_open(inode, filp);
6907 trace_array_put(tr);
6913 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6915 struct ftrace_buffer_info *info = filp->private_data;
6916 struct trace_iterator *iter = &info->iter;
6918 return trace_poll(iter, filp, poll_table);
6922 tracing_buffers_read(struct file *filp, char __user *ubuf,
6923 size_t count, loff_t *ppos)
6925 struct ftrace_buffer_info *info = filp->private_data;
6926 struct trace_iterator *iter = &info->iter;
6933 #ifdef CONFIG_TRACER_MAX_TRACE
6934 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6939 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6941 if (IS_ERR(info->spare)) {
6942 ret = PTR_ERR(info->spare);
6945 info->spare_cpu = iter->cpu_file;
6951 /* Do we have previous read data to read? */
6952 if (info->read < PAGE_SIZE)
6956 trace_access_lock(iter->cpu_file);
6957 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6961 trace_access_unlock(iter->cpu_file);
6964 if (trace_empty(iter)) {
6965 if ((filp->f_flags & O_NONBLOCK))
6968 ret = wait_on_pipe(iter, 0);
6979 size = PAGE_SIZE - info->read;
6983 ret = copy_to_user(ubuf, info->spare + info->read, size);
6995 static int tracing_buffers_release(struct inode *inode, struct file *file)
6997 struct ftrace_buffer_info *info = file->private_data;
6998 struct trace_iterator *iter = &info->iter;
7000 mutex_lock(&trace_types_lock);
7002 iter->tr->current_trace->ref--;
7004 __trace_array_put(iter->tr);
7007 ring_buffer_free_read_page(iter->trace_buffer->buffer,
7008 info->spare_cpu, info->spare);
7011 mutex_unlock(&trace_types_lock);
7017 struct ring_buffer *buffer;
7020 refcount_t refcount;
7023 static void buffer_ref_release(struct buffer_ref *ref)
7025 if (!refcount_dec_and_test(&ref->refcount))
7027 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7031 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7032 struct pipe_buffer *buf)
7034 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7036 buffer_ref_release(ref);
7040 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7041 struct pipe_buffer *buf)
7043 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7045 if (refcount_read(&ref->refcount) > INT_MAX/2)
7048 refcount_inc(&ref->refcount);
7052 /* Pipe buffer operations for a buffer. */
7053 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7054 .confirm = generic_pipe_buf_confirm,
7055 .release = buffer_pipe_buf_release,
7056 .steal = generic_pipe_buf_nosteal,
7057 .get = buffer_pipe_buf_get,
7061 * Callback from splice_to_pipe(), if we need to release some pages
7062 * at the end of the spd in case we errored out while filling the pipe.
7064 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7066 struct buffer_ref *ref =
7067 (struct buffer_ref *)spd->partial[i].private;
7069 buffer_ref_release(ref);
7070 spd->partial[i].private = 0;
7074 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7075 struct pipe_inode_info *pipe, size_t len,
7078 struct ftrace_buffer_info *info = file->private_data;
7079 struct trace_iterator *iter = &info->iter;
7080 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7081 struct page *pages_def[PIPE_DEF_BUFFERS];
7082 struct splice_pipe_desc spd = {
7084 .partial = partial_def,
7085 .nr_pages_max = PIPE_DEF_BUFFERS,
7086 .ops = &buffer_pipe_buf_ops,
7087 .spd_release = buffer_spd_release,
7089 struct buffer_ref *ref;
7093 #ifdef CONFIG_TRACER_MAX_TRACE
7094 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7098 if (*ppos & (PAGE_SIZE - 1))
7101 if (len & (PAGE_SIZE - 1)) {
7102 if (len < PAGE_SIZE)
7107 if (splice_grow_spd(pipe, &spd))
7111 trace_access_lock(iter->cpu_file);
7112 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
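	/*
	 * Hedged design note: each loop iteration below maps one ring-buffer
	 * page directly into the pipe; the buffer_ref refcount keeps that
	 * page alive until the last pipe buffer referencing it is released,
	 * so the trace data is never copied on this path.
	 */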
7114 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7118 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7124 refcount_set(&ref->refcount, 1);
7125 ref->buffer = iter->trace_buffer->buffer;
7126 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7127 if (IS_ERR(ref->page)) {
7128 ret = PTR_ERR(ref->page);
7133 ref->cpu = iter->cpu_file;
7135 r = ring_buffer_read_page(ref->buffer, &ref->page,
7136 len, iter->cpu_file, 1);
7138 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7144 page = virt_to_page(ref->page);
7146 spd.pages[i] = page;
7147 spd.partial[i].len = PAGE_SIZE;
7148 spd.partial[i].offset = 0;
7149 spd.partial[i].private = (unsigned long)ref;
7153 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7156 trace_access_unlock(iter->cpu_file);
7159 /* did we read anything? */
7160 if (!spd.nr_pages) {
7165 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7168 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7175 ret = splice_to_pipe(pipe, &spd);
7177 splice_shrink_spd(&spd);
7182 static const struct file_operations tracing_buffers_fops = {
7183 .open = tracing_buffers_open,
7184 .read = tracing_buffers_read,
7185 .poll = tracing_buffers_poll,
7186 .release = tracing_buffers_release,
7187 .splice_read = tracing_buffers_splice_read,
7188 .llseek = no_llseek,
7192 tracing_stats_read(struct file *filp, char __user *ubuf,
7193 size_t count, loff_t *ppos)
7195 struct inode *inode = file_inode(filp);
7196 struct trace_array *tr = inode->i_private;
7197 struct trace_buffer *trace_buf = &tr->trace_buffer;
7198 int cpu = tracing_get_cpu(inode);
7199 struct trace_seq *s;
7201 unsigned long long t;
7202 unsigned long usec_rem;
7204 s = kmalloc(sizeof(*s), GFP_KERNEL);
7210 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7211 trace_seq_printf(s, "entries: %ld\n", cnt);
7213 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7214 trace_seq_printf(s, "overrun: %ld\n", cnt);
7216 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7217 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7219 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7220 trace_seq_printf(s, "bytes: %ld\n", cnt);
7222 if (trace_clocks[tr->clock_id].in_ns) {
7223 /* local or global for trace_clock */
7224 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7225 usec_rem = do_div(t, USEC_PER_SEC);
7226 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7229 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7230 usec_rem = do_div(t, USEC_PER_SEC);
7231 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7233 /* counter or tsc mode for trace_clock */
7234 trace_seq_printf(s, "oldest event ts: %llu\n",
7235 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7237 trace_seq_printf(s, "now ts: %llu\n",
7238 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7241 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7242 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7244 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7245 trace_seq_printf(s, "read events: %ld\n", cnt);
7247 count = simple_read_from_buffer(ubuf, count, ppos,
7248 s->buffer, trace_seq_used(s));
7255 static const struct file_operations tracing_stats_fops = {
7256 .open = tracing_open_generic_tr,
7257 .read = tracing_stats_read,
7258 .llseek = generic_file_llseek,
7259 .release = tracing_release_generic_tr,
7262 #ifdef CONFIG_DYNAMIC_FTRACE
7265 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7266 size_t cnt, loff_t *ppos)
7268 unsigned long *p = filp->private_data;
7269 char buf[64]; /* Not too big for a shallow stack */
7272 r = scnprintf(buf, 63, "%ld", *p);
7275 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7278 static const struct file_operations tracing_dyn_info_fops = {
7279 .open = tracing_open_generic,
7280 .read = tracing_read_dyn_info,
7281 .llseek = generic_file_llseek,
7283 #endif /* CONFIG_DYNAMIC_FTRACE */
7285 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7287 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7288 struct trace_array *tr, struct ftrace_probe_ops *ops,
7291 tracing_snapshot_instance(tr);
7295 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7296 struct trace_array *tr, struct ftrace_probe_ops *ops,
7299 struct ftrace_func_mapper *mapper = data;
7303 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7313 tracing_snapshot_instance(tr);
7317 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7318 struct ftrace_probe_ops *ops, void *data)
7320 struct ftrace_func_mapper *mapper = data;
7323 seq_printf(m, "%ps:", (void *)ip);
7325 seq_puts(m, "snapshot");
7328 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7331 seq_printf(m, ":count=%ld\n", *count);
7333 seq_puts(m, ":unlimited\n");
7339 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7340 unsigned long ip, void *init_data, void **data)
7342 struct ftrace_func_mapper *mapper = *data;
7345 mapper = allocate_ftrace_func_mapper();
7351 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7355 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7356 unsigned long ip, void *data)
7358 struct ftrace_func_mapper *mapper = data;
7363 free_ftrace_func_mapper(mapper, NULL);
7367 ftrace_func_mapper_remove_ip(mapper, ip);
7370 static struct ftrace_probe_ops snapshot_probe_ops = {
7371 .func = ftrace_snapshot,
7372 .print = ftrace_snapshot_print,
7375 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7376 .func = ftrace_count_snapshot,
7377 .print = ftrace_snapshot_print,
7378 .init = ftrace_snapshot_init,
7379 .free = ftrace_snapshot_free,
7383 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7384 char *glob, char *cmd, char *param, int enable)
7386 struct ftrace_probe_ops *ops;
7387 void *count = (void *)-1;
7394 /* hash funcs only work with set_ftrace_filter */
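	/*
	 * Illustrative use of this command (hedged, assuming tracefs is
	 * mounted): "echo 'schedule:snapshot:1' > set_ftrace_filter" arms a
	 * one-shot snapshot the next time schedule() is traced; omitting
	 * the count makes it fire on every hit.
	 */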
7398 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7401 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7406 number = strsep(¶m, ":");
7408 if (!strlen(number))
7412 * We use the callback data field (which is a pointer)
7415 ret = kstrtoul(number, 0, (unsigned long *)&count);
7420 ret = tracing_alloc_snapshot_instance(tr);
7424 ret = register_ftrace_function_probe(glob, tr, ops, count);
7427 return ret < 0 ? ret : 0;
7430 static struct ftrace_func_command ftrace_snapshot_cmd = {
7432 .func = ftrace_trace_snapshot_callback,
7435 static __init int register_snapshot_cmd(void)
7437 return register_ftrace_command(&ftrace_snapshot_cmd);
7440 static inline __init int register_snapshot_cmd(void) { return 0; }
7441 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7443 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7445 if (WARN_ON(!tr->dir))
7446 return ERR_PTR(-ENODEV);
7448 /* Top directory uses NULL as the parent */
7449 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7452 /* All sub buffers have a descriptor */
7456 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7458 struct dentry *d_tracer;
7461 return tr->percpu_dir;
7463 d_tracer = tracing_get_dentry(tr);
7464 if (IS_ERR(d_tracer))
7467 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7469 WARN_ONCE(!tr->percpu_dir,
7470 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7472 return tr->percpu_dir;
7475 static struct dentry *
7476 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7477 void *data, long cpu, const struct file_operations *fops)
7479 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7481 if (ret) /* See tracing_get_cpu() */
7482 d_inode(ret)->i_cdev = (void *)(cpu + 1);
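/*
 * Create the per_cpu/cpu<N> directory for one CPU and populate it with the
 * per-cpu variants of the control files: trace, trace_pipe, trace_pipe_raw,
 * stats, buffer_size_kb, and the snapshot files when CONFIG_TRACER_SNAPSHOT
 * is enabled.
 */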
7487 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7489 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7490 struct dentry *d_cpu;
7491 char cpu_dir[30]; /* 30 characters should be more than enough */
7496 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7497 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7499 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7503 /* per cpu trace_pipe */
7504 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7505 tr, cpu, &tracing_pipe_fops);
7508 trace_create_cpu_file("trace", 0644, d_cpu,
7509 tr, cpu, &tracing_fops);
7511 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7512 tr, cpu, &tracing_buffers_fops);
7514 trace_create_cpu_file("stats", 0444, d_cpu,
7515 tr, cpu, &tracing_stats_fops);
7517 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7518 tr, cpu, &tracing_entries_fops);
7520 #ifdef CONFIG_TRACER_SNAPSHOT
7521 trace_create_cpu_file("snapshot", 0644, d_cpu,
7522 tr, cpu, &snapshot_fops);
7524 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7525 tr, cpu, &snapshot_raw_fops);
7529 #ifdef CONFIG_FTRACE_SELFTEST
7530 /* Let selftest have access to static functions in this file */
7531 #include "trace_selftest.c"
7535 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7538 struct trace_option_dentry *topt = filp->private_data;
7541 if (topt->flags->val & topt->opt->bit)
7546 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7550 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7553 struct trace_option_dentry *topt = filp->private_data;
7557 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7561 if (val != 0 && val != 1)
7564 if (!!(topt->flags->val & topt->opt->bit) != val) {
7565 mutex_lock(&trace_types_lock);
7566 ret = __set_tracer_option(topt->tr, topt->flags,
7568 mutex_unlock(&trace_types_lock);
7579 static const struct file_operations trace_options_fops = {
7580 .open = tracing_open_generic,
7581 .read = trace_options_read,
7582 .write = trace_options_write,
7583 .llseek = generic_file_llseek,
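/*
 * trace_options_fops backs the per-tracer option files created under the
 * options/ directory.  Each file reads back "0" or "1" and accepts the same
 * two values to toggle the corresponding tracer_opt bit, e.g. (illustrative,
 * for the function tracer's stack-trace option):
 *
 *   echo 1 > options/func_stack_trace
 */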
7587 * In order to pass in both the trace_array descriptor as well as the index
7588 * to the flag that the trace option file represents, the trace_array
7589 * has a character array of trace_flags_index[], which holds the index
7590 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7591 * The address of this character array is passed to the flag option file
7592 * read/write callbacks.
7594 * In order to extract both the index and the trace_array descriptor,
7595 * get_tr_index() uses the following algorithm.
7599 * As the pointer itself contains the address of the index (remember, index[1] == 1), dereferencing it (idx = *ptr) gives us the index.
7602 * Then to get the trace_array descriptor, by subtracting that index
7603 * from the ptr, we get to the start of the index itself.
7605 * ptr - idx == &index[0]
7607 * Then a simple container_of() from that pointer gets us to the
7608 * trace_array descriptor.
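 *
 * For example (illustrative): if data points at tr->trace_flags_index[3],
 * then *data == 3 and data - 3 == &tr->trace_flags_index[0], from which
 * container_of() recovers tr.
 */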
7610 static void get_tr_index(void *data, struct trace_array **ptr,
7611 unsigned int *pindex)
7613 *pindex = *(unsigned char *)data;
7615 *ptr = container_of(data - *pindex, struct trace_array,
7620 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7623 void *tr_index = filp->private_data;
7624 struct trace_array *tr;
7628 get_tr_index(tr_index, &tr, &index);
7630 if (tr->trace_flags & (1 << index))
7635 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7639 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7642 void *tr_index = filp->private_data;
7643 struct trace_array *tr;
7648 get_tr_index(tr_index, &tr, &index);
7650 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7654 if (val != 0 && val != 1)
7657 mutex_lock(&trace_types_lock);
7658 ret = set_tracer_flag(tr, 1 << index, val);
7659 mutex_unlock(&trace_types_lock);
7669 static const struct file_operations trace_options_core_fops = {
7670 .open = tracing_open_generic,
7671 .read = trace_options_core_read,
7672 .write = trace_options_core_write,
7673 .llseek = generic_file_llseek,
7676 struct dentry *trace_create_file(const char *name,
7678 struct dentry *parent,
7680 const struct file_operations *fops)
7684 ret = tracefs_create_file(name, mode, parent, data, fops);
7686 pr_warn("Could not create tracefs '%s' entry\n", name);
7692 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7694 struct dentry *d_tracer;
7699 d_tracer = tracing_get_dentry(tr);
7700 if (IS_ERR(d_tracer))
7703 tr->options = tracefs_create_dir("options", d_tracer);
7705 pr_warn("Could not create tracefs directory 'options'\n");
7713 create_trace_option_file(struct trace_array *tr,
7714 struct trace_option_dentry *topt,
7715 struct tracer_flags *flags,
7716 struct tracer_opt *opt)
7718 struct dentry *t_options;
7720 t_options = trace_options_init_dentry(tr);
7724 topt->flags = flags;
7728 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7729 &trace_options_fops);
7734 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7736 struct trace_option_dentry *topts;
7737 struct trace_options *tr_topts;
7738 struct tracer_flags *flags;
7739 struct tracer_opt *opts;
7746 flags = tracer->flags;
7748 if (!flags || !flags->opts)
7752 * If this is an instance, only create flags for tracers
7753 * the instance may have.
7755 if (!trace_ok_for_array(tracer, tr))
7758 for (i = 0; i < tr->nr_topts; i++) {
7759 /* Make sure there are no duplicate flags. */
7760 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7766 for (cnt = 0; opts[cnt].name; cnt++)
7769 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7773 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7780 tr->topts = tr_topts;
7781 tr->topts[tr->nr_topts].tracer = tracer;
7782 tr->topts[tr->nr_topts].topts = topts;
7785 for (cnt = 0; opts[cnt].name; cnt++) {
7786 create_trace_option_file(tr, &topts[cnt], flags,
7788 WARN_ONCE(topts[cnt].entry == NULL,
7789 "Failed to create trace option: %s",
7794 static struct dentry *
7795 create_trace_option_core_file(struct trace_array *tr,
7796 const char *option, long index)
7798 struct dentry *t_options;
7800 t_options = trace_options_init_dentry(tr);
7804 return trace_create_file(option, 0644, t_options,
7805 (void *)&tr->trace_flags_index[index],
7806 &trace_options_core_fops);
7809 static void create_trace_options_dir(struct trace_array *tr)
7811 struct dentry *t_options;
7812 bool top_level = tr == &global_trace;
7815 t_options = trace_options_init_dentry(tr);
7819 for (i = 0; trace_options[i]; i++) {
7821 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7822 create_trace_option_core_file(tr, trace_options[i], i);
7827 rb_simple_read(struct file *filp, char __user *ubuf,
7828 size_t cnt, loff_t *ppos)
7830 struct trace_array *tr = filp->private_data;
7834 r = tracer_tracing_is_on(tr);
7835 r = sprintf(buf, "%d\n", r);
7837 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7841 rb_simple_write(struct file *filp, const char __user *ubuf,
7842 size_t cnt, loff_t *ppos)
7844 struct trace_array *tr = filp->private_data;
7845 struct ring_buffer *buffer = tr->trace_buffer.buffer;
7849 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7854 mutex_lock(&trace_types_lock);
7855 if (!!val == tracer_tracing_is_on(tr)) {
7856 val = 0; /* do nothing */
7858 tracer_tracing_on(tr);
7859 if (tr->current_trace->start)
7860 tr->current_trace->start(tr);
7862 tracer_tracing_off(tr);
7863 if (tr->current_trace->stop)
7864 tr->current_trace->stop(tr);
7866 mutex_unlock(&trace_types_lock);
7874 static const struct file_operations rb_simple_fops = {
7875 .open = tracing_open_generic_tr,
7876 .read = rb_simple_read,
7877 .write = rb_simple_write,
7878 .release = tracing_release_generic_tr,
7879 .llseek = default_llseek,
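/*
 * rb_simple_fops backs the per-instance "tracing_on" file created in
 * init_tracer_tracefs() below.  Writing 0 stops recording into the ring
 * buffer without tearing anything down; writing 1 resumes it, e.g.:
 *
 *   echo 0 > /sys/kernel/tracing/tracing_on
 */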
7883 buffer_percent_read(struct file *filp, char __user *ubuf,
7884 size_t cnt, loff_t *ppos)
7886 struct trace_array *tr = filp->private_data;
7890 r = tr->buffer_percent;
7891 r = sprintf(buf, "%d\n", r);
7893 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7897 buffer_percent_write(struct file *filp, const char __user *ubuf,
7898 size_t cnt, loff_t *ppos)
7900 struct trace_array *tr = filp->private_data;
7904 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7914 tr->buffer_percent = val;
7921 static const struct file_operations buffer_percent_fops = {
7922 .open = tracing_open_generic_tr,
7923 .read = buffer_percent_read,
7924 .write = buffer_percent_write,
7925 .release = tracing_release_generic_tr,
7926 .llseek = default_llseek,
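/*
 * buffer_percent_fops backs the "buffer_percent" file (defaulted to 50 in
 * init_tracer_tracefs()).  The value controls how full the ring buffer must
 * be before blocked readers are woken: roughly, 0 wakes them on any new
 * data and 100 only once the buffer is full.
 */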
7929 struct dentry *trace_instance_dir;
7932 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
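/*
 * Allocate one ring buffer and its per-cpu trace_array_cpu data for a
 * trace_array.  allocate_trace_buffers() below also allocates the "max"
 * buffer under CONFIG_TRACER_MAX_TRACE, so that snapshot and max-latency
 * tracers can swap buffers instead of copying them.
 */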
7935 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7937 enum ring_buffer_flags rb_flags;
7939 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7943 buf->buffer = ring_buffer_alloc(size, rb_flags);
7947 buf->data = alloc_percpu(struct trace_array_cpu);
7949 ring_buffer_free(buf->buffer);
7954 /* Allocate the first page for all buffers */
7955 set_buffer_entries(&tr->trace_buffer,
7956 ring_buffer_size(tr->trace_buffer.buffer, 0));
7961 static int allocate_trace_buffers(struct trace_array *tr, int size)
7965 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7969 #ifdef CONFIG_TRACER_MAX_TRACE
7970 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7971 allocate_snapshot ? size : 1);
7973 ring_buffer_free(tr->trace_buffer.buffer);
7974 tr->trace_buffer.buffer = NULL;
7975 free_percpu(tr->trace_buffer.data);
7976 tr->trace_buffer.data = NULL;
7979 tr->allocated_snapshot = allocate_snapshot;
7982 * Only the top level trace array gets its snapshot allocated
7983 * from the kernel command line.
7985 allocate_snapshot = false;
7990 static void free_trace_buffer(struct trace_buffer *buf)
7993 ring_buffer_free(buf->buffer);
7995 free_percpu(buf->data);
8000 static void free_trace_buffers(struct trace_array *tr)
8005 free_trace_buffer(&tr->trace_buffer);
8007 #ifdef CONFIG_TRACER_MAX_TRACE
8008 free_trace_buffer(&tr->max_buffer);
8012 static void init_trace_flags_index(struct trace_array *tr)
8016 /* Used by the trace options files */
8017 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8018 tr->trace_flags_index[i] = i;
8021 static void __update_tracer_options(struct trace_array *tr)
8025 for (t = trace_types; t; t = t->next)
8026 add_tracer_options(tr, t);
8029 static void update_tracer_options(struct trace_array *tr)
8031 mutex_lock(&trace_types_lock);
8032 __update_tracer_options(tr);
8033 mutex_unlock(&trace_types_lock);
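/*
 * instance_mkdir() runs when user space creates a directory under
 * instances/ (wired up via tracefs_create_instance_dir() below); the new
 * trace_array gets its own buffers, events and control files, e.g.:
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 */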
8036 static int instance_mkdir(const char *name)
8038 struct trace_array *tr;
8041 mutex_lock(&event_mutex);
8042 mutex_lock(&trace_types_lock);
8045 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8046 if (tr->name && strcmp(tr->name, name) == 0)
8051 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8055 tr->name = kstrdup(name, GFP_KERNEL);
8059 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8062 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8064 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8066 raw_spin_lock_init(&tr->start_lock);
8068 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8070 tr->current_trace = &nop_trace;
8072 INIT_LIST_HEAD(&tr->systems);
8073 INIT_LIST_HEAD(&tr->events);
8074 INIT_LIST_HEAD(&tr->hist_vars);
8076 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8079 tr->dir = tracefs_create_dir(name, trace_instance_dir);
8083 ret = event_trace_add_tracer(tr->dir, tr);
8085 tracefs_remove_recursive(tr->dir);
8089 ftrace_init_trace_array(tr);
8091 init_tracer_tracefs(tr, tr->dir);
8092 init_trace_flags_index(tr);
8093 __update_tracer_options(tr);
8095 list_add(&tr->list, &ftrace_trace_arrays);
8097 mutex_unlock(&trace_types_lock);
8098 mutex_unlock(&event_mutex);
8103 free_trace_buffers(tr);
8104 free_cpumask_var(tr->tracing_cpumask);
8109 mutex_unlock(&trace_types_lock);
8110 mutex_unlock(&event_mutex);
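/*
 * Counterpart of instance_mkdir(): removing the directory tears the
 * instance down again, provided nothing still holds a reference to it
 * (tr->ref), e.g.:
 *
 *   rmdir /sys/kernel/tracing/instances/foo
 */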
8116 static int instance_rmdir(const char *name)
8118 struct trace_array *tr;
8123 mutex_lock(&event_mutex);
8124 mutex_lock(&trace_types_lock);
8127 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8128 if (tr->name && strcmp(tr->name, name) == 0) {
8137 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
8140 list_del(&tr->list);
8142 /* Disable all the flags that were enabled coming in */
8143 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8144 if ((1 << i) & ZEROED_TRACE_FLAGS)
8145 set_tracer_flag(tr, 1 << i, 0);
8148 tracing_set_nop(tr);
8149 clear_ftrace_function_probes(tr);
8150 event_trace_del_tracer(tr);
8151 ftrace_clear_pids(tr);
8152 ftrace_destroy_function_files(tr);
8153 tracefs_remove_recursive(tr->dir);
8154 free_trace_buffers(tr);
8156 for (i = 0; i < tr->nr_topts; i++) {
8157 kfree(tr->topts[i].topts);
8161 free_cpumask_var(tr->tracing_cpumask);
8168 mutex_unlock(&trace_types_lock);
8169 mutex_unlock(&event_mutex);
8174 static __init void create_trace_instances(struct dentry *d_tracer)
8176 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8179 if (WARN_ON(!trace_instance_dir))
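/*
 * Create the standard set of control files for one trace_array, whether the
 * top level directory or an instance: current_tracer, trace, trace_pipe,
 * buffer_size_kb, trace_marker, tracing_on, the options/ and per_cpu/
 * directories, and so on.
 */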
8184 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8186 struct trace_event_file *file;
8189 trace_create_file("available_tracers", 0444, d_tracer,
8190 tr, &show_traces_fops);
8192 trace_create_file("current_tracer", 0644, d_tracer,
8193 tr, &set_tracer_fops);
8195 trace_create_file("tracing_cpumask", 0644, d_tracer,
8196 tr, &tracing_cpumask_fops);
8198 trace_create_file("trace_options", 0644, d_tracer,
8199 tr, &tracing_iter_fops);
8201 trace_create_file("trace", 0644, d_tracer,
8204 trace_create_file("trace_pipe", 0444, d_tracer,
8205 tr, &tracing_pipe_fops);
8207 trace_create_file("buffer_size_kb", 0644, d_tracer,
8208 tr, &tracing_entries_fops);
8210 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8211 tr, &tracing_total_entries_fops);
8213 trace_create_file("free_buffer", 0200, d_tracer,
8214 tr, &tracing_free_buffer_fops);
8216 trace_create_file("trace_marker", 0220, d_tracer,
8217 tr, &tracing_mark_fops);
8219 file = __find_event_file(tr, "ftrace", "print");
8220 if (file && file->dir)
8221 trace_create_file("trigger", 0644, file->dir, file,
8222 &event_trigger_fops);
8223 tr->trace_marker_file = file;
8225 trace_create_file("trace_marker_raw", 0220, d_tracer,
8226 tr, &tracing_mark_raw_fops);
8228 trace_create_file("trace_clock", 0644, d_tracer, tr,
8231 trace_create_file("tracing_on", 0644, d_tracer,
8232 tr, &rb_simple_fops);
8234 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8235 &trace_time_stamp_mode_fops);
8237 tr->buffer_percent = 50;
8239 trace_create_file("buffer_percent", 0444, d_tracer,
8240 tr, &buffer_percent_fops);
8242 create_trace_options_dir(tr);
8244 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8245 trace_create_file("tracing_max_latency", 0644, d_tracer,
8246 &tr->max_latency, &tracing_max_lat_fops);
8249 if (ftrace_create_function_files(tr, d_tracer))
8250 WARN(1, "Could not allocate function filter files");
8252 #ifdef CONFIG_TRACER_SNAPSHOT
8253 trace_create_file("snapshot", 0644, d_tracer,
8254 tr, &snapshot_fops);
8257 for_each_tracing_cpu(cpu)
8258 tracing_init_tracefs_percpu(tr, cpu);
8260 ftrace_init_tracefs(tr, d_tracer);
8263 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
8265 struct vfsmount *mnt;
8266 struct file_system_type *type;
8269 * To maintain backward compatibility for tools that mount
8270 * debugfs to get to the tracing facility, tracefs is automatically
8271 * mounted to the debugfs/tracing directory.
8273 type = get_fs_type("tracefs");
8276 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8277 put_filesystem(type);
8286 * tracing_init_dentry - initialize top level trace array
8288 * This is called when creating files or directories in the tracing
8289 * directory. It is called via fs_initcall() by any of the boot up code
8290 * and expects to return the dentry of the top level tracing directory.
8292 struct dentry *tracing_init_dentry(void)
8294 struct trace_array *tr = &global_trace;
8296 /* The top level trace array uses NULL as parent */
8300 if (WARN_ON(!tracefs_initialized()) ||
8301 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8302 WARN_ON(!debugfs_initialized())))
8303 return ERR_PTR(-ENODEV);
8306 * As there may still be users that expect the tracing
8307 * files to exist in debugfs/tracing, we must automount
8308 * the tracefs file system there, so older tools still
8309 * work with the newer kernel.
8311 tr->dir = debugfs_create_automount("tracing", NULL,
8312 trace_automount, NULL);
8314 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8315 return ERR_PTR(-ENOMEM);
8321 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8322 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
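/*
 * __start/__stop_ftrace_eval_maps delimit the linker section holding the
 * TRACE_DEFINE_ENUM()/TRACE_DEFINE_SIZEOF() mappings built into the kernel.
 * trace_eval_init() hands them to trace_insert_eval_map() so that event
 * print formats can show symbolic names instead of raw numbers.
 */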
8324 static void __init trace_eval_init(void)
8328 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8329 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8332 #ifdef CONFIG_MODULES
8333 static void trace_module_add_evals(struct module *mod)
8335 if (!mod->num_trace_evals)
8339 * Modules with bad taint do not have events created, do
8340 * not bother with enums either.
8342 if (trace_module_has_bad_taint(mod))
8345 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8348 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8349 static void trace_module_remove_evals(struct module *mod)
8351 union trace_eval_map_item *map;
8352 union trace_eval_map_item **last = &trace_eval_maps;
8354 if (!mod->num_trace_evals)
8357 mutex_lock(&trace_eval_mutex);
8359 map = trace_eval_maps;
8362 if (map->head.mod == mod)
8364 map = trace_eval_jmp_to_tail(map);
8365 last = &map->tail.next;
8366 map = map->tail.next;
8371 *last = trace_eval_jmp_to_tail(map)->tail.next;
8374 mutex_unlock(&trace_eval_mutex);
8377 static inline void trace_module_remove_evals(struct module *mod) { }
8378 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8380 static int trace_module_notify(struct notifier_block *self,
8381 unsigned long val, void *data)
8383 struct module *mod = data;
8386 case MODULE_STATE_COMING:
8387 trace_module_add_evals(mod);
8389 case MODULE_STATE_GOING:
8390 trace_module_remove_evals(mod);
8397 static struct notifier_block trace_module_nb = {
8398 .notifier_call = trace_module_notify,
8401 #endif /* CONFIG_MODULES */
8403 static __init int tracer_init_tracefs(void)
8405 struct dentry *d_tracer;
8407 trace_access_lock_init();
8409 d_tracer = tracing_init_dentry();
8410 if (IS_ERR(d_tracer))
8415 init_tracer_tracefs(&global_trace, d_tracer);
8416 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8418 trace_create_file("tracing_thresh", 0644, d_tracer,
8419 &global_trace, &tracing_thresh_fops);
8421 trace_create_file("README", 0444, d_tracer,
8422 NULL, &tracing_readme_fops);
8424 trace_create_file("saved_cmdlines", 0444, d_tracer,
8425 NULL, &tracing_saved_cmdlines_fops);
8427 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8428 NULL, &tracing_saved_cmdlines_size_fops);
8430 trace_create_file("saved_tgids", 0444, d_tracer,
8431 NULL, &tracing_saved_tgids_fops);
8435 trace_create_eval_file(d_tracer);
8437 #ifdef CONFIG_MODULES
8438 register_module_notifier(&trace_module_nb);
8441 #ifdef CONFIG_DYNAMIC_FTRACE
8442 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8443 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8446 create_trace_instances(d_tracer);
8448 update_tracer_options(&global_trace);
8453 static int trace_panic_handler(struct notifier_block *this,
8454 unsigned long event, void *unused)
8456 if (ftrace_dump_on_oops)
8457 ftrace_dump(ftrace_dump_on_oops);
8461 static struct notifier_block trace_panic_notifier = {
8462 .notifier_call = trace_panic_handler,
8464 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8467 static int trace_die_handler(struct notifier_block *self,
8473 if (ftrace_dump_on_oops)
8474 ftrace_dump(ftrace_dump_on_oops);
8482 static struct notifier_block trace_die_notifier = {
8483 .notifier_call = trace_die_handler,
8488 * printk is set to max of 1024, we really don't need it that big.
8489 * Nothing should be printing 1000 characters anyway.
8491 #define TRACE_MAX_PRINT 1000
8494 * Define here KERN_TRACE so that we have one place to modify
8495 * it if we decide to change what log level the ftrace dump
8498 #define KERN_TRACE KERN_EMERG
8501 trace_printk_seq(struct trace_seq *s)
8503 /* Probably should print a warning here. */
8504 if (s->seq.len >= TRACE_MAX_PRINT)
8505 s->seq.len = TRACE_MAX_PRINT;
8508 * More paranoid code. Although the buffer size is set to
8509 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8510 * an extra layer of protection.
8512 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8513 s->seq.len = s->seq.size - 1;
8515 /* should already be NUL-terminated, but we are paranoid. */
8516 s->buffer[s->seq.len] = 0;
8518 printk(KERN_TRACE "%s", s->buffer);
8523 void trace_init_global_iter(struct trace_iterator *iter)
8525 iter->tr = &global_trace;
8526 iter->trace = iter->tr->current_trace;
8527 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8528 iter->trace_buffer = &global_trace.trace_buffer;
8530 if (iter->trace && iter->trace->open)
8531 iter->trace->open(iter);
8533 /* Annotate start of buffers if we had overruns */
8534 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8535 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8537 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8538 if (trace_clocks[iter->tr->clock_id].in_ns)
8539 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
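/*
 * ftrace_dump() dumps the ring buffer contents to the console.  It is
 * called from the panic and die notifiers above when ftrace_dump_on_oops
 * is set, and from sysrq-z; oops_dump_mode selects between dumping every
 * CPU's buffer and only the buffer of the CPU that triggered the dump.
 */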
8542 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8544 /* use static because iter can be a bit big for the stack */
8545 static struct trace_iterator iter;
8546 static atomic_t dump_running;
8547 struct trace_array *tr = &global_trace;
8548 unsigned int old_userobj;
8549 unsigned long flags;
8552 /* Only allow one dump user at a time. */
8553 if (atomic_inc_return(&dump_running) != 1) {
8554 atomic_dec(&dump_running);
8559 * Always turn off tracing when we dump.
8560 * We don't need to show trace output of what happens
8561 * between multiple crashes.
8563 * If the user does a sysrq-z, then they can re-enable
8564 * tracing with echo 1 > tracing_on.
8568 local_irq_save(flags);
8569 printk_nmi_direct_enter();
8571 /* Simulate the iterator */
8572 trace_init_global_iter(&iter);
8574 for_each_tracing_cpu(cpu) {
8575 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8578 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8580 /* don't look at user memory in panic mode */
8581 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8583 switch (oops_dump_mode) {
8585 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8588 iter.cpu_file = raw_smp_processor_id();
8593 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8594 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8597 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8599 /* Did function tracer already get disabled? */
8600 if (ftrace_is_dead()) {
8601 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8602 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8606 * We need to stop all tracing on all CPUs to read
8607 * the next buffer. This is a bit expensive, but is
8608 * not done often. We read everything we can,
8609 * and then release the locks again.
8612 while (!trace_empty(&iter)) {
8615 printk(KERN_TRACE "---------------------------------\n");
8619 /* reset all but tr, trace, and overruns */
8620 memset(&iter.seq, 0,
8621 sizeof(struct trace_iterator) -
8622 offsetof(struct trace_iterator, seq));
8623 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8626 if (trace_find_next_entry_inc(&iter) != NULL) {
8629 ret = print_trace_line(&iter);
8630 if (ret != TRACE_TYPE_NO_CONSUME)
8631 trace_consume(&iter);
8633 touch_nmi_watchdog();
8635 trace_printk_seq(&iter.seq);
8639 printk(KERN_TRACE " (ftrace buffer empty)\n");
8641 printk(KERN_TRACE "---------------------------------\n");
8644 tr->trace_flags |= old_userobj;
8646 for_each_tracing_cpu(cpu) {
8647 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8649 atomic_dec(&dump_running);
8650 printk_nmi_direct_exit();
8651 local_irq_restore(flags);
8653 EXPORT_SYMBOL_GPL(ftrace_dump);
8655 int trace_run_command(const char *buf, int (*createfn)(int, char **))
8662 argv = argv_split(GFP_KERNEL, buf, &argc);
8667 ret = createfn(argc, argv);
8674 #define WRITE_BUFSIZE 4096
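/*
 * Common write handler for command-style tracefs files (e.g. kprobe_events):
 * copy the user buffer in WRITE_BUFSIZE chunks, split it into lines, strip
 * '#' comments, and feed each line to createfn() via trace_run_command()
 * and argv_split().
 */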
8676 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8677 size_t count, loff_t *ppos,
8678 int (*createfn)(int, char **))
8680 char *kbuf, *buf, *tmp;
8685 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8689 while (done < count) {
8690 size = count - done;
8692 if (size >= WRITE_BUFSIZE)
8693 size = WRITE_BUFSIZE - 1;
8695 if (copy_from_user(kbuf, buffer + done, size)) {
8702 tmp = strchr(buf, '\n');
8705 size = tmp - buf + 1;
8708 if (done + size < count) {
8711 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8712 pr_warn("Line length is too long: Should be less than %d\n",
8720 /* Remove comments */
8721 tmp = strchr(buf, '#');
8726 ret = trace_run_command(buf, createfn);
8731 } while (done < count);
8741 __init static int tracer_alloc_buffers(void)
8747 * Make sure we don't accidentally add more trace options
8748 * than we have bits for.
8750 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
8752 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8755 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
8756 goto out_free_buffer_mask;
8758 /* Only allocate trace_printk buffers if a trace_printk exists */
8759 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
8760 /* Must be called before global_trace.buffer is allocated */
8761 trace_printk_init_buffers();
8763 /* To save memory, keep the ring buffer size to its minimum */
8764 if (ring_buffer_expanded)
8765 ring_buf_size = trace_buf_size;
8769 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
8770 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
8772 raw_spin_lock_init(&global_trace.start_lock);
8775 * The prepare callback allocates some memory for the ring buffer. We
8776 * don't free the buffer if the CPU goes down. If we were to free
8777 * the buffer, then the user would lose any trace that was in the
8778 * buffer. The memory will be removed once the "instance" is removed.
8780 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8781 "trace/RB:preapre", trace_rb_cpu_prepare,
8784 goto out_free_cpumask;
8785 /* Used for event triggers */
8787 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8789 goto out_rm_hp_state;
8791 if (trace_create_savedcmd() < 0)
8792 goto out_free_temp_buffer;
8794 /* TODO: make the number of buffers hot pluggable with CPUs */
8795 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
8796 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8798 goto out_free_savedcmd;
8801 if (global_trace.buffer_disabled)
8804 if (trace_boot_clock) {
8805 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8807 pr_warn("Trace clock %s not defined, going back to default\n",
8812 * register_tracer() might reference current_trace, so it
8813 * needs to be set before we register anything. This is
8814 * just a bootstrap of current_trace anyway.
8816 global_trace.current_trace = &nop_trace;
8818 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8820 ftrace_init_global_array_ops(&global_trace);
8822 init_trace_flags_index(&global_trace);
8824 register_tracer(&nop_trace);
8826 /* Function tracing may start here (via kernel command line) */
8827 init_function_trace();
8829 /* All seems OK, enable tracing */
8830 tracing_disabled = 0;
8832 atomic_notifier_chain_register(&panic_notifier_list,
8833 &trace_panic_notifier);
8835 register_die_notifier(&trace_die_notifier);
8837 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8839 INIT_LIST_HEAD(&global_trace.systems);
8840 INIT_LIST_HEAD(&global_trace.events);
8841 INIT_LIST_HEAD(&global_trace.hist_vars);
8842 list_add(&global_trace.list, &ftrace_trace_arrays);
8844 apply_trace_boot_options();
8846 register_snapshot_cmd();
8851 free_saved_cmdlines_buffer(savedcmd);
8852 out_free_temp_buffer:
8853 ring_buffer_free(temp_buffer);
8855 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
8857 free_cpumask_var(global_trace.tracing_cpumask);
8858 out_free_buffer_mask:
8859 free_cpumask_var(tracing_buffer_mask);
8864 void __init early_trace_init(void)
8866 if (tracepoint_printk) {
8867 tracepoint_print_iter =
8868 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8869 if (WARN_ON(!tracepoint_print_iter))
8870 tracepoint_printk = 0;
8872 static_key_enable(&tracepoint_printk_key.key);
8874 tracer_alloc_buffers();
8877 void __init trace_init(void)
8882 __init static int clear_boot_tracer(void)
8885 * The default bootup tracer name is stored in an init section.
8886 * This function is called at late_initcall time. If we did not
8887 * find the boot tracer, then clear it out, to prevent a
8888 * later registration from accessing the buffer that is
8889 * about to be freed.
8891 if (!default_bootup_tracer)
8894 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8895 default_bootup_tracer);
8896 default_bootup_tracer = NULL;
8901 fs_initcall(tracer_init_tracefs);
8902 late_initcall_sync(clear_boot_tracer);
8904 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
8905 __init static int tracing_set_default_clock(void)
8907 /* sched_clock_stable() is determined in late_initcall */
8908 if (!trace_boot_clock && !sched_clock_stable()) {
8910 "Unstable clock detected, switching default tracing clock to \"global\"\n"
8911 "If you want to keep using the local clock, then add:\n"
8912 " \"trace_clock=local\"\n"
8913 "on the kernel command line\n");
8914 tracing_set_clock(&global_trace, "global");
8919 late_initcall_sync(tracing_set_default_clock);