1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/notifier.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/poll.h>
42 #include <linux/nmi.h>
44 #include <linux/trace.h>
45 #include <linux/sched/clock.h>
46 #include <linux/sched/rt.h>
49 #include "trace_output.h"
52 * On boot up, the ring buffer is set to the minimum size, so that
53 * we do not waste memory on systems that are not using tracing.
55 bool ring_buffer_expanded;
58 * We need to change this state when a selftest is running.
59 * A selftest will look into the ring buffer to count the
60 * entries inserted during the selftest, but concurrent
61 * insertions into the ring buffer, such as trace_printk(), could occur
62 * at the same time, giving false positive or negative results.
64 static bool __read_mostly tracing_selftest_running;
67 * If a tracer is running, we do not want to run SELFTEST.
69 bool __read_mostly tracing_selftest_disabled;
71 /* Pipe tracepoints to printk */
72 struct trace_iterator *tracepoint_print_iter;
73 int tracepoint_printk;
74 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
76 /* For tracers that don't implement custom flags */
77 static struct tracer_opt dummy_tracer_opt[] = {
82 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
88 * To prevent the comm cache from being overwritten when no
89 * tracing is active, only save the comm when a trace event
90 * occurred.
92 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
95 * Kill all tracing for good (never come back).
96 * It is initialized to 1 but will turn to zero if the initialization
97 * of the tracer is successful. But that is the only place that sets
98 * this back to zero.
100 static int tracing_disabled = 1;
102 cpumask_var_t __read_mostly tracing_buffer_mask;
105 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
108 * is set, then ftrace_dump is called. This will output the contents
109 * of the ftrace buffers to the console. This is very useful for
110 * capturing traces that lead to crashes and outputting them to a
111 * serial console.
112 *
113 * It is off by default. You can enable it either by specifying
114 * "ftrace_dump_on_oops" on the kernel command line, or by setting
115 * /proc/sys/kernel/ftrace_dump_on_oops
116 * Set 1 if you want to dump buffers of all CPUs
117 * Set 2 if you want to dump the buffer of the CPU that triggered oops
120 enum ftrace_dump_mode ftrace_dump_on_oops;
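/*
 * Example (illustrative, not part of the original source): the settings
 * described above can be applied from the kernel command line or at
 * run time, e.g.:
 *
 *	ftrace_dump_on_oops		(boot: dump the buffers of all CPUs)
 *	ftrace_dump_on_oops=orig_cpu	(boot: dump only the CPU that oopsed)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */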
122 /* When set, tracing will stop when a WARN*() is hit */
123 int __disable_trace_on_warning;
125 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
126 /* Map of enums to their values, for "eval_map" file */
127 struct trace_eval_map_head {
129 unsigned long length;
132 union trace_eval_map_item;
134 struct trace_eval_map_tail {
136 * "end" is first and points to NULL as it must be different
137 * than "mod" or "eval_string"
139 union trace_eval_map_item *next;
140 const char *end; /* points to NULL */
143 static DEFINE_MUTEX(trace_eval_mutex);
146 * The trace_eval_maps are saved in an array with two extra elements,
147 * one at the beginning, and one at the end. The beginning item contains
148 * the count of the saved maps (head.length), and the module they
149 * belong to if not built in (head.mod). The ending item contains a
150 * pointer to the next array of saved eval_map items.
152 union trace_eval_map_item {
153 struct trace_eval_map map;
154 struct trace_eval_map_head head;
155 struct trace_eval_map_tail tail;
158 static union trace_eval_map_item *trace_eval_maps;
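/*
 * Illustrative layout of one saved block of eval maps (a sketch based on
 * the comment above, not taken from the original source). The tail's
 * "next" pointer chains to the head of the following saved block, if any:
 *
 *	[ head: length, mod ][ map 0 ][ map 1 ] ... [ map N-1 ][ tail: next ]
 */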
159 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
161 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
163 #define MAX_TRACER_SIZE 100
164 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
165 static char *default_bootup_tracer;
167 static bool allocate_snapshot;
169 static int __init set_cmdline_ftrace(char *str)
171 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
172 default_bootup_tracer = bootup_tracer_buf;
173 /* We are using ftrace early, expand it */
174 ring_buffer_expanded = true;
177 __setup("ftrace=", set_cmdline_ftrace);
179 static int __init set_ftrace_dump_on_oops(char *str)
181 if (*str++ != '=' || !*str) {
182 ftrace_dump_on_oops = DUMP_ALL;
186 if (!strcmp("orig_cpu", str)) {
187 ftrace_dump_on_oops = DUMP_ORIG;
193 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
195 static int __init stop_trace_on_warning(char *str)
197 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
198 __disable_trace_on_warning = 1;
201 __setup("traceoff_on_warning", stop_trace_on_warning);
203 static int __init boot_alloc_snapshot(char *str)
205 allocate_snapshot = true;
206 /* We also need the main ring buffer expanded */
207 ring_buffer_expanded = true;
210 __setup("alloc_snapshot", boot_alloc_snapshot);
213 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
215 static int __init set_trace_boot_options(char *str)
217 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
220 __setup("trace_options=", set_trace_boot_options);
222 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
223 static char *trace_boot_clock __initdata;
225 static int __init set_trace_boot_clock(char *str)
227 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
228 trace_boot_clock = trace_boot_clock_buf;
231 __setup("trace_clock=", set_trace_boot_clock);
233 static int __init set_tracepoint_printk(char *str)
235 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
236 tracepoint_printk = 1;
239 __setup("tp_printk", set_tracepoint_printk);
241 unsigned long long ns2usecs(u64 nsec)
248 /* trace_flags holds trace_options default values */
249 #define TRACE_DEFAULT_FLAGS \
250 (FUNCTION_DEFAULT_FLAGS | \
251 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
252 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
253 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
254 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
256 /* trace_options that are only supported by global_trace */
257 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
258 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
260 /* trace_flags that are default zero for instances */
261 #define ZEROED_TRACE_FLAGS \
262 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
265 * The global_trace is the descriptor that holds the top-level tracing
266 * buffers for the live tracing.
268 static struct trace_array global_trace = {
269 .trace_flags = TRACE_DEFAULT_FLAGS,
272 LIST_HEAD(ftrace_trace_arrays);
274 int trace_array_get(struct trace_array *this_tr)
276 struct trace_array *tr;
279 mutex_lock(&trace_types_lock);
280 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
287 mutex_unlock(&trace_types_lock);
292 static void __trace_array_put(struct trace_array *this_tr)
294 WARN_ON(!this_tr->ref);
298 void trace_array_put(struct trace_array *this_tr)
300 mutex_lock(&trace_types_lock);
301 __trace_array_put(this_tr);
302 mutex_unlock(&trace_types_lock);
305 int call_filter_check_discard(struct trace_event_call *call, void *rec,
306 struct ring_buffer *buffer,
307 struct ring_buffer_event *event)
309 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
310 !filter_match_preds(call->filter, rec)) {
311 __trace_event_discard_commit(buffer, event);
318 void trace_free_pid_list(struct trace_pid_list *pid_list)
320 vfree(pid_list->pids);
325 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
326 * @filtered_pids: The list of pids to check
327 * @search_pid: The PID to find in @filtered_pids
329 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
332 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
335 * If pid_max changed after filtered_pids was created, we
336 * by default ignore all pids greater than the previous pid_max.
338 if (search_pid >= filtered_pids->pid_max)
341 return test_bit(search_pid, filtered_pids->pids);
345 * trace_ignore_this_task - should a task be ignored for tracing
346 * @filtered_pids: The list of pids to check
347 * @task: The task that should be ignored if not filtered
349 * Checks if @task should be traced or not from @filtered_pids.
350 * Returns true if @task should *NOT* be traced.
351 * Returns false if @task should be traced.
354 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
357 * Return false, because if filtered_pids does not exist,
358 * all pids are good to trace.
363 return !trace_find_filtered_pid(filtered_pids, task->pid);
367 * trace_filter_add_remove_task - Add or remove a task from a pid_list
368 * @pid_list: The list to modify
369 * @self: The current task for fork or NULL for exit
370 * @task: The task to add or remove
372 * If adding a task, if @self is defined, the task is only added if @self
373 * is also included in @pid_list. This happens on fork and tasks should
374 * only be added when the parent is listed. If @self is NULL, then the
375 * @task pid will be removed from the list, which would happen on exit
376 * of a task.
378 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
379 struct task_struct *self,
380 struct task_struct *task)
385 /* For forks, we only add if the forking task is listed */
387 if (!trace_find_filtered_pid(pid_list, self->pid))
391 /* Sorry, but we don't support pid_max changing after setting */
392 if (task->pid >= pid_list->pid_max)
395 /* "self" is set for forks, and NULL for exits */
397 set_bit(task->pid, pid_list->pids);
399 clear_bit(task->pid, pid_list->pids);
403 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
404 * @pid_list: The pid list to show
405 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
406 * @pos: The position of the file
408 * This is used by the seq_file "next" operation to iterate the pids
409 * listed in a trace_pid_list structure.
411 * Returns the pid+1 as we want to display pid of zero, but NULL would
412 * stop the iteration.
414 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
416 unsigned long pid = (unsigned long)v;
420 /* pid is already +1 of the actual previous bit */
421 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
423 /* Return pid + 1 to allow zero to be represented */
424 if (pid < pid_list->pid_max)
425 return (void *)(pid + 1);
431 * trace_pid_start - Used for seq_file to start reading pid lists
432 * @pid_list: The pid list to show
433 * @pos: The position of the file
435 * This is used by seq_file "start" operation to start the iteration
438 * Returns the pid+1 as we want to display pid of zero, but NULL would
439 * stop the iteration.
441 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
446 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
447 if (pid >= pid_list->pid_max)
450 /* Return pid + 1 so that zero can be the exit value */
451 for (pid++; pid && l < *pos;
452 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
458 * trace_pid_show - show the current pid in seq_file processing
459 * @m: The seq_file structure to write into
460 * @v: A void pointer of the pid (+1) value to display
462 * Can be directly used by seq_file operations to display the current
463 * pid value.
465 int trace_pid_show(struct seq_file *m, void *v)
467 unsigned long pid = (unsigned long)v - 1;
469 seq_printf(m, "%lu\n", pid);
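/*
 * Illustrative sketch (not part of the original source): the three helpers
 * above are meant to be wired into a seq_file. A hypothetical user, with
 * "foo_pid_list" standing in for the pid_list the file exposes and
 * "foo_stop" being a no-op or unlock callback, would look roughly like:
 *
 *	static void *foo_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(foo_pid_list, pos);
 *	}
 *
 *	static void *foo_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(foo_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations foo_seq_ops = {
 *		.start	= foo_start,
 *		.next	= foo_next,
 *		.stop	= foo_stop,
 *		.show	= trace_pid_show,
 *	};
 */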
473 /* 128 should be much more than enough */
474 #define PID_BUF_SIZE 127
476 int trace_pid_write(struct trace_pid_list *filtered_pids,
477 struct trace_pid_list **new_pid_list,
478 const char __user *ubuf, size_t cnt)
480 struct trace_pid_list *pid_list;
481 struct trace_parser parser;
489 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
493 * Always recreate a new array. The write is an all or nothing
494 * operation. Always create a new array when adding new pids by
495 * the user. If the operation fails, then the current list is
496 * not modified.
498 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
502 pid_list->pid_max = READ_ONCE(pid_max);
504 /* Only truncating will shrink pid_max */
505 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
506 pid_list->pid_max = filtered_pids->pid_max;
508 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
509 if (!pid_list->pids) {
515 /* copy the current bits to the new max */
516 for_each_set_bit(pid, filtered_pids->pids,
517 filtered_pids->pid_max) {
518 set_bit(pid, pid_list->pids);
527 ret = trace_get_user(&parser, ubuf, cnt, &pos);
528 if (ret < 0 || !trace_parser_loaded(&parser))
536 if (kstrtoul(parser.buffer, 0, &val))
538 if (val >= pid_list->pid_max)
543 set_bit(pid, pid_list->pids);
546 trace_parser_clear(&parser);
549 trace_parser_put(&parser);
552 trace_free_pid_list(pid_list);
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
563 *new_pid_list = pid_list;
568 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
572 /* Early boot up does not have a buffer yet */
574 return trace_clock_local();
576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
582 u64 ftrace_now(int cpu)
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
588 * tracing_is_enabled - Show if global_trace has been disabled
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
596 int tracing_is_enabled(void)
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
604 return !global_trace.buffer_disabled;
608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded
610 * to page size.
611 *
612 * This number is purposely set to a low number of 16384.
613 * If a dump on oops happens, it is much appreciated not to have
614 * to wait for all that output. Anyway, this can be configured at
615 * both boot time and run time.
617 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
619 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
621 /* trace_types holds a link list of available tracers. */
622 static struct tracer *trace_types __read_mostly;
625 * trace_types_lock is used to protect the trace_types list.
627 DEFINE_MUTEX(trace_types_lock);
630 * serialize the access of the ring buffer
632 * The ring buffer serializes readers, but that is only low-level
633 * protection. The validity of the events (returned by
634 * ring_buffer_peek() etc.) is not protected by the ring buffer.
636 * The content of events may become garbage if we allow other processes
637 * to consume these events concurrently:
638 * A) the page of the consumed events may become a normal page
639 * (not a reader page) in the ring buffer, and this page will be
640 * rewritten by the event producer.
641 * B) the page of the consumed events may become a page for splice_read,
642 * and this page will be returned to the system.
644 * These primitives allow multiple processes to access different CPU
645 * ring buffers concurrently.
647 * These primitives don't distinguish read-only and read-consume access.
648 * Multiple read-only accesses are also serialized.
652 static DECLARE_RWSEM(all_cpu_access_lock);
653 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
655 static inline void trace_access_lock(int cpu)
657 if (cpu == RING_BUFFER_ALL_CPUS) {
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
661 /* gain it for accessing a cpu ring buffer. */
663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
664 down_read(&all_cpu_access_lock);
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
671 static inline void trace_access_unlock(int cpu)
673 if (cpu == RING_BUFFER_ALL_CPUS) {
674 up_write(&all_cpu_access_lock);
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
681 static inline void trace_access_lock_init(void)
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
691 static DEFINE_MUTEX(access_lock);
693 static inline void trace_access_lock(int cpu)
696 mutex_lock(&access_lock);
699 static inline void trace_access_unlock(int cpu)
702 mutex_unlock(&access_lock);
705 static inline void trace_access_lock_init(void)
711 #ifdef CONFIG_STACKTRACE
712 static void __ftrace_trace_stack(struct ring_buffer *buffer,
714 int skip, int pc, struct pt_regs *regs);
715 static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
718 int skip, int pc, struct pt_regs *regs);
721 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
723 int skip, int pc, struct pt_regs *regs)
726 static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
729 int skip, int pc, struct pt_regs *regs)
735 static __always_inline void
736 trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
739 struct trace_entry *ent = ring_buffer_event_data(event);
741 tracing_generic_entry_update(ent, flags, pc);
745 static __always_inline struct ring_buffer_event *
746 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
749 unsigned long flags, int pc)
751 struct ring_buffer_event *event;
753 event = ring_buffer_lock_reserve(buffer, len);
755 trace_event_setup(event, type, flags, pc);
760 void tracer_tracing_on(struct trace_array *tr)
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races of where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
778 * tracing_on - enable tracing buffers
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
783 void tracing_on(void)
785 tracer_tracing_on(&global_trace);
787 EXPORT_SYMBOL_GPL(tracing_on);
790 static __always_inline void
791 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
793 __this_cpu_write(trace_taskinfo_save, true);
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
802 ring_buffer_unlock_commit(buffer, event);
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
811 int __trace_puts(unsigned long ip, const char *str, int size)
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
823 pc = preempt_count();
825 if (unlikely(tracing_selftest_running || tracing_disabled))
828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
837 entry = ring_buffer_event_data(event);
840 memcpy(&entry->buf, str, size);
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
847 entry->buf[size] = '\0';
849 __buffer_unlock_commit(buffer, event);
850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
854 EXPORT_SYMBOL_GPL(__trace_puts);
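/*
 * Example (illustrative): callers normally reach __trace_puts() through the
 * trace_puts() macro from <linux/kernel.h> rather than calling it directly,
 * e.g.:
 *
 *	trace_puts("reached the slow path\n");
 *
 * which records the string in the trace buffer along with the caller's
 * address.
 */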
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
861 int __trace_bputs(unsigned long ip, const char *str)
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
873 pc = preempt_count();
875 if (unlikely(tracing_selftest_running || tracing_disabled))
878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
885 entry = ring_buffer_event_data(event);
889 __buffer_unlock_commit(buffer, event);
890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
894 EXPORT_SYMBOL_GPL(__trace_bputs);
896 #ifdef CONFIG_TRACER_SNAPSHOT
897 void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
899 struct tracer *tracer = tr->current_trace;
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
908 if (!tr->allocated_snapshot) {
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer->use_max_tr) {
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
922 local_irq_save(flags);
923 update_max_tr(tr, current, smp_processor_id(), cond_data);
924 local_irq_restore(flags);
927 void tracing_snapshot_instance(struct trace_array *tr)
929 tracing_snapshot_instance_cond(tr, NULL);
933 * tracing_snapshot - take a snapshot of the current buffer.
935 * This causes a swap between the snapshot buffer and the current live
936 * tracing buffer. You can use this to take snapshots of the live
937 * trace when some condition is triggered, but continue to trace.
939 * Note, make sure to allocate the snapshot with either
940 * a tracing_snapshot_alloc(), or by doing it manually
941 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
943 * If the snapshot buffer is not allocated, it will stop tracing.
944 * Basically making a permanent snapshot.
946 void tracing_snapshot(void)
948 struct trace_array *tr = &global_trace;
950 tracing_snapshot_instance(tr);
952 EXPORT_SYMBOL_GPL(tracing_snapshot);
955 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
956 * @tr: The tracing instance to snapshot
957 * @cond_data: The data to be tested conditionally, and possibly saved
959 * This is the same as tracing_snapshot() except that the snapshot is
960 * conditional - the snapshot will only happen if the
961 * cond_snapshot.update() implementation receiving the cond_data
962 * returns true, which means that the trace array's cond_snapshot
963 * update() operation used the cond_data to determine whether the
964 * snapshot should be taken, and if it was, presumably saved it along
965 * with the snapshot.
967 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
969 tracing_snapshot_instance_cond(tr, cond_data);
971 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
974 * tracing_snapshot_cond_data - get the user data associated with a snapshot
975 * @tr: The tracing instance
977 * When the user enables a conditional snapshot using
978 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
979 * with the snapshot. This accessor is used to retrieve it.
981 * Should not be called from cond_snapshot.update(), since it takes
982 * the tr->max_lock lock, which the code calling
983 * cond_snapshot.update() has already done.
985 * Returns the cond_data associated with the trace array's snapshot.
987 void *tracing_cond_snapshot_data(struct trace_array *tr)
989 void *cond_data = NULL;
991 arch_spin_lock(&tr->max_lock);
993 if (tr->cond_snapshot)
994 cond_data = tr->cond_snapshot->cond_data;
996 arch_spin_unlock(&tr->max_lock);
1000 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1002 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1003 struct trace_buffer *size_buf, int cpu_id);
1004 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1006 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1010 if (!tr->allocated_snapshot) {
1012 /* allocate spare buffer */
1013 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1014 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
1018 tr->allocated_snapshot = true;
1024 static void free_snapshot(struct trace_array *tr)
1027 * We don't free the ring buffer; instead, we resize it because
1028 * the max_tr ring buffer has some state (e.g. ring->clock) and
1029 * we want to preserve it.
1031 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1032 set_buffer_entries(&tr->max_buffer, 1);
1033 tracing_reset_online_cpus(&tr->max_buffer);
1034 tr->allocated_snapshot = false;
1038 * tracing_alloc_snapshot - allocate snapshot buffer.
1040 * This only allocates the snapshot buffer if it isn't already
1041 * allocated - it doesn't also take a snapshot.
1043 * This is meant to be used in cases where the snapshot buffer needs
1044 * to be set up for events that can't sleep but need to be able to
1045 * trigger a snapshot.
1047 int tracing_alloc_snapshot(void)
1049 struct trace_array *tr = &global_trace;
1052 ret = tracing_alloc_snapshot_instance(tr);
1057 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1060 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1062 * This is similar to tracing_snapshot(), but it will allocate the
1063 * snapshot buffer if it isn't already allocated. Use this only
1064 * where it is safe to sleep, as the allocation may sleep.
1066 * This causes a swap between the snapshot buffer and the current live
1067 * tracing buffer. You can use this to take snapshots of the live
1068 * trace when some condition is triggered, but continue to trace.
1070 void tracing_snapshot_alloc(void)
1074 ret = tracing_alloc_snapshot();
1080 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
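/*
 * Illustrative usage sketch (not part of the original source): a typical
 * in-kernel user allocates the snapshot buffer once from a context that
 * may sleep, and then takes snapshots at the point of interest, which may
 * be atomic. "something_went_wrong" is a hypothetical condition:
 *
 *	// during initialization (may sleep)
 *	tracing_alloc_snapshot();
 *
 *	// later, when the interesting condition fires
 *	if (something_went_wrong)
 *		tracing_snapshot();
 */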
1083 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1084 * @tr: The tracing instance
1085 * @cond_data: User data to associate with the snapshot
1086 * @update: Implementation of the cond_snapshot update function
1088 * Check whether the conditional snapshot for the given instance has
1089 * already been enabled, or if the current tracer is already using a
1090 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1091 * save the cond_data and update function inside.
1093 * Returns 0 if successful, error otherwise.
1095 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1096 cond_update_fn_t update)
1098 struct cond_snapshot *cond_snapshot;
1101 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1105 cond_snapshot->cond_data = cond_data;
1106 cond_snapshot->update = update;
1108 mutex_lock(&trace_types_lock);
1110 ret = tracing_alloc_snapshot_instance(tr);
1114 if (tr->current_trace->use_max_tr) {
1120 * The cond_snapshot can only change to NULL without the
1121 * trace_types_lock. We don't care if we race with it going
1122 * to NULL, but we want to make sure that it's not set to
1123 * something other than NULL when we get here, which we can
1124 * do safely with only holding the trace_types_lock and not
1125 * having to take the max_lock.
1127 if (tr->cond_snapshot) {
1132 arch_spin_lock(&tr->max_lock);
1133 tr->cond_snapshot = cond_snapshot;
1134 arch_spin_unlock(&tr->max_lock);
1136 mutex_unlock(&trace_types_lock);
1141 mutex_unlock(&trace_types_lock);
1142 kfree(cond_snapshot);
1145 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
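/*
 * Illustrative sketch (not part of the original source): a user of the
 * conditional snapshot API supplies an update callback that decides, from
 * the cond_data, whether the swap should happen. "my_update", "my_state"
 * and "state" are hypothetical names:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *state = cond_data;
 *
 *		return state->hit_count > state->threshold;
 *	}
 *
 *	ret = tracing_snapshot_cond_enable(tr, state, my_update);
 *	...
 *	tracing_snapshot_cond(tr, state);
 */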
1148 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1149 * @tr: The tracing instance
1151 * Check whether the conditional snapshot for the given instance is
1152 * enabled; if so, free the cond_snapshot associated with it,
1153 * otherwise return -EINVAL.
1155 * Returns 0 if successful, error otherwise.
1157 int tracing_snapshot_cond_disable(struct trace_array *tr)
1161 arch_spin_lock(&tr->max_lock);
1163 if (!tr->cond_snapshot)
1166 kfree(tr->cond_snapshot);
1167 tr->cond_snapshot = NULL;
1170 arch_spin_unlock(&tr->max_lock);
1174 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1176 void tracing_snapshot(void)
1178 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1180 EXPORT_SYMBOL_GPL(tracing_snapshot);
1181 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1183 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1185 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1186 int tracing_alloc_snapshot(void)
1188 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1191 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1192 void tracing_snapshot_alloc(void)
1197 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1198 void *tracing_cond_snapshot_data(struct trace_array *tr)
1202 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1203 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1207 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1208 int tracing_snapshot_cond_disable(struct trace_array *tr)
1212 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1213 #endif /* CONFIG_TRACER_SNAPSHOT */
1215 void tracer_tracing_off(struct trace_array *tr)
1217 if (tr->trace_buffer.buffer)
1218 ring_buffer_record_off(tr->trace_buffer.buffer);
1220 * This flag is looked at when buffers haven't been allocated
1221 * yet, or by some tracers (like irqsoff), that just want to
1222 * know if the ring buffer has been disabled, but it can handle
1223 * races of where it gets disabled but we still do a record.
1224 * As the check is in the fast path of the tracers, it is more
1225 * important to be fast than accurate.
1227 tr->buffer_disabled = 1;
1228 /* Make the flag seen by readers */
1233 * tracing_off - turn off tracing buffers
1235 * This function stops the tracing buffers from recording data.
1236 * It does not disable any overhead the tracers themselves may
1237 * be causing. This function simply causes all recording to
1238 * the ring buffers to fail.
1240 void tracing_off(void)
1242 tracer_tracing_off(&global_trace);
1244 EXPORT_SYMBOL_GPL(tracing_off);
1246 void disable_trace_on_warning(void)
1248 if (__disable_trace_on_warning)
1253 * tracer_tracing_is_on - show real state of ring buffer enabled
1254 * @tr : the trace array to know if ring buffer is enabled
1256 * Shows real state of the ring buffer if it is enabled or not.
1258 bool tracer_tracing_is_on(struct trace_array *tr)
1260 if (tr->trace_buffer.buffer)
1261 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1262 return !tr->buffer_disabled;
1266 * tracing_is_on - show state of ring buffers enabled
1268 int tracing_is_on(void)
1270 return tracer_tracing_is_on(&global_trace);
1272 EXPORT_SYMBOL_GPL(tracing_is_on);
1274 static int __init set_buf_size(char *str)
1276 unsigned long buf_size;
1280 buf_size = memparse(str, &str);
1281 /* nr_entries can not be zero */
1284 trace_buf_size = buf_size;
1287 __setup("trace_buf_size=", set_buf_size);
1289 static int __init set_tracing_thresh(char *str)
1291 unsigned long threshold;
1296 ret = kstrtoul(str, 0, &threshold);
1299 tracing_thresh = threshold * 1000;
1302 __setup("tracing_thresh=", set_tracing_thresh);
1304 unsigned long nsecs_to_usecs(unsigned long nsecs)
1306 return nsecs / 1000;
1310 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1311 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1312 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1313 * of strings in the order that the evals (enum) were defined.
1318 /* These must match the bit positions in trace_iterator_flags */
1319 static const char *trace_options[] = {
1327 int in_ns; /* is this clock in nanoseconds? */
1328 } trace_clocks[] = {
1329 { trace_clock_local, "local", 1 },
1330 { trace_clock_global, "global", 1 },
1331 { trace_clock_counter, "counter", 0 },
1332 { trace_clock_jiffies, "uptime", 0 },
1333 { trace_clock, "perf", 1 },
1334 { ktime_get_mono_fast_ns, "mono", 1 },
1335 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1336 { ktime_get_boot_fast_ns, "boot", 1 },
1340 bool trace_clock_in_ns(struct trace_array *tr)
1342 if (trace_clocks[tr->clock_id].in_ns)
1349 * trace_parser_get_init - gets the buffer for trace parser
1351 int trace_parser_get_init(struct trace_parser *parser, int size)
1353 memset(parser, 0, sizeof(*parser));
1355 parser->buffer = kmalloc(size, GFP_KERNEL);
1356 if (!parser->buffer)
1359 parser->size = size;
1364 * trace_parser_put - frees the buffer for trace parser
1366 void trace_parser_put(struct trace_parser *parser)
1368 kfree(parser->buffer);
1369 parser->buffer = NULL;
1373 * trace_get_user - reads the user input string separated by space
1374 * (matched by isspace(ch))
1376 * For each string found the 'struct trace_parser' is updated,
1377 * and the function returns.
1379 * Returns number of bytes read.
1381 * See kernel/trace/trace.h for 'struct trace_parser' details.
1383 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1384 size_t cnt, loff_t *ppos)
1391 trace_parser_clear(parser);
1393 ret = get_user(ch, ubuf++);
1401 * If the parser has not finished with the last write,
1402 * continue reading the user input without skipping spaces.
1404 if (!parser->cont) {
1405 /* skip white space */
1406 while (cnt && isspace(ch)) {
1407 ret = get_user(ch, ubuf++);
1416 /* only spaces were written */
1417 if (isspace(ch) || !ch) {
1424 /* read the non-space input */
1425 while (cnt && !isspace(ch) && ch) {
1426 if (parser->idx < parser->size - 1)
1427 parser->buffer[parser->idx++] = ch;
1432 ret = get_user(ch, ubuf++);
1439 /* We either got finished input or we have to wait for another call. */
1440 if (isspace(ch) || !ch) {
1441 parser->buffer[parser->idx] = 0;
1442 parser->cont = false;
1443 } else if (parser->idx < parser->size - 1) {
1444 parser->cont = true;
1445 parser->buffer[parser->idx++] = ch;
1446 /* Make sure the parsed string always terminates with '\0'. */
1447 parser->buffer[parser->idx] = 0;
1460 /* TODO add a seq_buf_to_buffer() */
1461 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1465 if (trace_seq_used(s) <= s->seq.readpos)
1468 len = trace_seq_used(s) - s->seq.readpos;
1471 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1473 s->seq.readpos += cnt;
1477 unsigned long __read_mostly tracing_thresh;
1479 #ifdef CONFIG_TRACER_MAX_TRACE
1481 * Copy the new maximum trace into the separate maximum-trace
1482 * structure. (this way the maximum trace is permanently saved,
1483 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1486 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1488 struct trace_buffer *trace_buf = &tr->trace_buffer;
1489 struct trace_buffer *max_buf = &tr->max_buffer;
1490 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1491 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1494 max_buf->time_start = data->preempt_timestamp;
1496 max_data->saved_latency = tr->max_latency;
1497 max_data->critical_start = data->critical_start;
1498 max_data->critical_end = data->critical_end;
1500 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1501 max_data->pid = tsk->pid;
1503 * If tsk == current, then use current_uid(), as that does not use
1504 * RCU. The irq tracer can be called out of RCU scope.
1507 max_data->uid = current_uid();
1509 max_data->uid = task_uid(tsk);
1511 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1512 max_data->policy = tsk->policy;
1513 max_data->rt_priority = tsk->rt_priority;
1515 /* record this task's comm */
1516 tracing_record_cmdline(tsk);
1520 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1522 * @tsk: the task with the latency
1523 * @cpu: The cpu that initiated the trace.
1524 * @cond_data: User data associated with a conditional snapshot
1526 * Flip the buffers between the @tr and the max_tr and record information
1527 * about which task was the cause of this latency.
1530 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1536 WARN_ON_ONCE(!irqs_disabled());
1538 if (!tr->allocated_snapshot) {
1539 /* Only the nop tracer should hit this when disabling */
1540 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1544 arch_spin_lock(&tr->max_lock);
1546 /* Inherit the recordable setting from trace_buffer */
1547 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1548 ring_buffer_record_on(tr->max_buffer.buffer);
1550 ring_buffer_record_off(tr->max_buffer.buffer);
1552 #ifdef CONFIG_TRACER_SNAPSHOT
1553 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1556 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1558 __update_max_tr(tr, tsk, cpu);
1561 arch_spin_unlock(&tr->max_lock);
1565 * update_max_tr_single - only copy one trace over, and reset the rest
1567 * @tsk - task with the latency
1568 * @cpu - the cpu of the buffer to copy.
1570 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1573 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1580 WARN_ON_ONCE(!irqs_disabled());
1581 if (!tr->allocated_snapshot) {
1582 /* Only the nop tracer should hit this when disabling */
1583 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1587 arch_spin_lock(&tr->max_lock);
1589 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1591 if (ret == -EBUSY) {
1593 * We failed to swap the buffer due to a commit taking
1594 * place on this CPU. We fail to record, but we reset
1595 * the max trace buffer (no one writes directly to it)
1596 * and flag that it failed.
1598 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1599 "Failed to swap buffers due to commit in progress\n");
1602 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1604 __update_max_tr(tr, tsk, cpu);
1605 arch_spin_unlock(&tr->max_lock);
1607 #endif /* CONFIG_TRACER_MAX_TRACE */
1609 static int wait_on_pipe(struct trace_iterator *iter, int full)
1611 /* Iterators are static, they should be filled or empty */
1612 if (trace_buffer_iter(iter, iter->cpu_file))
1615 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1619 #ifdef CONFIG_FTRACE_STARTUP_TEST
1620 static bool selftests_can_run;
1622 struct trace_selftests {
1623 struct list_head list;
1624 struct tracer *type;
1627 static LIST_HEAD(postponed_selftests);
1629 static int save_selftest(struct tracer *type)
1631 struct trace_selftests *selftest;
1633 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1637 selftest->type = type;
1638 list_add(&selftest->list, &postponed_selftests);
1642 static int run_tracer_selftest(struct tracer *type)
1644 struct trace_array *tr = &global_trace;
1645 struct tracer *saved_tracer = tr->current_trace;
1648 if (!type->selftest || tracing_selftest_disabled)
1652 * If a tracer registers early in boot up (before scheduling is
1653 * initialized and such), then do not run its selftests yet.
1654 * Instead, run it a little later in the boot process.
1656 if (!selftests_can_run)
1657 return save_selftest(type);
1660 * Run a selftest on this tracer.
1661 * Here we reset the trace buffer, and set the current
1662 * tracer to be this tracer. The tracer can then run some
1663 * internal tracing to verify that everything is in order.
1664 * If we fail, we do not register this tracer.
1666 tracing_reset_online_cpus(&tr->trace_buffer);
1668 tr->current_trace = type;
1670 #ifdef CONFIG_TRACER_MAX_TRACE
1671 if (type->use_max_tr) {
1672 /* If we expanded the buffers, make sure the max is expanded too */
1673 if (ring_buffer_expanded)
1674 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1675 RING_BUFFER_ALL_CPUS);
1676 tr->allocated_snapshot = true;
1680 /* the test is responsible for initializing and enabling */
1681 pr_info("Testing tracer %s: ", type->name);
1682 ret = type->selftest(type, tr);
1683 /* the test is responsible for resetting too */
1684 tr->current_trace = saved_tracer;
1686 printk(KERN_CONT "FAILED!\n");
1687 /* Add the warning after printing 'FAILED' */
1691 /* Only reset on passing, to avoid touching corrupted buffers */
1692 tracing_reset_online_cpus(&tr->trace_buffer);
1694 #ifdef CONFIG_TRACER_MAX_TRACE
1695 if (type->use_max_tr) {
1696 tr->allocated_snapshot = false;
1698 /* Shrink the max buffer again */
1699 if (ring_buffer_expanded)
1700 ring_buffer_resize(tr->max_buffer.buffer, 1,
1701 RING_BUFFER_ALL_CPUS);
1705 printk(KERN_CONT "PASSED\n");
1709 static __init int init_trace_selftests(void)
1711 struct trace_selftests *p, *n;
1712 struct tracer *t, **last;
1715 selftests_can_run = true;
1717 mutex_lock(&trace_types_lock);
1719 if (list_empty(&postponed_selftests))
1722 pr_info("Running postponed tracer tests:\n");
1724 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1725 ret = run_tracer_selftest(p->type);
1726 /* If the test fails, then warn and remove from available_tracers */
1728 WARN(1, "tracer: %s failed selftest, disabling\n",
1730 last = &trace_types;
1731 for (t = trace_types; t; t = t->next) {
1744 mutex_unlock(&trace_types_lock);
1748 core_initcall(init_trace_selftests);
1750 static inline int run_tracer_selftest(struct tracer *type)
1754 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1756 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1758 static void __init apply_trace_boot_options(void);
1761 * register_tracer - register a tracer with the ftrace system.
1762 * @type - the plugin for the tracer
1764 * Register a new plugin tracer.
1766 int __init register_tracer(struct tracer *type)
1772 pr_info("Tracer must have a name\n");
1776 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1777 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1781 mutex_lock(&trace_types_lock);
1783 tracing_selftest_running = true;
1785 for (t = trace_types; t; t = t->next) {
1786 if (strcmp(type->name, t->name) == 0) {
1788 pr_info("Tracer %s already registered\n",
1795 if (!type->set_flag)
1796 type->set_flag = &dummy_set_flag;
1798 /* allocate a dummy tracer_flags */
1799 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1804 type->flags->val = 0;
1805 type->flags->opts = dummy_tracer_opt;
1807 if (!type->flags->opts)
1808 type->flags->opts = dummy_tracer_opt;
1810 /* store the tracer for __set_tracer_option */
1811 type->flags->trace = type;
1813 ret = run_tracer_selftest(type);
1817 type->next = trace_types;
1819 add_tracer_options(&global_trace, type);
1822 tracing_selftest_running = false;
1823 mutex_unlock(&trace_types_lock);
1825 if (ret || !default_bootup_tracer)
1828 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1831 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1832 /* Do we want this tracer to start on bootup? */
1833 tracing_set_tracer(&global_trace, type->name);
1834 default_bootup_tracer = NULL;
1836 apply_trace_boot_options();
1838 /* disable other selftests, since this will break it. */
1839 tracing_selftest_disabled = true;
1840 #ifdef CONFIG_FTRACE_STARTUP_TEST
1841 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1849 void tracing_reset(struct trace_buffer *buf, int cpu)
1851 struct ring_buffer *buffer = buf->buffer;
1856 ring_buffer_record_disable(buffer);
1858 /* Make sure all commits have finished */
1860 ring_buffer_reset_cpu(buffer, cpu);
1862 ring_buffer_record_enable(buffer);
1865 void tracing_reset_online_cpus(struct trace_buffer *buf)
1867 struct ring_buffer *buffer = buf->buffer;
1873 ring_buffer_record_disable(buffer);
1875 /* Make sure all commits have finished */
1878 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1880 for_each_online_cpu(cpu)
1881 ring_buffer_reset_cpu(buffer, cpu);
1883 ring_buffer_record_enable(buffer);
1886 /* Must have trace_types_lock held */
1887 void tracing_reset_all_online_cpus(void)
1889 struct trace_array *tr;
1891 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1892 if (!tr->clear_trace)
1894 tr->clear_trace = false;
1895 tracing_reset_online_cpus(&tr->trace_buffer);
1896 #ifdef CONFIG_TRACER_MAX_TRACE
1897 tracing_reset_online_cpus(&tr->max_buffer);
1902 static int *tgid_map;
1904 #define SAVED_CMDLINES_DEFAULT 128
1905 #define NO_CMDLINE_MAP UINT_MAX
1906 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1907 struct saved_cmdlines_buffer {
1908 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1909 unsigned *map_cmdline_to_pid;
1910 unsigned cmdline_num;
1912 char *saved_cmdlines;
1914 static struct saved_cmdlines_buffer *savedcmd;
1916 /* temporarily disable recording */
1917 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1919 static inline char *get_saved_cmdlines(int idx)
1921 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1924 static inline void set_cmdline(int idx, const char *cmdline)
1926 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1929 static int allocate_cmdlines_buffer(unsigned int val,
1930 struct saved_cmdlines_buffer *s)
1932 s->map_cmdline_to_pid = kmalloc_array(val,
1933 sizeof(*s->map_cmdline_to_pid),
1935 if (!s->map_cmdline_to_pid)
1938 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1939 if (!s->saved_cmdlines) {
1940 kfree(s->map_cmdline_to_pid);
1945 s->cmdline_num = val;
1946 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1947 sizeof(s->map_pid_to_cmdline));
1948 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1949 val * sizeof(*s->map_cmdline_to_pid));
1954 static int trace_create_savedcmd(void)
1958 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1962 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1972 int is_tracing_stopped(void)
1974 return global_trace.stop_count;
1978 * tracing_start - quick start of the tracer
1980 * If tracing is enabled but was stopped by tracing_stop,
1981 * this will start the tracer back up.
1983 void tracing_start(void)
1985 struct ring_buffer *buffer;
1986 unsigned long flags;
1988 if (tracing_disabled)
1991 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1992 if (--global_trace.stop_count) {
1993 if (global_trace.stop_count < 0) {
1994 /* Someone screwed up their debugging */
1996 global_trace.stop_count = 0;
2001 /* Prevent the buffers from switching */
2002 arch_spin_lock(&global_trace.max_lock);
2004 buffer = global_trace.trace_buffer.buffer;
2006 ring_buffer_record_enable(buffer);
2008 #ifdef CONFIG_TRACER_MAX_TRACE
2009 buffer = global_trace.max_buffer.buffer;
2011 ring_buffer_record_enable(buffer);
2014 arch_spin_unlock(&global_trace.max_lock);
2017 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2020 static void tracing_start_tr(struct trace_array *tr)
2022 struct ring_buffer *buffer;
2023 unsigned long flags;
2025 if (tracing_disabled)
2028 /* If global, we need to also start the max tracer */
2029 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2030 return tracing_start();
2032 raw_spin_lock_irqsave(&tr->start_lock, flags);
2034 if (--tr->stop_count) {
2035 if (tr->stop_count < 0) {
2036 /* Someone screwed up their debugging */
2043 buffer = tr->trace_buffer.buffer;
2045 ring_buffer_record_enable(buffer);
2048 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2052 * tracing_stop - quick stop of the tracer
2054 * Light weight way to stop tracing. Use in conjunction with
2057 void tracing_stop(void)
2059 struct ring_buffer *buffer;
2060 unsigned long flags;
2062 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2063 if (global_trace.stop_count++)
2066 /* Prevent the buffers from switching */
2067 arch_spin_lock(&global_trace.max_lock);
2069 buffer = global_trace.trace_buffer.buffer;
2071 ring_buffer_record_disable(buffer);
2073 #ifdef CONFIG_TRACER_MAX_TRACE
2074 buffer = global_trace.max_buffer.buffer;
2076 ring_buffer_record_disable(buffer);
2079 arch_spin_unlock(&global_trace.max_lock);
2082 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2085 static void tracing_stop_tr(struct trace_array *tr)
2087 struct ring_buffer *buffer;
2088 unsigned long flags;
2090 /* If global, we need to also stop the max tracer */
2091 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2092 return tracing_stop();
2094 raw_spin_lock_irqsave(&tr->start_lock, flags);
2095 if (tr->stop_count++)
2098 buffer = tr->trace_buffer.buffer;
2100 ring_buffer_record_disable(buffer);
2103 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2106 static int trace_save_cmdline(struct task_struct *tsk)
2110 /* treat recording of idle task as a success */
2114 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2118 * It's not the end of the world if we don't get
2119 * the lock, but we also don't want to spin
2120 * nor do we want to disable interrupts,
2121 * so if we miss here, then better luck next time.
2123 if (!arch_spin_trylock(&trace_cmdline_lock))
2126 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2127 if (idx == NO_CMDLINE_MAP) {
2128 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2131 * Check whether the cmdline buffer at idx has a pid
2132 * mapped. We are going to overwrite that entry so we
2133 * need to clear the map_pid_to_cmdline. Otherwise we
2134 * would read the new comm for the old pid.
2136 pid = savedcmd->map_cmdline_to_pid[idx];
2137 if (pid != NO_CMDLINE_MAP)
2138 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2140 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2141 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2143 savedcmd->cmdline_idx = idx;
2146 set_cmdline(idx, tsk->comm);
2148 arch_spin_unlock(&trace_cmdline_lock);
2153 static void __trace_find_cmdline(int pid, char comm[])
2158 strcpy(comm, "<idle>");
2162 if (WARN_ON_ONCE(pid < 0)) {
2163 strcpy(comm, "<XXX>");
2167 if (pid > PID_MAX_DEFAULT) {
2168 strcpy(comm, "<...>");
2172 map = savedcmd->map_pid_to_cmdline[pid];
2173 if (map != NO_CMDLINE_MAP)
2174 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2176 strcpy(comm, "<...>");
2179 void trace_find_cmdline(int pid, char comm[])
2182 arch_spin_lock(&trace_cmdline_lock);
2184 __trace_find_cmdline(pid, comm);
2186 arch_spin_unlock(&trace_cmdline_lock);
2190 int trace_find_tgid(int pid)
2192 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2195 return tgid_map[pid];
2198 static int trace_save_tgid(struct task_struct *tsk)
2200 /* treat recording of idle task as a success */
2204 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2207 tgid_map[tsk->pid] = tsk->tgid;
2211 static bool tracing_record_taskinfo_skip(int flags)
2213 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2215 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2217 if (!__this_cpu_read(trace_taskinfo_save))
2223 * tracing_record_taskinfo - record the task info of a task
2225 * @task - task to record
2226 * @flags - TRACE_RECORD_CMDLINE for recording comm
2227 * - TRACE_RECORD_TGID for recording tgid
2229 void tracing_record_taskinfo(struct task_struct *task, int flags)
2233 if (tracing_record_taskinfo_skip(flags))
2237 * Record as much task information as possible. If some fail, continue
2238 * to try to record the others.
2240 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2241 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2243 /* If recording any information failed, retry again soon. */
2247 __this_cpu_write(trace_taskinfo_save, false);
2251 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2253 * @prev - previous task during sched_switch
2254 * @next - next task during sched_switch
2255 * @flags - TRACE_RECORD_CMDLINE for recording comm
2256 * TRACE_RECORD_TGID for recording tgid
2258 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2259 struct task_struct *next, int flags)
2263 if (tracing_record_taskinfo_skip(flags))
2267 * Record as much task information as possible. If some fail, continue
2268 * to try to record the others.
2270 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2271 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2272 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2273 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2275 /* If recording any information failed, retry again soon. */
2279 __this_cpu_write(trace_taskinfo_save, false);
2282 /* Helpers to record a specific task information */
2283 void tracing_record_cmdline(struct task_struct *task)
2285 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2288 void tracing_record_tgid(struct task_struct *task)
2290 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2294 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2295 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2296 * simplifies those functions and keeps them in sync.
2298 enum print_line_t trace_handle_return(struct trace_seq *s)
2300 return trace_seq_has_overflowed(s) ?
2301 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2303 EXPORT_SYMBOL_GPL(trace_handle_return);
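/*
 * Example (illustrative): an event output callback typically ends with
 *
 *	trace_seq_printf(s, "...", ...);
 *	return trace_handle_return(s);
 *
 * so that an overflowed trace_seq is reported as TRACE_TYPE_PARTIAL_LINE
 * and anything else as TRACE_TYPE_HANDLED.
 */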
2306 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2309 struct task_struct *tsk = current;
2311 entry->preempt_count = pc & 0xff;
2312 entry->pid = (tsk) ? tsk->pid : 0;
2314 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2315 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2317 TRACE_FLAG_IRQS_NOSUPPORT |
2319 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2320 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2321 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2322 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2323 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2325 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2327 struct ring_buffer_event *
2328 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2331 unsigned long flags, int pc)
2333 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2336 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2337 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2338 static int trace_buffered_event_ref;
2341 * trace_buffered_event_enable - enable buffering events
2343 * When events are being filtered, it is quicker to use a temporary
2344 * buffer to write the event data into if there's a likely chance
2345 * that it will not be committed. The discard of the ring buffer
2346 * is not as fast as committing, and is much slower than copying
2349 * When an event is to be filtered, allocate per cpu buffers to
2350 * write the event data into, and if the event is filtered and discarded
2351 * it is simply dropped, otherwise, the entire data is to be committed
2354 void trace_buffered_event_enable(void)
2356 struct ring_buffer_event *event;
2360 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2362 if (trace_buffered_event_ref++)
2365 for_each_tracing_cpu(cpu) {
2366 page = alloc_pages_node(cpu_to_node(cpu),
2367 GFP_KERNEL | __GFP_NORETRY, 0);
2371 event = page_address(page);
2372 memset(event, 0, sizeof(*event));
2374 per_cpu(trace_buffered_event, cpu) = event;
2377 if (cpu == smp_processor_id() &&
2378 this_cpu_read(trace_buffered_event) !=
2379 per_cpu(trace_buffered_event, cpu))
2386 trace_buffered_event_disable();
2389 static void enable_trace_buffered_event(void *data)
2391 /* Probably not needed, but do it anyway */
2393 this_cpu_dec(trace_buffered_event_cnt);
2396 static void disable_trace_buffered_event(void *data)
2398 this_cpu_inc(trace_buffered_event_cnt);
2402 * trace_buffered_event_disable - disable buffering events
2404 * When a filter is removed, it is faster to not use the buffered
2405 * events, and to commit directly into the ring buffer. Free up
2406 * the temp buffers when there are no more users. This requires
2407 * special synchronization with current events.
2409 void trace_buffered_event_disable(void)
2413 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2415 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2418 if (--trace_buffered_event_ref)
2422 /* For each CPU, set the buffer as used. */
2423 smp_call_function_many(tracing_buffer_mask,
2424 disable_trace_buffered_event, NULL, 1);
2427 /* Wait for all current users to finish */
2430 for_each_tracing_cpu(cpu) {
2431 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2432 per_cpu(trace_buffered_event, cpu) = NULL;
2435 * Make sure trace_buffered_event is NULL before clearing
2436 * trace_buffered_event_cnt.
2441 /* Do the work on each cpu */
2442 smp_call_function_many(tracing_buffer_mask,
2443 enable_trace_buffered_event, NULL, 1);
2447 static struct ring_buffer *temp_buffer;
2449 struct ring_buffer_event *
2450 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2451 struct trace_event_file *trace_file,
2452 int type, unsigned long len,
2453 unsigned long flags, int pc)
2455 struct ring_buffer_event *entry;
2458 *current_rb = trace_file->tr->trace_buffer.buffer;
2460 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2461 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2462 (entry = this_cpu_read(trace_buffered_event))) {
2463 /* Try to use the per cpu buffer first */
2464 val = this_cpu_inc_return(trace_buffered_event_cnt);
2466 trace_event_setup(entry, type, flags, pc);
2467 entry->array[0] = len;
2470 this_cpu_dec(trace_buffered_event_cnt);
2473 entry = __trace_buffer_lock_reserve(*current_rb,
2474 type, len, flags, pc);
2476 * If tracing is off, but we have triggers enabled
2477 * we still need to look at the event data. Use the temp_buffer
2478 * to store the trace event for the trigger to use. It's recursion
2479 * safe and will not be recorded anywhere.
2481 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2482 *current_rb = temp_buffer;
2483 entry = __trace_buffer_lock_reserve(*current_rb,
2484 type, len, flags, pc);
2488 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
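/*
 * Illustrative sketch, not part of the original file: the generated
 * trace event code pairs this reserve with trace_event_buffer_commit(),
 * roughly as below; fbuffer, event_type and len stand in for the state
 * the real callers keep in struct trace_event_buffer.
 *
 *	fbuffer.event = trace_event_buffer_lock_reserve(&fbuffer.buffer,
 *					trace_file, event_type, len,
 *					flags, pc);
 *	if (!fbuffer.event)
 *		return;
 *	fbuffer.entry = ring_buffer_event_data(fbuffer.event);
 *	... fill in the event payload ...
 *	trace_event_buffer_commit(&fbuffer);
 */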
2490 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2491 static DEFINE_MUTEX(tracepoint_printk_mutex);
2493 static void output_printk(struct trace_event_buffer *fbuffer)
2495 struct trace_event_call *event_call;
2496 struct trace_event *event;
2497 unsigned long flags;
2498 struct trace_iterator *iter = tracepoint_print_iter;
2500 /* We should never get here if iter is NULL */
2501 if (WARN_ON_ONCE(!iter))
2504 event_call = fbuffer->trace_file->event_call;
2505 if (!event_call || !event_call->event.funcs ||
2506 !event_call->event.funcs->trace)
2509 event = &fbuffer->trace_file->event_call->event;
2511 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2512 trace_seq_init(&iter->seq);
2513 iter->ent = fbuffer->entry;
2514 event_call->event.funcs->trace(iter, 0, event);
2515 trace_seq_putc(&iter->seq, 0);
2516 printk("%s", iter->seq.buffer);
2518 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2521 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2522 void __user *buffer, size_t *lenp,
2525 int save_tracepoint_printk;
2528 mutex_lock(&tracepoint_printk_mutex);
2529 save_tracepoint_printk = tracepoint_printk;
2531 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2534 * This will force exiting early, as tracepoint_printk
2535 * is always zero when tracepoint_print_iter is not allocated.
2537 if (!tracepoint_print_iter)
2538 tracepoint_printk = 0;
2540 if (save_tracepoint_printk == tracepoint_printk)
2543 if (tracepoint_printk)
2544 static_key_enable(&tracepoint_printk_key.key);
2546 static_key_disable(&tracepoint_printk_key.key);
2549 mutex_unlock(&tracepoint_printk_mutex);
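/*
 * Illustrative note, not part of the original file: tracepoint_printk
 * is normally toggled from user space, e.g.
 *
 *	echo 1 > /proc/sys/kernel/tracepoint_printk
 *
 * or set at boot with the "tp_printk" command line option, which is
 * why the static key is used to keep the disabled case cheap.
 */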
2554 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2556 if (static_key_false(&tracepoint_printk_key.key))
2557 output_printk(fbuffer);
2559 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2560 fbuffer->event, fbuffer->entry,
2561 fbuffer->flags, fbuffer->pc);
2563 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2568 * trace_buffer_unlock_commit_regs()
2569 * trace_event_buffer_commit()
2570 * trace_event_raw_event_xxx()
2572 # define STACK_SKIP 3
2574 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2575 struct ring_buffer *buffer,
2576 struct ring_buffer_event *event,
2577 unsigned long flags, int pc,
2578 struct pt_regs *regs)
2580 __buffer_unlock_commit(buffer, event);
2583 * If regs is not set, then skip the necessary functions.
2584 * Note, we can still get here via blktrace, wakeup tracer
2585 * and mmiotrace, but that's ok if they lose a function or
2586 * two. They are not that meaningful.
2588 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2589 ftrace_trace_userstack(buffer, flags, pc);
2593 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2596 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2597 struct ring_buffer_event *event)
2599 __buffer_unlock_commit(buffer, event);
2603 trace_process_export(struct trace_export *export,
2604 struct ring_buffer_event *event)
2606 struct trace_entry *entry;
2607 unsigned int size = 0;
2609 entry = ring_buffer_event_data(event);
2610 size = ring_buffer_event_length(event);
2611 export->write(export, entry, size);
2614 static DEFINE_MUTEX(ftrace_export_lock);
2616 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2618 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2620 static inline void ftrace_exports_enable(void)
2622 static_branch_enable(&ftrace_exports_enabled);
2625 static inline void ftrace_exports_disable(void)
2627 static_branch_disable(&ftrace_exports_enabled);
2630 static void ftrace_exports(struct ring_buffer_event *event)
2632 struct trace_export *export;
2634 preempt_disable_notrace();
2636 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2638 trace_process_export(export, event);
2639 export = rcu_dereference_raw_notrace(export->next);
2642 preempt_enable_notrace();
2646 add_trace_export(struct trace_export **list, struct trace_export *export)
2648 rcu_assign_pointer(export->next, *list);
2650 * We are inserting the export into the list, but another
2651 * CPU might be walking that list. We need to make sure
2652 * the export->next pointer is valid before another CPU sees
2653 * the export pointer included in the list.
2655 rcu_assign_pointer(*list, export);
2659 rm_trace_export(struct trace_export **list, struct trace_export *export)
2661 struct trace_export **p;
2663 for (p = list; *p != NULL; p = &(*p)->next)
2670 rcu_assign_pointer(*p, (*p)->next);
2676 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2679 ftrace_exports_enable();
2681 add_trace_export(list, export);
2685 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2689 ret = rm_trace_export(list, export);
2691 ftrace_exports_disable();
2696 int register_ftrace_export(struct trace_export *export)
2698 if (WARN_ON_ONCE(!export->write))
2701 mutex_lock(&ftrace_export_lock);
2703 add_ftrace_export(&ftrace_exports_list, export);
2705 mutex_unlock(&ftrace_export_lock);
2709 EXPORT_SYMBOL_GPL(register_ftrace_export);
2711 int unregister_ftrace_export(struct trace_export *export)
2715 mutex_lock(&ftrace_export_lock);
2717 ret = rm_ftrace_export(&ftrace_exports_list, export);
2719 mutex_unlock(&ftrace_export_lock);
2723 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
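/*
 * Illustrative sketch, not part of the original file: a minimal
 * trace_export consumer supplies a ->write() callback and registers
 * it. The names below are hypothetical, and the callback prototype
 * follows the call made in trace_process_export() above (see
 * <linux/trace.h> for the authoritative definition).
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		// forward the raw entry/size to an external sink
 *		pr_debug("ftrace export: %u byte entry\n", size);
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */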
2726 trace_function(struct trace_array *tr,
2727 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2730 struct trace_event_call *call = &event_function;
2731 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2732 struct ring_buffer_event *event;
2733 struct ftrace_entry *entry;
2735 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2739 entry = ring_buffer_event_data(event);
2741 entry->parent_ip = parent_ip;
2743 if (!call_filter_check_discard(call, entry, buffer, event)) {
2744 if (static_branch_unlikely(&ftrace_exports_enabled))
2745 ftrace_exports(event);
2746 __buffer_unlock_commit(buffer, event);
2750 #ifdef CONFIG_STACKTRACE
2752 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2753 struct ftrace_stack {
2754 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2757 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2758 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2760 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2761 unsigned long flags,
2762 int skip, int pc, struct pt_regs *regs)
2764 struct trace_event_call *call = &event_kernel_stack;
2765 struct ring_buffer_event *event;
2766 struct stack_entry *entry;
2767 struct stack_trace trace;
2769 int size = FTRACE_STACK_ENTRIES;
2771 trace.nr_entries = 0;
2775 * Add one, for this function and the call to save_stack_trace().
2776 * If regs is set, then these functions will not be in the way.
2778 #ifndef CONFIG_UNWINDER_ORC
2784 * Since events can happen in NMIs there's no safe way to
2785 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2786 * or NMI comes in, it will just have to use the default
2787 * FTRACE_STACK_SIZE.
2789 preempt_disable_notrace();
2791 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2793 * We don't need any atomic variables, just a barrier.
2794 * If an interrupt comes in, we don't care, because it would
2795 * have exited and put the counter back to what we want.
2796 * We just need a barrier to keep gcc from moving things around.
2800 if (use_stack == 1) {
2801 trace.entries = this_cpu_ptr(ftrace_stack.calls);
2802 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2805 save_stack_trace_regs(regs, &trace);
2807 save_stack_trace(&trace);
2809 if (trace.nr_entries > size)
2810 size = trace.nr_entries;
2812 /* From now on, use_stack is a boolean */
2815 size *= sizeof(unsigned long);
2817 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2818 sizeof(*entry) + size, flags, pc);
2821 entry = ring_buffer_event_data(event);
2823 memset(&entry->caller, 0, size);
2826 memcpy(&entry->caller, trace.entries,
2827 trace.nr_entries * sizeof(unsigned long));
2829 trace.max_entries = FTRACE_STACK_ENTRIES;
2830 trace.entries = entry->caller;
2832 save_stack_trace_regs(regs, &trace);
2834 save_stack_trace(&trace);
2837 entry->size = trace.nr_entries;
2839 if (!call_filter_check_discard(call, entry, buffer, event))
2840 __buffer_unlock_commit(buffer, event);
2843 /* Again, don't let gcc optimize things here */
2845 __this_cpu_dec(ftrace_stack_reserve);
2846 preempt_enable_notrace();
2850 static inline void ftrace_trace_stack(struct trace_array *tr,
2851 struct ring_buffer *buffer,
2852 unsigned long flags,
2853 int skip, int pc, struct pt_regs *regs)
2855 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2858 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2861 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2864 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2866 if (rcu_is_watching()) {
2867 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2872 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2873 * but if the above rcu_is_watching() failed, then the NMI
2874 * triggered someplace critical, and rcu_irq_enter() should
2875 * not be called from NMI.
2877 if (unlikely(in_nmi()))
2880 rcu_irq_enter_irqson();
2881 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2882 rcu_irq_exit_irqson();
2886 * trace_dump_stack - record a stack back trace in the trace buffer
2887 * @skip: Number of functions to skip (helper handlers)
2889 void trace_dump_stack(int skip)
2891 unsigned long flags;
2893 if (tracing_disabled || tracing_selftest_running)
2896 local_save_flags(flags);
2898 #ifndef CONFIG_UNWINDER_ORC
2899 /* Skip 1 to skip this function. */
2902 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2903 flags, skip, preempt_count(), NULL);
2905 EXPORT_SYMBOL_GPL(trace_dump_stack);
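/*
 * Illustrative sketch, not part of the original file: trace_dump_stack()
 * is meant to be dropped into code under investigation, e.g.
 *
 *	if (unlikely(looks_wrong))
 *		trace_dump_stack(0);
 *
 * which records the current backtrace into the trace buffer instead of
 * the console; "looks_wrong" is a placeholder condition.
 */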
2907 static DEFINE_PER_CPU(int, user_stack_count);
2910 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2912 struct trace_event_call *call = &event_user_stack;
2913 struct ring_buffer_event *event;
2914 struct userstack_entry *entry;
2915 struct stack_trace trace;
2917 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2921 * NMIs cannot handle page faults, even with fixups.
2922 * Saving the user stack can (and often does) fault.
2924 if (unlikely(in_nmi()))
2928 * prevent recursion, since the user stack tracing may
2929 * trigger other kernel events.
2932 if (__this_cpu_read(user_stack_count))
2935 __this_cpu_inc(user_stack_count);
2937 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2938 sizeof(*entry), flags, pc);
2940 goto out_drop_count;
2941 entry = ring_buffer_event_data(event);
2943 entry->tgid = current->tgid;
2944 memset(&entry->caller, 0, sizeof(entry->caller));
2946 trace.nr_entries = 0;
2947 trace.max_entries = FTRACE_STACK_ENTRIES;
2949 trace.entries = entry->caller;
2951 save_stack_trace_user(&trace);
2952 if (!call_filter_check_discard(call, entry, buffer, event))
2953 __buffer_unlock_commit(buffer, event);
2956 __this_cpu_dec(user_stack_count);
2962 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2964 ftrace_trace_userstack(tr, flags, preempt_count());
2968 #endif /* CONFIG_STACKTRACE */
2970 /* created for use with alloc_percpu */
2971 struct trace_buffer_struct {
2973 char buffer[4][TRACE_BUF_SIZE];
2976 static struct trace_buffer_struct *trace_percpu_buffer;
2979 * This allows for lockless recording. If we're nested too deeply, then
2980 * this returns NULL.
2982 static char *get_trace_buf(void)
2984 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2986 if (!buffer || buffer->nesting >= 4)
2991 /* Interrupts must see nesting incremented before we use the buffer */
2993 return &buffer->buffer[buffer->nesting][0];
2996 static void put_trace_buf(void)
2998 /* Don't let the decrement of nesting leak before this */
3000 this_cpu_dec(trace_percpu_buffer->nesting);
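/*
 * Illustrative sketch, not part of the original file: users of the
 * per-cpu trace_printk buffers pair the two helpers above while
 * preemption is disabled, the way trace_vbprintk() below does:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		... format the message into tbuffer ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */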
3003 static int alloc_percpu_trace_buffer(void)
3005 struct trace_buffer_struct *buffers;
3007 buffers = alloc_percpu(struct trace_buffer_struct);
3008 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3011 trace_percpu_buffer = buffers;
3015 static int buffers_allocated;
3017 void trace_printk_init_buffers(void)
3019 if (buffers_allocated)
3022 if (alloc_percpu_trace_buffer())
3025 /* trace_printk() is for debug use only. Don't use it in production. */
3028 pr_warn("**********************************************************\n");
3029 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3031 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3033 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3034 pr_warn("** unsafe for production use. **\n");
3036 pr_warn("** If you see this message and you are not debugging **\n");
3037 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3039 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3040 pr_warn("**********************************************************\n");
3042 /* Expand the buffers to set size */
3043 tracing_update_buffers();
3045 buffers_allocated = 1;
3048 * trace_printk_init_buffers() can be called by modules.
3049 * If that happens, then we need to start cmdline recording
3050 * directly here. If the global_trace.buffer is already
3051 * allocated here, then this was called by module code.
3053 if (global_trace.trace_buffer.buffer)
3054 tracing_start_cmdline_record();
3056 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
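/*
 * Illustrative note, not part of the original file: these buffers back
 * trace_printk(), which is used like printk() but writes into the ring
 * buffer, e.g.
 *
 *	trace_printk("queue %d depth now %u\n", qid, depth);
 *
 * where "qid" and "depth" are placeholder variables.
 */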
3058 void trace_printk_start_comm(void)
3060 /* Start tracing comms if trace printk is set */
3061 if (!buffers_allocated)
3063 tracing_start_cmdline_record();
3066 static void trace_printk_start_stop_comm(int enabled)
3068 if (!buffers_allocated)
3072 tracing_start_cmdline_record();
3074 tracing_stop_cmdline_record();
3078 * trace_vbprintk - write binary msg to tracing buffer
3081 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3083 struct trace_event_call *call = &event_bprint;
3084 struct ring_buffer_event *event;
3085 struct ring_buffer *buffer;
3086 struct trace_array *tr = &global_trace;
3087 struct bprint_entry *entry;
3088 unsigned long flags;
3090 int len = 0, size, pc;
3092 if (unlikely(tracing_selftest_running || tracing_disabled))
3095 /* Don't pollute graph traces with trace_vprintk internals */
3096 pause_graph_tracing();
3098 pc = preempt_count();
3099 preempt_disable_notrace();
3101 tbuffer = get_trace_buf();
3107 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3109 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3112 local_save_flags(flags);
3113 size = sizeof(*entry) + sizeof(u32) * len;
3114 buffer = tr->trace_buffer.buffer;
3115 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3119 entry = ring_buffer_event_data(event);
3123 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3124 if (!call_filter_check_discard(call, entry, buffer, event)) {
3125 __buffer_unlock_commit(buffer, event);
3126 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3133 preempt_enable_notrace();
3134 unpause_graph_tracing();
3138 EXPORT_SYMBOL_GPL(trace_vbprintk);
3142 __trace_array_vprintk(struct ring_buffer *buffer,
3143 unsigned long ip, const char *fmt, va_list args)
3145 struct trace_event_call *call = &event_print;
3146 struct ring_buffer_event *event;
3147 int len = 0, size, pc;
3148 struct print_entry *entry;
3149 unsigned long flags;
3152 if (tracing_disabled || tracing_selftest_running)
3155 /* Don't pollute graph traces with trace_vprintk internals */
3156 pause_graph_tracing();
3158 pc = preempt_count();
3159 preempt_disable_notrace();
3162 tbuffer = get_trace_buf();
3168 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3170 local_save_flags(flags);
3171 size = sizeof(*entry) + len + 1;
3172 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3176 entry = ring_buffer_event_data(event);
3179 memcpy(&entry->buf, tbuffer, len + 1);
3180 if (!call_filter_check_discard(call, entry, buffer, event)) {
3181 __buffer_unlock_commit(buffer, event);
3182 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3189 preempt_enable_notrace();
3190 unpause_graph_tracing();
3196 int trace_array_vprintk(struct trace_array *tr,
3197 unsigned long ip, const char *fmt, va_list args)
3199 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3203 int trace_array_printk(struct trace_array *tr,
3204 unsigned long ip, const char *fmt, ...)
3209 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3213 ret = trace_array_vprintk(tr, ip, fmt, ap);
3217 EXPORT_SYMBOL_GPL(trace_array_printk);
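/*
 * Illustrative sketch, not part of the original file:
 * trace_array_printk() targets a specific instance rather than the
 * global buffer; "tr" below is assumed to be a previously obtained
 * struct trace_array pointer and "name" a placeholder string.
 *
 *	trace_array_printk(tr, _THIS_IP_, "resetting %s\n", name);
 */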
3220 int trace_array_printk_buf(struct ring_buffer *buffer,
3221 unsigned long ip, const char *fmt, ...)
3226 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3230 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3236 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3238 return trace_array_vprintk(&global_trace, ip, fmt, args);
3240 EXPORT_SYMBOL_GPL(trace_vprintk);
3242 static void trace_iterator_increment(struct trace_iterator *iter)
3244 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3248 ring_buffer_read(buf_iter, NULL);
3251 static struct trace_entry *
3252 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3253 unsigned long *lost_events)
3255 struct ring_buffer_event *event;
3256 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3259 event = ring_buffer_iter_peek(buf_iter, ts);
3261 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3265 iter->ent_size = ring_buffer_event_length(event);
3266 return ring_buffer_event_data(event);
3272 static struct trace_entry *
3273 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3274 unsigned long *missing_events, u64 *ent_ts)
3276 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3277 struct trace_entry *ent, *next = NULL;
3278 unsigned long lost_events = 0, next_lost = 0;
3279 int cpu_file = iter->cpu_file;
3280 u64 next_ts = 0, ts;
3286 * If we are in a per_cpu trace file, don't bother iterating over
3287 * all CPUs; peek at that one directly.
3289 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3290 if (ring_buffer_empty_cpu(buffer, cpu_file))
3292 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3294 *ent_cpu = cpu_file;
3299 for_each_tracing_cpu(cpu) {
3301 if (ring_buffer_empty_cpu(buffer, cpu))
3304 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3307 * Pick the entry with the smallest timestamp:
3309 if (ent && (!next || ts < next_ts)) {
3313 next_lost = lost_events;
3314 next_size = iter->ent_size;
3318 iter->ent_size = next_size;
3321 *ent_cpu = next_cpu;
3327 *missing_events = next_lost;
3332 /* Find the next real entry, without updating the iterator itself */
3333 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3334 int *ent_cpu, u64 *ent_ts)
3336 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3339 /* Find the next real entry, and increment the iterator to the next entry */
3340 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3342 iter->ent = __find_next_entry(iter, &iter->cpu,
3343 &iter->lost_events, &iter->ts);
3346 trace_iterator_increment(iter);
3348 return iter->ent ? iter : NULL;
3351 static void trace_consume(struct trace_iterator *iter)
3353 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3354 &iter->lost_events);
3357 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3359 struct trace_iterator *iter = m->private;
3363 WARN_ON_ONCE(iter->leftover);
3367 /* can't go backwards */
3372 ent = trace_find_next_entry_inc(iter);
3376 while (ent && iter->idx < i)
3377 ent = trace_find_next_entry_inc(iter);
3384 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3386 struct ring_buffer_event *event;
3387 struct ring_buffer_iter *buf_iter;
3388 unsigned long entries = 0;
3391 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3393 buf_iter = trace_buffer_iter(iter, cpu);
3397 ring_buffer_iter_reset(buf_iter);
3400 * With the max latency tracers it is possible that a reset
3401 * never took place on a cpu. This is evident by the timestamp
3402 * being before the start of the buffer.
3404 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3405 if (ts >= iter->trace_buffer->time_start)
3408 ring_buffer_read(buf_iter, NULL);
3411 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3415 * The current tracer is copied to avoid global locking all around.
3418 static void *s_start(struct seq_file *m, loff_t *pos)
3420 struct trace_iterator *iter = m->private;
3421 struct trace_array *tr = iter->tr;
3422 int cpu_file = iter->cpu_file;
3428 * copy the tracer to avoid using a global lock all around.
3429 * iter->trace is a copy of current_trace, the pointer to the
3430 * name may be used instead of a strcmp(), as iter->trace->name
3431 * will point to the same string as current_trace->name.
3433 mutex_lock(&trace_types_lock);
3434 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3435 *iter->trace = *tr->current_trace;
3436 mutex_unlock(&trace_types_lock);
3438 #ifdef CONFIG_TRACER_MAX_TRACE
3439 if (iter->snapshot && iter->trace->use_max_tr)
3440 return ERR_PTR(-EBUSY);
3443 if (!iter->snapshot)
3444 atomic_inc(&trace_record_taskinfo_disabled);
3446 if (*pos != iter->pos) {
3451 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3452 for_each_tracing_cpu(cpu)
3453 tracing_iter_reset(iter, cpu);
3455 tracing_iter_reset(iter, cpu_file);
3458 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3463 * If we overflowed the seq_file before, then we want
3464 * to just reuse the trace_seq buffer again.
3470 p = s_next(m, p, &l);
3474 trace_event_read_lock();
3475 trace_access_lock(cpu_file);
3479 static void s_stop(struct seq_file *m, void *p)
3481 struct trace_iterator *iter = m->private;
3483 #ifdef CONFIG_TRACER_MAX_TRACE
3484 if (iter->snapshot && iter->trace->use_max_tr)
3488 if (!iter->snapshot)
3489 atomic_dec(&trace_record_taskinfo_disabled);
3491 trace_access_unlock(iter->cpu_file);
3492 trace_event_read_unlock();
3496 get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3497 unsigned long *entries, int cpu)
3499 unsigned long count;
3501 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3503 * If this buffer has skipped entries, then we hold all
3504 * entries for the trace and we need to ignore the
3505 * ones before the time stamp.
3507 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3508 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3509 /* total is the same as the entries */
3513 ring_buffer_overrun_cpu(buf->buffer, cpu);
3518 get_total_entries(struct trace_buffer *buf,
3519 unsigned long *total, unsigned long *entries)
3527 for_each_tracing_cpu(cpu) {
3528 get_total_entries_cpu(buf, &t, &e, cpu);
3534 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3536 unsigned long total, entries;
3541 get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3546 unsigned long trace_total_entries(struct trace_array *tr)
3548 unsigned long total, entries;
3553 get_total_entries(&tr->trace_buffer, &total, &entries);
3558 static void print_lat_help_header(struct seq_file *m)
3560 seq_puts(m, "# _------=> CPU# \n"
3561 "# / _-----=> irqs-off \n"
3562 "# | / _----=> need-resched \n"
3563 "# || / _---=> hardirq/softirq \n"
3564 "# ||| / _--=> preempt-depth \n"
3566 "# cmd pid ||||| time | caller \n"
3567 "# \\ / ||||| \\ | / \n");
3570 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3572 unsigned long total;
3573 unsigned long entries;
3575 get_total_entries(buf, &total, &entries);
3576 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3577 entries, total, num_online_cpus());
3581 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3584 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3586 print_event_info(buf, m);
3588 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3589 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3592 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3595 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3596 const char tgid_space[] = " ";
3597 const char space[] = " ";
3599 print_event_info(buf, m);
3601 seq_printf(m, "# %s _-----=> irqs-off\n",
3602 tgid ? tgid_space : space);
3603 seq_printf(m, "# %s / _----=> need-resched\n",
3604 tgid ? tgid_space : space);
3605 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3606 tgid ? tgid_space : space);
3607 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3608 tgid ? tgid_space : space);
3609 seq_printf(m, "# %s||| / delay\n",
3610 tgid ? tgid_space : space);
3611 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
3612 tgid ? " TGID " : space);
3613 seq_printf(m, "# | | %s | |||| | |\n",
3614 tgid ? " | " : space);
3618 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3620 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3621 struct trace_buffer *buf = iter->trace_buffer;
3622 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3623 struct tracer *type = iter->trace;
3624 unsigned long entries;
3625 unsigned long total;
3626 const char *name = "preemption";
3630 get_total_entries(buf, &total, &entries);
3632 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3634 seq_puts(m, "# -----------------------------------"
3635 "---------------------------------\n");
3636 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3637 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3638 nsecs_to_usecs(data->saved_latency),
3642 #if defined(CONFIG_PREEMPT_NONE)
3644 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3646 #elif defined(CONFIG_PREEMPT)
3651 /* These are reserved for later use */
3654 seq_printf(m, " #P:%d)\n", num_online_cpus());
3658 seq_puts(m, "# -----------------\n");
3659 seq_printf(m, "# | task: %.16s-%d "
3660 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3661 data->comm, data->pid,
3662 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3663 data->policy, data->rt_priority);
3664 seq_puts(m, "# -----------------\n");
3666 if (data->critical_start) {
3667 seq_puts(m, "# => started at: ");
3668 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3669 trace_print_seq(m, &iter->seq);
3670 seq_puts(m, "\n# => ended at: ");
3671 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3672 trace_print_seq(m, &iter->seq);
3673 seq_puts(m, "\n#\n");
3679 static void test_cpu_buff_start(struct trace_iterator *iter)
3681 struct trace_seq *s = &iter->seq;
3682 struct trace_array *tr = iter->tr;
3684 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3687 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3690 if (cpumask_available(iter->started) &&
3691 cpumask_test_cpu(iter->cpu, iter->started))
3694 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3697 if (cpumask_available(iter->started))
3698 cpumask_set_cpu(iter->cpu, iter->started);
3700 /* Don't print started cpu buffer for the first entry of the trace */
3702 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3706 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3708 struct trace_array *tr = iter->tr;
3709 struct trace_seq *s = &iter->seq;
3710 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3711 struct trace_entry *entry;
3712 struct trace_event *event;
3716 test_cpu_buff_start(iter);
3718 event = ftrace_find_event(entry->type);
3720 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3721 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3722 trace_print_lat_context(iter);
3724 trace_print_context(iter);
3727 if (trace_seq_has_overflowed(s))
3728 return TRACE_TYPE_PARTIAL_LINE;
3731 return event->funcs->trace(iter, sym_flags, event);
3733 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3735 return trace_handle_return(s);
3738 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3740 struct trace_array *tr = iter->tr;
3741 struct trace_seq *s = &iter->seq;
3742 struct trace_entry *entry;
3743 struct trace_event *event;
3747 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3748 trace_seq_printf(s, "%d %d %llu ",
3749 entry->pid, iter->cpu, iter->ts);
3751 if (trace_seq_has_overflowed(s))
3752 return TRACE_TYPE_PARTIAL_LINE;
3754 event = ftrace_find_event(entry->type);
3756 return event->funcs->raw(iter, 0, event);
3758 trace_seq_printf(s, "%d ?\n", entry->type);
3760 return trace_handle_return(s);
3763 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3765 struct trace_array *tr = iter->tr;
3766 struct trace_seq *s = &iter->seq;
3767 unsigned char newline = '\n';
3768 struct trace_entry *entry;
3769 struct trace_event *event;
3773 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3774 SEQ_PUT_HEX_FIELD(s, entry->pid);
3775 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3776 SEQ_PUT_HEX_FIELD(s, iter->ts);
3777 if (trace_seq_has_overflowed(s))
3778 return TRACE_TYPE_PARTIAL_LINE;
3781 event = ftrace_find_event(entry->type);
3783 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3784 if (ret != TRACE_TYPE_HANDLED)
3788 SEQ_PUT_FIELD(s, newline);
3790 return trace_handle_return(s);
3793 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3795 struct trace_array *tr = iter->tr;
3796 struct trace_seq *s = &iter->seq;
3797 struct trace_entry *entry;
3798 struct trace_event *event;
3802 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3803 SEQ_PUT_FIELD(s, entry->pid);
3804 SEQ_PUT_FIELD(s, iter->cpu);
3805 SEQ_PUT_FIELD(s, iter->ts);
3806 if (trace_seq_has_overflowed(s))
3807 return TRACE_TYPE_PARTIAL_LINE;
3810 event = ftrace_find_event(entry->type);
3811 return event ? event->funcs->binary(iter, 0, event) :
3815 int trace_empty(struct trace_iterator *iter)
3817 struct ring_buffer_iter *buf_iter;
3820 /* If we are looking at one CPU buffer, only check that one */
3821 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3822 cpu = iter->cpu_file;
3823 buf_iter = trace_buffer_iter(iter, cpu);
3825 if (!ring_buffer_iter_empty(buf_iter))
3828 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3834 for_each_tracing_cpu(cpu) {
3835 buf_iter = trace_buffer_iter(iter, cpu);
3837 if (!ring_buffer_iter_empty(buf_iter))
3840 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3848 /* Called with trace_event_read_lock() held. */
3849 enum print_line_t print_trace_line(struct trace_iterator *iter)
3851 struct trace_array *tr = iter->tr;
3852 unsigned long trace_flags = tr->trace_flags;
3853 enum print_line_t ret;
3855 if (iter->lost_events) {
3856 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3857 iter->cpu, iter->lost_events);
3858 if (trace_seq_has_overflowed(&iter->seq))
3859 return TRACE_TYPE_PARTIAL_LINE;
3862 if (iter->trace && iter->trace->print_line) {
3863 ret = iter->trace->print_line(iter);
3864 if (ret != TRACE_TYPE_UNHANDLED)
3868 if (iter->ent->type == TRACE_BPUTS &&
3869 trace_flags & TRACE_ITER_PRINTK &&
3870 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3871 return trace_print_bputs_msg_only(iter);
3873 if (iter->ent->type == TRACE_BPRINT &&
3874 trace_flags & TRACE_ITER_PRINTK &&
3875 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3876 return trace_print_bprintk_msg_only(iter);
3878 if (iter->ent->type == TRACE_PRINT &&
3879 trace_flags & TRACE_ITER_PRINTK &&
3880 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3881 return trace_print_printk_msg_only(iter);
3883 if (trace_flags & TRACE_ITER_BIN)
3884 return print_bin_fmt(iter);
3886 if (trace_flags & TRACE_ITER_HEX)
3887 return print_hex_fmt(iter);
3889 if (trace_flags & TRACE_ITER_RAW)
3890 return print_raw_fmt(iter);
3892 return print_trace_fmt(iter);
3895 void trace_latency_header(struct seq_file *m)
3897 struct trace_iterator *iter = m->private;
3898 struct trace_array *tr = iter->tr;
3900 /* print nothing if the buffers are empty */
3901 if (trace_empty(iter))
3904 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3905 print_trace_header(m, iter);
3907 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3908 print_lat_help_header(m);
3911 void trace_default_header(struct seq_file *m)
3913 struct trace_iterator *iter = m->private;
3914 struct trace_array *tr = iter->tr;
3915 unsigned long trace_flags = tr->trace_flags;
3917 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3920 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3921 /* print nothing if the buffers are empty */
3922 if (trace_empty(iter))
3924 print_trace_header(m, iter);
3925 if (!(trace_flags & TRACE_ITER_VERBOSE))
3926 print_lat_help_header(m);
3928 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3929 if (trace_flags & TRACE_ITER_IRQ_INFO)
3930 print_func_help_header_irq(iter->trace_buffer,
3933 print_func_help_header(iter->trace_buffer, m,
3939 static void test_ftrace_alive(struct seq_file *m)
3941 if (!ftrace_is_dead())
3943 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3944 "# MAY BE MISSING FUNCTION EVENTS\n");
3947 #ifdef CONFIG_TRACER_MAX_TRACE
3948 static void show_snapshot_main_help(struct seq_file *m)
3950 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3951 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3952 "# Takes a snapshot of the main buffer.\n"
3953 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3954 "# (Doesn't have to be '2'; works with any number that\n"
3955 "# is not a '0' or '1')\n");
3958 static void show_snapshot_percpu_help(struct seq_file *m)
3960 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3961 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3962 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3963 "# Takes a snapshot of the main buffer for this cpu.\n");
3965 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3966 "# Must use main snapshot file to allocate.\n");
3968 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3969 "# (Doesn't have to be '2'; works with any number that\n"
3970 "# is not a '0' or '1')\n");
3973 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3975 if (iter->tr->allocated_snapshot)
3976 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3978 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3980 seq_puts(m, "# Snapshot commands:\n");
3981 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3982 show_snapshot_main_help(m);
3984 show_snapshot_percpu_help(m);
3987 /* Should never be called */
3988 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3991 static int s_show(struct seq_file *m, void *v)
3993 struct trace_iterator *iter = v;
3996 if (iter->ent == NULL) {
3998 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4000 test_ftrace_alive(m);
4002 if (iter->snapshot && trace_empty(iter))
4003 print_snapshot_help(m, iter);
4004 else if (iter->trace && iter->trace->print_header)
4005 iter->trace->print_header(m);
4007 trace_default_header(m);
4009 } else if (iter->leftover) {
4011 * If we filled the seq_file buffer earlier, we
4012 * want to just show it now.
4014 ret = trace_print_seq(m, &iter->seq);
4016 /* ret should this time be zero, but you never know */
4017 iter->leftover = ret;
4020 print_trace_line(iter);
4021 ret = trace_print_seq(m, &iter->seq);
4023 * If we overflow the seq_file buffer, then it will
4024 * ask us for this data again at start up.
4026 * ret is 0 if seq_file write succeeded.
4029 iter->leftover = ret;
4036 * Should be used after trace_array_get(); trace_types_lock
4037 * ensures that i_cdev was already initialized.
4039 static inline int tracing_get_cpu(struct inode *inode)
4041 if (inode->i_cdev) /* See trace_create_cpu_file() */
4042 return (long)inode->i_cdev - 1;
4043 return RING_BUFFER_ALL_CPUS;
4046 static const struct seq_operations tracer_seq_ops = {
4053 static struct trace_iterator *
4054 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4056 struct trace_array *tr = inode->i_private;
4057 struct trace_iterator *iter;
4060 if (tracing_disabled)
4061 return ERR_PTR(-ENODEV);
4063 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4065 return ERR_PTR(-ENOMEM);
4067 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4069 if (!iter->buffer_iter)
4073 * We make a copy of the current tracer to avoid concurrent
4074 * changes on it while we are reading.
4076 mutex_lock(&trace_types_lock);
4077 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4081 *iter->trace = *tr->current_trace;
4083 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4088 #ifdef CONFIG_TRACER_MAX_TRACE
4089 /* Currently only the top directory has a snapshot */
4090 if (tr->current_trace->print_max || snapshot)
4091 iter->trace_buffer = &tr->max_buffer;
4094 iter->trace_buffer = &tr->trace_buffer;
4095 iter->snapshot = snapshot;
4097 iter->cpu_file = tracing_get_cpu(inode);
4098 mutex_init(&iter->mutex);
4100 /* Notify the tracer early; before we stop tracing. */
4101 if (iter->trace && iter->trace->open)
4102 iter->trace->open(iter);
4104 /* Annotate start of buffers if we had overruns */
4105 if (ring_buffer_overruns(iter->trace_buffer->buffer))
4106 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4108 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4109 if (trace_clocks[tr->clock_id].in_ns)
4110 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4112 /* stop the trace while dumping if we are not opening "snapshot" */
4113 if (!iter->snapshot)
4114 tracing_stop_tr(tr);
4116 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4117 for_each_tracing_cpu(cpu) {
4118 iter->buffer_iter[cpu] =
4119 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4122 ring_buffer_read_prepare_sync();
4123 for_each_tracing_cpu(cpu) {
4124 ring_buffer_read_start(iter->buffer_iter[cpu]);
4125 tracing_iter_reset(iter, cpu);
4128 cpu = iter->cpu_file;
4129 iter->buffer_iter[cpu] =
4130 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4132 ring_buffer_read_prepare_sync();
4133 ring_buffer_read_start(iter->buffer_iter[cpu]);
4134 tracing_iter_reset(iter, cpu);
4137 mutex_unlock(&trace_types_lock);
4142 mutex_unlock(&trace_types_lock);
4144 kfree(iter->buffer_iter);
4146 seq_release_private(inode, file);
4147 return ERR_PTR(-ENOMEM);
4150 int tracing_open_generic(struct inode *inode, struct file *filp)
4152 if (tracing_disabled)
4155 filp->private_data = inode->i_private;
4159 bool tracing_is_disabled(void)
4161 return (tracing_disabled) ? true : false;
4165 * Open and update trace_array ref count.
4166 * Must have the current trace_array passed to it.
4168 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4170 struct trace_array *tr = inode->i_private;
4172 if (tracing_disabled)
4175 if (trace_array_get(tr) < 0)
4178 filp->private_data = inode->i_private;
4183 static int tracing_release(struct inode *inode, struct file *file)
4185 struct trace_array *tr = inode->i_private;
4186 struct seq_file *m = file->private_data;
4187 struct trace_iterator *iter;
4190 if (!(file->f_mode & FMODE_READ)) {
4191 trace_array_put(tr);
4195 /* Writes do not use seq_file */
4197 mutex_lock(&trace_types_lock);
4199 for_each_tracing_cpu(cpu) {
4200 if (iter->buffer_iter[cpu])
4201 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4204 if (iter->trace && iter->trace->close)
4205 iter->trace->close(iter);
4207 if (!iter->snapshot)
4208 /* reenable tracing if it was previously enabled */
4209 tracing_start_tr(tr);
4211 __trace_array_put(tr);
4213 mutex_unlock(&trace_types_lock);
4215 mutex_destroy(&iter->mutex);
4216 free_cpumask_var(iter->started);
4218 kfree(iter->buffer_iter);
4219 seq_release_private(inode, file);
4224 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4226 struct trace_array *tr = inode->i_private;
4228 trace_array_put(tr);
4232 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4234 struct trace_array *tr = inode->i_private;
4236 trace_array_put(tr);
4238 return single_release(inode, file);
4241 static int tracing_open(struct inode *inode, struct file *file)
4243 struct trace_array *tr = inode->i_private;
4244 struct trace_iterator *iter;
4247 if (trace_array_get(tr) < 0)
4250 /* If this file was open for write, then erase contents */
4251 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4252 int cpu = tracing_get_cpu(inode);
4253 struct trace_buffer *trace_buf = &tr->trace_buffer;
4255 #ifdef CONFIG_TRACER_MAX_TRACE
4256 if (tr->current_trace->print_max)
4257 trace_buf = &tr->max_buffer;
4260 if (cpu == RING_BUFFER_ALL_CPUS)
4261 tracing_reset_online_cpus(trace_buf);
4263 tracing_reset(trace_buf, cpu);
4266 if (file->f_mode & FMODE_READ) {
4267 iter = __tracing_open(inode, file, false);
4269 ret = PTR_ERR(iter);
4270 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4271 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4275 trace_array_put(tr);
4281 * Some tracers are not suitable for instance buffers.
4282 * A tracer is always available for the global array (toplevel)
4283 * or if it explicitly states that it is.
4286 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4288 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4291 /* Find the next tracer that this trace array may use */
4292 static struct tracer *
4293 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4295 while (t && !trace_ok_for_array(t, tr))
4302 t_next(struct seq_file *m, void *v, loff_t *pos)
4304 struct trace_array *tr = m->private;
4305 struct tracer *t = v;
4310 t = get_tracer_for_array(tr, t->next);
4315 static void *t_start(struct seq_file *m, loff_t *pos)
4317 struct trace_array *tr = m->private;
4321 mutex_lock(&trace_types_lock);
4323 t = get_tracer_for_array(tr, trace_types);
4324 for (; t && l < *pos; t = t_next(m, t, &l))
4330 static void t_stop(struct seq_file *m, void *p)
4332 mutex_unlock(&trace_types_lock);
4335 static int t_show(struct seq_file *m, void *v)
4337 struct tracer *t = v;
4342 seq_puts(m, t->name);
4351 static const struct seq_operations show_traces_seq_ops = {
4358 static int show_traces_open(struct inode *inode, struct file *file)
4360 struct trace_array *tr = inode->i_private;
4364 if (tracing_disabled)
4367 ret = seq_open(file, &show_traces_seq_ops);
4371 m = file->private_data;
4378 tracing_write_stub(struct file *filp, const char __user *ubuf,
4379 size_t count, loff_t *ppos)
4384 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4388 if (file->f_mode & FMODE_READ)
4389 ret = seq_lseek(file, offset, whence);
4391 file->f_pos = ret = 0;
4396 static const struct file_operations tracing_fops = {
4397 .open = tracing_open,
4399 .write = tracing_write_stub,
4400 .llseek = tracing_lseek,
4401 .release = tracing_release,
4404 static const struct file_operations show_traces_fops = {
4405 .open = show_traces_open,
4407 .release = seq_release,
4408 .llseek = seq_lseek,
4412 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4413 size_t count, loff_t *ppos)
4415 struct trace_array *tr = file_inode(filp)->i_private;
4419 len = snprintf(NULL, 0, "%*pb\n",
4420 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4421 mask_str = kmalloc(len, GFP_KERNEL);
4425 len = snprintf(mask_str, len, "%*pb\n",
4426 cpumask_pr_args(tr->tracing_cpumask));
4431 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4440 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4441 size_t count, loff_t *ppos)
4443 struct trace_array *tr = file_inode(filp)->i_private;
4444 cpumask_var_t tracing_cpumask_new;
4447 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4450 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4454 local_irq_disable();
4455 arch_spin_lock(&tr->max_lock);
4456 for_each_tracing_cpu(cpu) {
4458 * Increase/decrease the disabled counter if we are
4459 * about to flip a bit in the cpumask:
4461 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4462 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4463 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4464 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4466 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4467 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4468 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4469 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4472 arch_spin_unlock(&tr->max_lock);
4475 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4476 free_cpumask_var(tracing_cpumask_new);
4481 free_cpumask_var(tracing_cpumask_new);
4486 static const struct file_operations tracing_cpumask_fops = {
4487 .open = tracing_open_generic_tr,
4488 .read = tracing_cpumask_read,
4489 .write = tracing_cpumask_write,
4490 .release = tracing_release_generic_tr,
4491 .llseek = generic_file_llseek,
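/*
 * Illustrative note, not part of the original file: tracing_cpumask
 * takes the usual hex cpumask format, e.g. limiting tracing to CPUs 0
 * and 1:
 *
 *	echo 3 > /sys/kernel/tracing/tracing_cpumask
 */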
4494 static int tracing_trace_options_show(struct seq_file *m, void *v)
4496 struct tracer_opt *trace_opts;
4497 struct trace_array *tr = m->private;
4501 mutex_lock(&trace_types_lock);
4502 tracer_flags = tr->current_trace->flags->val;
4503 trace_opts = tr->current_trace->flags->opts;
4505 for (i = 0; trace_options[i]; i++) {
4506 if (tr->trace_flags & (1 << i))
4507 seq_printf(m, "%s\n", trace_options[i]);
4509 seq_printf(m, "no%s\n", trace_options[i]);
4512 for (i = 0; trace_opts[i].name; i++) {
4513 if (tracer_flags & trace_opts[i].bit)
4514 seq_printf(m, "%s\n", trace_opts[i].name);
4516 seq_printf(m, "no%s\n", trace_opts[i].name);
4518 mutex_unlock(&trace_types_lock);
4523 static int __set_tracer_option(struct trace_array *tr,
4524 struct tracer_flags *tracer_flags,
4525 struct tracer_opt *opts, int neg)
4527 struct tracer *trace = tracer_flags->trace;
4530 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4535 tracer_flags->val &= ~opts->bit;
4537 tracer_flags->val |= opts->bit;
4541 /* Try to assign a tracer specific option */
4542 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4544 struct tracer *trace = tr->current_trace;
4545 struct tracer_flags *tracer_flags = trace->flags;
4546 struct tracer_opt *opts = NULL;
4549 for (i = 0; tracer_flags->opts[i].name; i++) {
4550 opts = &tracer_flags->opts[i];
4552 if (strcmp(cmp, opts->name) == 0)
4553 return __set_tracer_option(tr, trace->flags, opts, neg);
4559 /* Some tracers require overwrite to stay enabled */
4560 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4562 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4568 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4570 /* do nothing if flag is already set */
4571 if (!!(tr->trace_flags & mask) == !!enabled)
4574 /* Give the tracer a chance to approve the change */
4575 if (tr->current_trace->flag_changed)
4576 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4580 tr->trace_flags |= mask;
4582 tr->trace_flags &= ~mask;
4584 if (mask == TRACE_ITER_RECORD_CMD)
4585 trace_event_enable_cmd_record(enabled);
4587 if (mask == TRACE_ITER_RECORD_TGID) {
4589 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4593 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4597 trace_event_enable_tgid_record(enabled);
4600 if (mask == TRACE_ITER_EVENT_FORK)
4601 trace_event_follow_fork(tr, enabled);
4603 if (mask == TRACE_ITER_FUNC_FORK)
4604 ftrace_pid_follow_fork(tr, enabled);
4606 if (mask == TRACE_ITER_OVERWRITE) {
4607 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4608 #ifdef CONFIG_TRACER_MAX_TRACE
4609 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4613 if (mask == TRACE_ITER_PRINTK) {
4614 trace_printk_start_stop_comm(enabled);
4615 trace_printk_control(enabled);
4621 static int trace_set_options(struct trace_array *tr, char *option)
4626 size_t orig_len = strlen(option);
4629 cmp = strstrip(option);
4631 len = str_has_prefix(cmp, "no");
4637 mutex_lock(&trace_types_lock);
4639 ret = match_string(trace_options, -1, cmp);
4640 /* If no option could be set, test the specific tracer options */
4642 ret = set_tracer_option(tr, cmp, neg);
4644 ret = set_tracer_flag(tr, 1 << ret, !neg);
4646 mutex_unlock(&trace_types_lock);
4649 * If the first trailing whitespace is replaced with '\0' by strstrip,
4650 * turn it back into a space.
4652 if (orig_len > strlen(option))
4653 option[strlen(option)] = ' ';
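/*
 * Illustrative note, not part of the original file: the option strings
 * handled here come either from the trace_options file or from the
 * "trace_options=" boot parameter, e.g.
 *
 *	echo noprint-parent > /sys/kernel/tracing/trace_options
 *	trace_options=sym-offset,noprint-parent
 */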
4658 static void __init apply_trace_boot_options(void)
4660 char *buf = trace_boot_options_buf;
4664 option = strsep(&buf, ",");
4670 trace_set_options(&global_trace, option);
4672 /* Put back the comma to allow this to be called again */
4679 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4680 size_t cnt, loff_t *ppos)
4682 struct seq_file *m = filp->private_data;
4683 struct trace_array *tr = m->private;
4687 if (cnt >= sizeof(buf))
4690 if (copy_from_user(buf, ubuf, cnt))
4695 ret = trace_set_options(tr, buf);
4704 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4706 struct trace_array *tr = inode->i_private;
4709 if (tracing_disabled)
4712 if (trace_array_get(tr) < 0)
4715 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4717 trace_array_put(tr);
4722 static const struct file_operations tracing_iter_fops = {
4723 .open = tracing_trace_options_open,
4725 .llseek = seq_lseek,
4726 .release = tracing_single_release_tr,
4727 .write = tracing_trace_options_write,
4730 static const char readme_msg[] =
4731 "tracing mini-HOWTO:\n\n"
4732 "# echo 0 > tracing_on : quick way to disable tracing\n"
4733 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4734 " Important files:\n"
4735 " trace\t\t\t- The static contents of the buffer\n"
4736 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4737 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4738 " current_tracer\t- function and latency tracers\n"
4739 " available_tracers\t- list of configured tracers for current_tracer\n"
4740 " error_log\t- error log for failed commands (that support it)\n"
4741 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4742 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4743 " trace_clock\t\t- change the clock used to order events\n"
4744 " local: Per cpu clock but may not be synced across CPUs\n"
4745 " global: Synced across CPUs but slows tracing down.\n"
4746 " counter: Not a clock, but just an increment\n"
4747 " uptime: Jiffy counter from time of boot\n"
4748 " perf: Same clock that perf events use\n"
4749 #ifdef CONFIG_X86_64
4750 " x86-tsc: TSC cycle counter\n"
4752 "\n timestamp_mode\t- view the mode used to timestamp events\n"
4753 " delta: Delta difference against a buffer-wide timestamp\n"
4754 " absolute: Absolute (standalone) timestamp\n"
4755 "\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
4756 "\n trace_marker_raw\t\t- Writes into this file write binary data into the kernel buffer\n"
4757 " tracing_cpumask\t- Limit which CPUs to trace\n"
4758 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4759 "\t\t\t Remove sub-buffer with rmdir\n"
4760 " trace_options\t\t- Set format or modify how tracing happens\n"
4761 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4762 "\t\t\t option name\n"
4763 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4764 #ifdef CONFIG_DYNAMIC_FTRACE
4765 "\n available_filter_functions - list of functions that can be filtered on\n"
4766 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4767 "\t\t\t functions\n"
4768 "\t accepts: func_full_name or glob-matching-pattern\n"
4769 "\t modules: Can select a group via module\n"
4770 "\t Format: :mod:<module-name>\n"
4771 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4772 "\t triggers: a command to perform when function is hit\n"
4773 "\t Format: <function>:<trigger>[:count]\n"
4774 "\t trigger: traceon, traceoff\n"
4775 "\t\t enable_event:<system>:<event>\n"
4776 "\t\t disable_event:<system>:<event>\n"
4777 #ifdef CONFIG_STACKTRACE
4780 #ifdef CONFIG_TRACER_SNAPSHOT
4785 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4786 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4787 "\t The first one will disable tracing every time do_fault is hit\n"
4788 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4789 "\t The first time do_trap is hit and it disables tracing, the\n"
4790 "\t counter will decrement to 2. If tracing is already disabled,\n"
4791 "\t the counter will not decrement. It only decrements when the\n"
4792 "\t trigger did work\n"
4793 "\t To remove trigger without count:\n"
4794 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4795 "\t To remove trigger with a count:\n"
4796 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4797 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4798 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4799 "\t modules: Can select a group via module command :mod:\n"
4800 "\t Does not accept triggers\n"
4801 #endif /* CONFIG_DYNAMIC_FTRACE */
4802 #ifdef CONFIG_FUNCTION_TRACER
4803 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4806 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4807 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4808 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4809 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4811 #ifdef CONFIG_TRACER_SNAPSHOT
4812 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4813 "\t\t\t snapshot buffer. Read the contents for more\n"
4814 "\t\t\t information\n"
4816 #ifdef CONFIG_STACK_TRACER
4817 " stack_trace\t\t- Shows the max stack trace when active\n"
4818 " stack_max_size\t- Shows current max stack size that was traced\n"
4819 "\t\t\t Write into this file to reset the max size (trigger a\n"
4820 "\t\t\t new trace)\n"
4821 #ifdef CONFIG_DYNAMIC_FTRACE
4822 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4825 #endif /* CONFIG_STACK_TRACER */
4826 #ifdef CONFIG_DYNAMIC_EVENTS
4827 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4828 "\t\t\t Write into this file to define/undefine new trace events.\n"
4830 #ifdef CONFIG_KPROBE_EVENTS
4831 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4832 "\t\t\t Write into this file to define/undefine new trace events.\n"
4834 #ifdef CONFIG_UPROBE_EVENTS
4835 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4836 "\t\t\t Write into this file to define/undefine new trace events.\n"
4838 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4839 "\t accepts: event-definitions (one definition per line)\n"
4840 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4841 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4842 #ifdef CONFIG_HIST_TRIGGERS
4843 "\t s:[synthetic/]<event> <field> [<field>]\n"
4845 "\t -:[<group>/]<event>\n"
4846 #ifdef CONFIG_KPROBE_EVENTS
4847 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4848 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4850 #ifdef CONFIG_UPROBE_EVENTS
4851 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4853 "\t args: <name>=fetcharg[:type]\n"
4854 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4855 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4856 "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n"
4858 "\t $stack<index>, $stack, $retval, $comm\n"
4860 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4861 "\t b<bit-width>@<bit-offset>/<container-size>,\n"
4862 "\t <type>\\[<array-size>\\]\n"
4863 #ifdef CONFIG_HIST_TRIGGERS
4864 "\t field: <stype> <name>;\n"
4865 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4866 "\t [unsigned] char/int/long\n"
4869 " events/\t\t- Directory containing all trace event subsystems:\n"
4870 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4871 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4872 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4874 " filter\t\t- If set, only events passing filter are traced\n"
4875 " events/<system>/<event>/\t- Directory containing control files for\n"
4877 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4878 " filter\t\t- If set, only events passing filter are traced\n"
4879 " trigger\t\t- If set, a command to perform when event is hit\n"
4880 "\t Format: <trigger>[:count][if <filter>]\n"
4881 "\t trigger: traceon, traceoff\n"
4882 "\t enable_event:<system>:<event>\n"
4883 "\t disable_event:<system>:<event>\n"
4884 #ifdef CONFIG_HIST_TRIGGERS
4885 "\t enable_hist:<system>:<event>\n"
4886 "\t disable_hist:<system>:<event>\n"
4888 #ifdef CONFIG_STACKTRACE
4891 #ifdef CONFIG_TRACER_SNAPSHOT
4894 #ifdef CONFIG_HIST_TRIGGERS
4895 "\t\t hist (see below)\n"
4897 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4898 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4899 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4900 "\t events/block/block_unplug/trigger\n"
4901 "\t The first disables tracing every time block_unplug is hit.\n"
4902 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4903 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4904 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4905 "\t Like function triggers, the counter is only decremented if it\n"
4906 "\t enabled or disabled tracing.\n"
4907 "\t To remove a trigger without a count:\n"
4908 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4909 "\t To remove a trigger with a count:\n"
4910 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4911 "\t Filters can be ignored when removing a trigger.\n"
4912 #ifdef CONFIG_HIST_TRIGGERS
4913 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4914 "\t Format: hist:keys=<field1[,field2,...]>\n"
4915 "\t [:values=<field1[,field2,...]>]\n"
4916 "\t [:sort=<field1[,field2,...]>]\n"
4917 "\t [:size=#entries]\n"
4918 "\t [:pause][:continue][:clear]\n"
4919 "\t [:name=histname1]\n"
4920 "\t [:<handler>.<action>]\n"
4921 "\t [if <filter>]\n\n"
4922 "\t When a matching event is hit, an entry is added to a hash\n"
4923 "\t table using the key(s) and value(s) named, and the value of a\n"
4924 "\t sum called 'hitcount' is incremented. Keys and values\n"
4925 "\t correspond to fields in the event's format description. Keys\n"
4926 "\t can be any field, or the special string 'stacktrace'.\n"
4927 "\t Compound keys consisting of up to two fields can be specified\n"
4928 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4929 "\t fields. Sort keys consisting of up to two fields can be\n"
4930 "\t specified using the 'sort' keyword. The sort direction can\n"
4931 "\t be modified by appending '.descending' or '.ascending' to a\n"
4932 "\t sort field. The 'size' parameter can be used to specify more\n"
4933 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4934 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4935 "\t its histogram data will be shared with other triggers of the\n"
4936 "\t same name, and trigger hits will update this common data.\n\n"
4937 "\t Reading the 'hist' file for the event will dump the hash\n"
4938 "\t table in its entirety to stdout. If there are multiple hist\n"
4939 "\t triggers attached to an event, there will be a table for each\n"
4940 "\t trigger in the output. The table displayed for a named\n"
4941 "\t trigger will be the same as any other instance having the\n"
4942 "\t same name. The default format used to display a given field\n"
4943 "\t can be modified by appending any of the following modifiers\n"
4944 "\t to the field name, as applicable:\n\n"
4945 "\t .hex display a number as a hex value\n"
4946 "\t .sym display an address as a symbol\n"
4947 "\t .sym-offset display an address as a symbol and offset\n"
4948 "\t .execname display a common_pid as a program name\n"
4949 "\t .syscall display a syscall id as a syscall name\n"
4950 "\t .log2 display log2 value rather than raw number\n"
4951 "\t .usecs display a common_timestamp in microseconds\n\n"
4952 "\t The 'pause' parameter can be used to pause an existing hist\n"
4953 "\t trigger or to start a hist trigger but not log any events\n"
4954 "\t until told to do so. 'continue' can be used to start or\n"
4955 "\t restart a paused hist trigger.\n\n"
4956 "\t The 'clear' parameter will clear the contents of a running\n"
4957 "\t hist trigger and leave its current paused/active state\n"
4959 "\t The enable_hist and disable_hist triggers can be used to\n"
4960 "\t have one event conditionally start and stop another event's\n"
4961 "\t already-attached hist trigger. The syntax is analogous to\n"
4962 "\t the enable_event and disable_event triggers.\n\n"
4963 "\t Hist trigger handlers and actions are executed whenever a\n"
4964 "\t a histogram entry is added or updated. They take the form:\n\n"
4965 "\t <handler>.<action>\n\n"
4966 "\t The available handlers are:\n\n"
4967 "\t onmatch(matching.event) - invoke on addition or update\n"
4968 "\t onmax(var) - invoke if var exceeds current max\n"
4969 "\t onchange(var) - invoke action if var changes\n\n"
4970 "\t The available actions are:\n\n"
4971 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
4972 "\t save(field,...) - save current event fields\n"
4973 #ifdef CONFIG_TRACER_SNAPSHOT
4974 "\t snapshot() - snapshot the trace buffer\n"
4980 tracing_readme_read(struct file *filp, char __user *ubuf,
4981 size_t cnt, loff_t *ppos)
4983 return simple_read_from_buffer(ubuf, cnt, ppos,
4984 readme_msg, strlen(readme_msg));
4987 static const struct file_operations tracing_readme_fops = {
4988 .open = tracing_open_generic,
4989 .read = tracing_readme_read,
4990 .llseek = generic_file_llseek,
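/*
 * Seq_file iterators for the "saved_tgids" file: walk the cached
 * pid -> tgid map (filled in while tgid recording is enabled) and
 * skip entries that have no tgid recorded.
 */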
4993 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4997 if (*pos || m->count)
5002 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5003 if (trace_find_tgid(*ptr))
5010 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5020 v = saved_tgids_next(m, v, &l);
5028 static void saved_tgids_stop(struct seq_file *m, void *v)
5032 static int saved_tgids_show(struct seq_file *m, void *v)
5034 int pid = (int *)v - tgid_map;
5036 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5040 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5041 .start = saved_tgids_start,
5042 .stop = saved_tgids_stop,
5043 .next = saved_tgids_next,
5044 .show = saved_tgids_show,
5047 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5049 if (tracing_disabled)
5052 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5056 static const struct file_operations tracing_saved_tgids_fops = {
5057 .open = tracing_saved_tgids_open,
5059 .llseek = seq_lseek,
5060 .release = seq_release,
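/*
 * Seq_file iterators for the "saved_cmdlines" file: walk the cached
 * pid -> comm map and emit one "pid comm" pair per line.
 */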
5063 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5065 unsigned int *ptr = v;
5067 if (*pos || m->count)
5072 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5074 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5083 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5089 arch_spin_lock(&trace_cmdline_lock);
5091 v = &savedcmd->map_cmdline_to_pid[0];
5093 v = saved_cmdlines_next(m, v, &l);
5101 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5103 arch_spin_unlock(&trace_cmdline_lock);
5107 static int saved_cmdlines_show(struct seq_file *m, void *v)
5109 char buf[TASK_COMM_LEN];
5110 unsigned int *pid = v;
5112 __trace_find_cmdline(*pid, buf);
5113 seq_printf(m, "%d %s\n", *pid, buf);
5117 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5118 .start = saved_cmdlines_start,
5119 .next = saved_cmdlines_next,
5120 .stop = saved_cmdlines_stop,
5121 .show = saved_cmdlines_show,
5124 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5126 if (tracing_disabled)
5129 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5132 static const struct file_operations tracing_saved_cmdlines_fops = {
5133 .open = tracing_saved_cmdlines_open,
5135 .llseek = seq_lseek,
5136 .release = seq_release,
5140 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5141 size_t cnt, loff_t *ppos)
5146 arch_spin_lock(&trace_cmdline_lock);
5147 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5148 arch_spin_unlock(&trace_cmdline_lock);
5150 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5153 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5155 kfree(s->saved_cmdlines);
5156 kfree(s->map_cmdline_to_pid);
5160 static int tracing_resize_saved_cmdlines(unsigned int val)
5162 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5164 s = kmalloc(sizeof(*s), GFP_KERNEL);
5168 if (allocate_cmdlines_buffer(val, s) < 0) {
5173 arch_spin_lock(&trace_cmdline_lock);
5174 savedcmd_temp = savedcmd;
5176 arch_spin_unlock(&trace_cmdline_lock);
5177 free_saved_cmdlines_buffer(savedcmd_temp);
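/*
 * Write handler for "saved_cmdlines_size". As an illustrative example:
 *
 *   echo 1024 > saved_cmdlines_size
 *
 * resizes the cmdline cache to hold 1024 entries; the value must be at
 * least 1 and no larger than PID_MAX_DEFAULT.
 */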
5183 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5184 size_t cnt, loff_t *ppos)
5189 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5193 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5194 if (!val || val > PID_MAX_DEFAULT)
5197 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5206 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5207 .open = tracing_open_generic,
5208 .read = tracing_saved_cmdlines_size_read,
5209 .write = tracing_saved_cmdlines_size_write,
5212 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5213 static union trace_eval_map_item *
5214 update_eval_map(union trace_eval_map_item *ptr)
5216 if (!ptr->map.eval_string) {
5217 if (ptr->tail.next) {
5218 ptr = ptr->tail.next;
5219 /* Set ptr to the next real item (skip head) */
5227 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5229 union trace_eval_map_item *ptr = v;
5232 * Paranoid! If ptr points to end, we don't want to increment past it.
5233 * This really should never happen.
5235 ptr = update_eval_map(ptr);
5236 if (WARN_ON_ONCE(!ptr))
5243 ptr = update_eval_map(ptr);
5248 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5250 union trace_eval_map_item *v;
5253 mutex_lock(&trace_eval_mutex);
5255 v = trace_eval_maps;
5259 while (v && l < *pos) {
5260 v = eval_map_next(m, v, &l);
5266 static void eval_map_stop(struct seq_file *m, void *v)
5268 mutex_unlock(&trace_eval_mutex);
5271 static int eval_map_show(struct seq_file *m, void *v)
5273 union trace_eval_map_item *ptr = v;
5275 seq_printf(m, "%s %ld (%s)\n",
5276 ptr->map.eval_string, ptr->map.eval_value,
5282 static const struct seq_operations tracing_eval_map_seq_ops = {
5283 .start = eval_map_start,
5284 .next = eval_map_next,
5285 .stop = eval_map_stop,
5286 .show = eval_map_show,
5289 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5291 if (tracing_disabled)
5294 return seq_open(filp, &tracing_eval_map_seq_ops);
5297 static const struct file_operations tracing_eval_map_fops = {
5298 .open = tracing_eval_map_open,
5300 .llseek = seq_lseek,
5301 .release = seq_release,
5304 static inline union trace_eval_map_item *
5305 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5307 /* Return tail of array given the head */
5308 return ptr + ptr->head.length + 1;
5312 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5315 struct trace_eval_map **stop;
5316 struct trace_eval_map **map;
5317 union trace_eval_map_item *map_array;
5318 union trace_eval_map_item *ptr;
5323 * The trace_eval_maps contains the maps plus a head and a tail item,
5324 * where the head holds the module and the length of the array, and the
5325 * tail holds a pointer to the next list.
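 * As an illustrative sketch of one such block:
 *
 *   [ head: mod, length ][ map 0 ] ... [ map length-1 ][ tail: next ]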
5327 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5329 pr_warn("Unable to allocate trace eval mapping\n");
5333 mutex_lock(&trace_eval_mutex);
5335 if (!trace_eval_maps)
5336 trace_eval_maps = map_array;
5338 ptr = trace_eval_maps;
5340 ptr = trace_eval_jmp_to_tail(ptr);
5341 if (!ptr->tail.next)
5343 ptr = ptr->tail.next;
5346 ptr->tail.next = map_array;
5348 map_array->head.mod = mod;
5349 map_array->head.length = len;
5352 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5353 map_array->map = **map;
5356 memset(map_array, 0, sizeof(*map_array));
5358 mutex_unlock(&trace_eval_mutex);
5361 static void trace_create_eval_file(struct dentry *d_tracer)
5363 trace_create_file("eval_map", 0444, d_tracer,
5364 NULL, &tracing_eval_map_fops);
5367 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5368 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5369 static inline void trace_insert_eval_map_file(struct module *mod,
5370 struct trace_eval_map **start, int len) { }
5371 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5373 static void trace_insert_eval_map(struct module *mod,
5374 struct trace_eval_map **start, int len)
5376 struct trace_eval_map **map;
5383 trace_event_eval_update(map, len);
5385 trace_insert_eval_map_file(mod, start, len);
5389 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5390 size_t cnt, loff_t *ppos)
5392 struct trace_array *tr = filp->private_data;
5393 char buf[MAX_TRACER_SIZE+2];
5396 mutex_lock(&trace_types_lock);
5397 r = sprintf(buf, "%s\n", tr->current_trace->name);
5398 mutex_unlock(&trace_types_lock);
5400 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5403 int tracer_init(struct tracer *t, struct trace_array *tr)
5405 tracing_reset_online_cpus(&tr->trace_buffer);
5409 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5413 for_each_tracing_cpu(cpu)
5414 per_cpu_ptr(buf->data, cpu)->entries = val;
5417 #ifdef CONFIG_TRACER_MAX_TRACE
5418 /* resize @tr's buffer to the size of @size_tr's entries */
5419 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5420 struct trace_buffer *size_buf, int cpu_id)
5424 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5425 for_each_tracing_cpu(cpu) {
5426 ret = ring_buffer_resize(trace_buf->buffer,
5427 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5430 per_cpu_ptr(trace_buf->data, cpu)->entries =
5431 per_cpu_ptr(size_buf->data, cpu)->entries;
5434 ret = ring_buffer_resize(trace_buf->buffer,
5435 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5437 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5438 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5443 #endif /* CONFIG_TRACER_MAX_TRACE */
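/*
 * Resize the ring buffer of @tr to @size, either for a single CPU or
 * for all CPUs (RING_BUFFER_ALL_CPUS). When the max/snapshot buffer is
 * in use, it is resized to match the main buffer.
 */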
5445 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5446 unsigned long size, int cpu)
5451 * If kernel or user changes the size of the ring buffer
5452 * we use the size that was given, and we can forget about
5453 * expanding it later.
5455 ring_buffer_expanded = true;
5457 /* May be called before buffers are initialized */
5458 if (!tr->trace_buffer.buffer)
5461 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5465 #ifdef CONFIG_TRACER_MAX_TRACE
5466 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5467 !tr->current_trace->use_max_tr)
5470 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5472 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5473 &tr->trace_buffer, cpu);
5476 * AARGH! We are left with different
5477 * size max buffer!!!!
5478 * The max buffer is our "snapshot" buffer.
5479 * When a tracer needs a snapshot (one of the
5480 * latency tracers), it swaps the max buffer
5481 * with the saved snapshot. We managed to
5482 * update the size of the main buffer, but failed to
5483 * update the size of the max buffer. When we then tried
5484 * to reset the main buffer to the original size, we
5485 * failed there too. This is very unlikely to
5486 * happen, but if it does, warn and kill all
5490 tracing_disabled = 1;
5495 if (cpu == RING_BUFFER_ALL_CPUS)
5496 set_buffer_entries(&tr->max_buffer, size);
5498 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5501 #endif /* CONFIG_TRACER_MAX_TRACE */
5503 if (cpu == RING_BUFFER_ALL_CPUS)
5504 set_buffer_entries(&tr->trace_buffer, size);
5506 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5511 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5512 unsigned long size, int cpu_id)
5516 mutex_lock(&trace_types_lock);
5518 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5519 /* make sure this CPU is enabled in the mask */
5520 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5526 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5531 mutex_unlock(&trace_types_lock);
5538 * tracing_update_buffers - used by tracing facility to expand ring buffers
5540 * To save memory when tracing is never used on a system that has it
5541 * configured in, the ring buffers start out at a minimum size. Once
5542 * a user starts to use the tracing facility, they need to grow
5543 * to their default size.
5545 * This function is to be called when a tracer is about to be used.
5547 int tracing_update_buffers(void)
5551 mutex_lock(&trace_types_lock);
5552 if (!ring_buffer_expanded)
5553 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5554 RING_BUFFER_ALL_CPUS);
5555 mutex_unlock(&trace_types_lock);
5560 struct trace_option_dentry;
5563 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5566 * Used to clear out the tracer before deletion of an instance.
5567 * Must have trace_types_lock held.
5569 static void tracing_set_nop(struct trace_array *tr)
5571 if (tr->current_trace == &nop_trace)
5574 tr->current_trace->enabled--;
5576 if (tr->current_trace->reset)
5577 tr->current_trace->reset(tr);
5579 tr->current_trace = &nop_trace;
5582 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5584 /* Only enable if the directory has been created already. */
5588 create_trace_option_files(tr, t);
5591 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5594 #ifdef CONFIG_TRACER_MAX_TRACE
5599 mutex_lock(&trace_types_lock);
5601 if (!ring_buffer_expanded) {
5602 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5603 RING_BUFFER_ALL_CPUS);
5609 for (t = trace_types; t; t = t->next) {
5610 if (strcmp(t->name, buf) == 0)
5617 if (t == tr->current_trace)
5620 #ifdef CONFIG_TRACER_SNAPSHOT
5621 if (t->use_max_tr) {
5622 arch_spin_lock(&tr->max_lock);
5623 if (tr->cond_snapshot)
5625 arch_spin_unlock(&tr->max_lock);
5630 /* Some tracers won't work on kernel command line */
5631 if (system_state < SYSTEM_RUNNING && t->noboot) {
5632 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5637 /* Some tracers are only allowed for the top level buffer */
5638 if (!trace_ok_for_array(t, tr)) {
5643 /* If trace pipe files are being read, we can't change the tracer */
5644 if (tr->current_trace->ref) {
5649 trace_branch_disable();
5651 tr->current_trace->enabled--;
5653 if (tr->current_trace->reset)
5654 tr->current_trace->reset(tr);
5656 /* Current trace needs to be nop_trace before synchronize_rcu */
5657 tr->current_trace = &nop_trace;
5659 #ifdef CONFIG_TRACER_MAX_TRACE
5660 had_max_tr = tr->allocated_snapshot;
5662 if (had_max_tr && !t->use_max_tr) {
5664 * We need to make sure that the update_max_tr sees that
5665 * current_trace changed to nop_trace to keep it from
5666 * swapping the buffers after we resize it.
5667 * The update_max_tr is called from interrupts disabled
5668 * so a synchronized_sched() is sufficient.
5675 #ifdef CONFIG_TRACER_MAX_TRACE
5676 if (t->use_max_tr && !had_max_tr) {
5677 ret = tracing_alloc_snapshot_instance(tr);
5684 ret = tracer_init(t, tr);
5689 tr->current_trace = t;
5690 tr->current_trace->enabled++;
5691 trace_branch_enable(tr);
5693 mutex_unlock(&trace_types_lock);
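/*
 * Write handler for "current_tracer". As an illustrative example:
 *
 *   echo nop > current_tracer
 *
 * strips trailing whitespace from the input and switches the active
 * tracer by name.
 */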
5699 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5700 size_t cnt, loff_t *ppos)
5702 struct trace_array *tr = filp->private_data;
5703 char buf[MAX_TRACER_SIZE+1];
5710 if (cnt > MAX_TRACER_SIZE)
5711 cnt = MAX_TRACER_SIZE;
5713 if (copy_from_user(buf, ubuf, cnt))
5718 /* strip ending whitespace. */
5719 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5722 err = tracing_set_tracer(tr, buf);
5732 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5733 size_t cnt, loff_t *ppos)
5738 r = snprintf(buf, sizeof(buf), "%ld\n",
5739 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5740 if (r > sizeof(buf))
5742 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5746 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5747 size_t cnt, loff_t *ppos)
5752 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5762 tracing_thresh_read(struct file *filp, char __user *ubuf,
5763 size_t cnt, loff_t *ppos)
5765 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5769 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5770 size_t cnt, loff_t *ppos)
5772 struct trace_array *tr = filp->private_data;
5775 mutex_lock(&trace_types_lock);
5776 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5780 if (tr->current_trace->update_thresh) {
5781 ret = tr->current_trace->update_thresh(tr);
5788 mutex_unlock(&trace_types_lock);
5793 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5796 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5797 size_t cnt, loff_t *ppos)
5799 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5803 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5804 size_t cnt, loff_t *ppos)
5806 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5811 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5813 struct trace_array *tr = inode->i_private;
5814 struct trace_iterator *iter;
5817 if (tracing_disabled)
5820 if (trace_array_get(tr) < 0)
5823 mutex_lock(&trace_types_lock);
5825 /* create a buffer to store the information to pass to userspace */
5826 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5829 __trace_array_put(tr);
5833 trace_seq_init(&iter->seq);
5834 iter->trace = tr->current_trace;
5836 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5841 /* trace pipe does not show start of buffer */
5842 cpumask_setall(iter->started);
5844 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5845 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5847 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5848 if (trace_clocks[tr->clock_id].in_ns)
5849 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5852 iter->trace_buffer = &tr->trace_buffer;
5853 iter->cpu_file = tracing_get_cpu(inode);
5854 mutex_init(&iter->mutex);
5855 filp->private_data = iter;
5857 if (iter->trace->pipe_open)
5858 iter->trace->pipe_open(iter);
5860 nonseekable_open(inode, filp);
5862 tr->current_trace->ref++;
5864 mutex_unlock(&trace_types_lock);
5869 __trace_array_put(tr);
5870 mutex_unlock(&trace_types_lock);
5874 static int tracing_release_pipe(struct inode *inode, struct file *file)
5876 struct trace_iterator *iter = file->private_data;
5877 struct trace_array *tr = inode->i_private;
5879 mutex_lock(&trace_types_lock);
5881 tr->current_trace->ref--;
5883 if (iter->trace->pipe_close)
5884 iter->trace->pipe_close(iter);
5886 mutex_unlock(&trace_types_lock);
5888 free_cpumask_var(iter->started);
5889 mutex_destroy(&iter->mutex);
5892 trace_array_put(tr);
5898 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5900 struct trace_array *tr = iter->tr;
5902 /* Iterators are static, they should be filled or empty */
5903 if (trace_buffer_iter(iter, iter->cpu_file))
5904 return EPOLLIN | EPOLLRDNORM;
5906 if (tr->trace_flags & TRACE_ITER_BLOCK)
5908 * Always select as readable when in blocking mode
5910 return EPOLLIN | EPOLLRDNORM;
5912 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5917 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5919 struct trace_iterator *iter = filp->private_data;
5921 return trace_poll(iter, filp, poll_table);
5924 /* Must be called with iter->mutex held. */
5925 static int tracing_wait_pipe(struct file *filp)
5927 struct trace_iterator *iter = filp->private_data;
5930 while (trace_empty(iter)) {
5932 if ((filp->f_flags & O_NONBLOCK)) {
5937 * We only return EOF once tracing has been disabled and we have
5938 * already read something. We still block if tracing is disabled but
5939 * nothing has been read yet. This allows a user to cat this file, and
5940 * then enable tracing. But after we have read something,
5941 * we give an EOF when tracing is disabled again.
5943 * iter->pos will be 0 if we haven't read anything.
5945 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5948 mutex_unlock(&iter->mutex);
5950 ret = wait_on_pipe(iter, 0);
5952 mutex_lock(&iter->mutex);
5965 tracing_read_pipe(struct file *filp, char __user *ubuf,
5966 size_t cnt, loff_t *ppos)
5968 struct trace_iterator *iter = filp->private_data;
5972 * Avoid more than one consumer on a single file descriptor.
5973 * This is just a matter of trace coherency; the ring buffer itself
5976 mutex_lock(&iter->mutex);
5978 /* return any leftover data */
5979 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5983 trace_seq_init(&iter->seq);
5985 if (iter->trace->read) {
5986 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5992 sret = tracing_wait_pipe(filp);
5996 /* stop when tracing is finished */
5997 if (trace_empty(iter)) {
6002 if (cnt >= PAGE_SIZE)
6003 cnt = PAGE_SIZE - 1;
6005 /* reset all but tr, trace, and overruns */
6006 memset(&iter->seq, 0,
6007 sizeof(struct trace_iterator) -
6008 offsetof(struct trace_iterator, seq));
6009 cpumask_clear(iter->started);
6012 trace_event_read_lock();
6013 trace_access_lock(iter->cpu_file);
6014 while (trace_find_next_entry_inc(iter) != NULL) {
6015 enum print_line_t ret;
6016 int save_len = iter->seq.seq.len;
6018 ret = print_trace_line(iter);
6019 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6020 /* don't print partial lines */
6021 iter->seq.seq.len = save_len;
6024 if (ret != TRACE_TYPE_NO_CONSUME)
6025 trace_consume(iter);
6027 if (trace_seq_used(&iter->seq) >= cnt)
6031 * Setting the full flag means we reached the trace_seq buffer
6032 * size and we should leave by partial output condition above.
6033 * One of the trace_seq_* functions is not used properly.
6035 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6038 trace_access_unlock(iter->cpu_file);
6039 trace_event_read_unlock();
6041 /* Now copy what we have to the user */
6042 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6043 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6044 trace_seq_init(&iter->seq);
6047 * If there was nothing to send to user, in spite of consuming trace
6048 * entries, go back to wait for more entries.
6054 mutex_unlock(&iter->mutex);
6059 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6062 __free_page(spd->pages[idx]);
6065 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
6066 .confirm = generic_pipe_buf_confirm,
6067 .release = generic_pipe_buf_release,
6068 .steal = generic_pipe_buf_steal,
6069 .get = generic_pipe_buf_get,
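/*
 * Render trace entries into iter->seq until the page-sized seq buffer
 * fills up or the buffer runs out of entries; returns how much of @rem
 * is still left to fill.
 */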
6073 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6079 /* Seq buffer is page-sized, exactly what we need. */
6081 save_len = iter->seq.seq.len;
6082 ret = print_trace_line(iter);
6084 if (trace_seq_has_overflowed(&iter->seq)) {
6085 iter->seq.seq.len = save_len;
6090 * This should not be hit, because it should only
6091 * be set if the iter->seq overflowed. But check it
6092 * anyway to be safe.
6094 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6095 iter->seq.seq.len = save_len;
6099 count = trace_seq_used(&iter->seq) - save_len;
6102 iter->seq.seq.len = save_len;
6106 if (ret != TRACE_TYPE_NO_CONSUME)
6107 trace_consume(iter);
6109 if (!trace_find_next_entry_inc(iter)) {
6119 static ssize_t tracing_splice_read_pipe(struct file *filp,
6121 struct pipe_inode_info *pipe,
6125 struct page *pages_def[PIPE_DEF_BUFFERS];
6126 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6127 struct trace_iterator *iter = filp->private_data;
6128 struct splice_pipe_desc spd = {
6130 .partial = partial_def,
6131 .nr_pages = 0, /* This gets updated below. */
6132 .nr_pages_max = PIPE_DEF_BUFFERS,
6133 .ops = &tracing_pipe_buf_ops,
6134 .spd_release = tracing_spd_release_pipe,
6140 if (splice_grow_spd(pipe, &spd))
6143 mutex_lock(&iter->mutex);
6145 if (iter->trace->splice_read) {
6146 ret = iter->trace->splice_read(iter, filp,
6147 ppos, pipe, len, flags);
6152 ret = tracing_wait_pipe(filp);
6156 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6161 trace_event_read_lock();
6162 trace_access_lock(iter->cpu_file);
6164 /* Fill as many pages as possible. */
6165 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6166 spd.pages[i] = alloc_page(GFP_KERNEL);
6170 rem = tracing_fill_pipe_page(rem, iter);
6172 /* Copy the data into the page, so we can start over. */
6173 ret = trace_seq_to_buffer(&iter->seq,
6174 page_address(spd.pages[i]),
6175 trace_seq_used(&iter->seq));
6177 __free_page(spd.pages[i]);
6180 spd.partial[i].offset = 0;
6181 spd.partial[i].len = trace_seq_used(&iter->seq);
6183 trace_seq_init(&iter->seq);
6186 trace_access_unlock(iter->cpu_file);
6187 trace_event_read_unlock();
6188 mutex_unlock(&iter->mutex);
6193 ret = splice_to_pipe(pipe, &spd);
6197 splice_shrink_spd(&spd);
6201 mutex_unlock(&iter->mutex);
6206 tracing_entries_read(struct file *filp, char __user *ubuf,
6207 size_t cnt, loff_t *ppos)
6209 struct inode *inode = file_inode(filp);
6210 struct trace_array *tr = inode->i_private;
6211 int cpu = tracing_get_cpu(inode);
6216 mutex_lock(&trace_types_lock);
6218 if (cpu == RING_BUFFER_ALL_CPUS) {
6219 int cpu, buf_size_same;
6224 /* check if all CPU buffer sizes are the same */
6225 for_each_tracing_cpu(cpu) {
6226 /* fill in the size from first enabled cpu */
6228 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6229 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
6235 if (buf_size_same) {
6236 if (!ring_buffer_expanded)
6237 r = sprintf(buf, "%lu (expanded: %lu)\n",
6239 trace_buf_size >> 10);
6241 r = sprintf(buf, "%lu\n", size >> 10);
6243 r = sprintf(buf, "X\n");
6245 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6247 mutex_unlock(&trace_types_lock);
6249 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
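/*
 * Write handler for "buffer_size_kb". As an illustrative example:
 *
 *   echo 4096 > buffer_size_kb
 *
 * resizes the ring buffers to 4096 KB per CPU; the per_cpu/cpuN copy of
 * the file resizes only that CPU's buffer.
 */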
6254 tracing_entries_write(struct file *filp, const char __user *ubuf,
6255 size_t cnt, loff_t *ppos)
6257 struct inode *inode = file_inode(filp);
6258 struct trace_array *tr = inode->i_private;
6262 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6266 /* must have at least 1 entry */
6270 /* value is in KB */
6272 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6282 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6283 size_t cnt, loff_t *ppos)
6285 struct trace_array *tr = filp->private_data;
6288 unsigned long size = 0, expanded_size = 0;
6290 mutex_lock(&trace_types_lock);
6291 for_each_tracing_cpu(cpu) {
6292 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6293 if (!ring_buffer_expanded)
6294 expanded_size += trace_buf_size >> 10;
6296 if (ring_buffer_expanded)
6297 r = sprintf(buf, "%lu\n", size);
6299 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6300 mutex_unlock(&trace_types_lock);
6302 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6306 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6307 size_t cnt, loff_t *ppos)
6310 * There is no need to read what the user has written; this function
6311 * just makes sure that there is no error when "echo" is used
6320 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6322 struct trace_array *tr = inode->i_private;
6324 /* disable tracing ? */
6325 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6326 tracer_tracing_off(tr);
6327 /* resize the ring buffer to 0 */
6328 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6330 trace_array_put(tr);
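/*
 * Write handler for "trace_marker": the string written from user space
 * is recorded in the ring buffer as a TRACE_PRINT entry. As an
 * illustrative example:
 *
 *   echo "hit checkpoint A" > trace_marker
 */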
6336 tracing_mark_write(struct file *filp, const char __user *ubuf,
6337 size_t cnt, loff_t *fpos)
6339 struct trace_array *tr = filp->private_data;
6340 struct ring_buffer_event *event;
6341 enum event_trigger_type tt = ETT_NONE;
6342 struct ring_buffer *buffer;
6343 struct print_entry *entry;
6344 unsigned long irq_flags;
6345 const char faulted[] = "<faulted>";
6350 /* Used in tracing_mark_raw_write() as well */
6351 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6353 if (tracing_disabled)
6356 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6359 if (cnt > TRACE_BUF_SIZE)
6360 cnt = TRACE_BUF_SIZE;
6362 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6364 local_save_flags(irq_flags);
6365 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6367 /* If the write is shorter than "<faulted>", make sure we can still store that string */
6368 if (cnt < FAULTED_SIZE)
6369 size += FAULTED_SIZE - cnt;
6371 buffer = tr->trace_buffer.buffer;
6372 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6373 irq_flags, preempt_count());
6374 if (unlikely(!event))
6375 /* Ring buffer disabled, return as if not open for write */
6378 entry = ring_buffer_event_data(event);
6379 entry->ip = _THIS_IP_;
6381 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6383 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6390 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6391 /* do not add \n before testing triggers, but add \0 */
6392 entry->buf[cnt] = '\0';
6393 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6396 if (entry->buf[cnt - 1] != '\n') {
6397 entry->buf[cnt] = '\n';
6398 entry->buf[cnt + 1] = '\0';
6400 entry->buf[cnt] = '\0';
6402 __buffer_unlock_commit(buffer, event);
6405 event_triggers_post_call(tr->trace_marker_file, tt);
6413 /* Limit it for now to 3K (including tag) */
6414 #define RAW_DATA_MAX_SIZE (1024*3)
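/*
 * Write handler for "trace_marker_raw": the payload must begin with a
 * binary tag id (an unsigned int); the remaining bytes are stored
 * verbatim in a TRACE_RAW_DATA entry.
 */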
6417 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6418 size_t cnt, loff_t *fpos)
6420 struct trace_array *tr = filp->private_data;
6421 struct ring_buffer_event *event;
6422 struct ring_buffer *buffer;
6423 struct raw_data_entry *entry;
6424 const char faulted[] = "<faulted>";
6425 unsigned long irq_flags;
6430 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6432 if (tracing_disabled)
6435 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6438 /* The marker must at least have a tag id */
6439 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6442 if (cnt > TRACE_BUF_SIZE)
6443 cnt = TRACE_BUF_SIZE;
6445 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6447 local_save_flags(irq_flags);
6448 size = sizeof(*entry) + cnt;
6449 if (cnt < FAULT_SIZE_ID)
6450 size += FAULT_SIZE_ID - cnt;
6452 buffer = tr->trace_buffer.buffer;
6453 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6454 irq_flags, preempt_count());
6456 /* Ring buffer disabled, return as if not open for write */
6459 entry = ring_buffer_event_data(event);
6461 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6464 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6469 __buffer_unlock_commit(buffer, event);
6477 static int tracing_clock_show(struct seq_file *m, void *v)
6479 struct trace_array *tr = m->private;
6482 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6484 "%s%s%s%s", i ? " " : "",
6485 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6486 i == tr->clock_id ? "]" : "");
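/*
 * Set the trace clock by name. As an illustrative example, writing
 * "global" to the "trace_clock" file selects the global clock; the
 * buffers are then reset because timestamps from different clocks are
 * not comparable.
 */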
6492 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6496 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6497 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6500 if (i == ARRAY_SIZE(trace_clocks))
6503 mutex_lock(&trace_types_lock);
6507 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6510 * New clock may not be consistent with the previous clock.
6511 * Reset the buffer so that it doesn't have incomparable timestamps.
6513 tracing_reset_online_cpus(&tr->trace_buffer);
6515 #ifdef CONFIG_TRACER_MAX_TRACE
6516 if (tr->max_buffer.buffer)
6517 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6518 tracing_reset_online_cpus(&tr->max_buffer);
6521 mutex_unlock(&trace_types_lock);
6526 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6527 size_t cnt, loff_t *fpos)
6529 struct seq_file *m = filp->private_data;
6530 struct trace_array *tr = m->private;
6532 const char *clockstr;
6535 if (cnt >= sizeof(buf))
6538 if (copy_from_user(buf, ubuf, cnt))
6543 clockstr = strstrip(buf);
6545 ret = tracing_set_clock(tr, clockstr);
6554 static int tracing_clock_open(struct inode *inode, struct file *file)
6556 struct trace_array *tr = inode->i_private;
6559 if (tracing_disabled)
6562 if (trace_array_get(tr))
6565 ret = single_open(file, tracing_clock_show, inode->i_private);
6567 trace_array_put(tr);
6572 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6574 struct trace_array *tr = m->private;
6576 mutex_lock(&trace_types_lock);
6578 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6579 seq_puts(m, "delta [absolute]\n");
6581 seq_puts(m, "[delta] absolute\n");
6583 mutex_unlock(&trace_types_lock);
6588 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6590 struct trace_array *tr = inode->i_private;
6593 if (tracing_disabled)
6596 if (trace_array_get(tr))
6599 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6601 trace_array_put(tr);
6606 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6610 mutex_lock(&trace_types_lock);
6612 if (abs && tr->time_stamp_abs_ref++)
6616 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6621 if (--tr->time_stamp_abs_ref)
6625 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6627 #ifdef CONFIG_TRACER_MAX_TRACE
6628 if (tr->max_buffer.buffer)
6629 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6632 mutex_unlock(&trace_types_lock);
6637 struct ftrace_buffer_info {
6638 struct trace_iterator iter;
6640 unsigned int spare_cpu;
6644 #ifdef CONFIG_TRACER_SNAPSHOT
6645 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6647 struct trace_array *tr = inode->i_private;
6648 struct trace_iterator *iter;
6652 if (trace_array_get(tr) < 0)
6655 if (file->f_mode & FMODE_READ) {
6656 iter = __tracing_open(inode, file, true);
6658 ret = PTR_ERR(iter);
6660 /* Writes still need the seq_file to hold the private data */
6662 m = kzalloc(sizeof(*m), GFP_KERNEL);
6665 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6673 iter->trace_buffer = &tr->max_buffer;
6674 iter->cpu_file = tracing_get_cpu(inode);
6676 file->private_data = m;
6680 trace_array_put(tr);
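/*
 * Write handler for the "snapshot" file. As an illustrative summary
 * (Documentation/trace/ftrace.rst has the authoritative description):
 * writing 1 allocates the snapshot buffer if needed and takes a
 * snapshot, writing 0 frees the snapshot buffer, and writing 2 clears
 * its contents without freeing it.
 */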
6686 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6689 struct seq_file *m = filp->private_data;
6690 struct trace_iterator *iter = m->private;
6691 struct trace_array *tr = iter->tr;
6695 ret = tracing_update_buffers();
6699 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6703 mutex_lock(&trace_types_lock);
6705 if (tr->current_trace->use_max_tr) {
6710 arch_spin_lock(&tr->max_lock);
6711 if (tr->cond_snapshot)
6713 arch_spin_unlock(&tr->max_lock);
6719 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6723 if (tr->allocated_snapshot)
6727 /* Only allow per-cpu swap if the ring buffer supports it */
6728 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6729 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6734 if (!tr->allocated_snapshot) {
6735 ret = tracing_alloc_snapshot_instance(tr);
6739 local_irq_disable();
6740 /* Now, we're going to swap */
6741 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6742 update_max_tr(tr, current, smp_processor_id(), NULL);
6744 update_max_tr_single(tr, current, iter->cpu_file);
6748 if (tr->allocated_snapshot) {
6749 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6750 tracing_reset_online_cpus(&tr->max_buffer);
6752 tracing_reset(&tr->max_buffer, iter->cpu_file);
6762 mutex_unlock(&trace_types_lock);
6766 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6768 struct seq_file *m = file->private_data;
6771 ret = tracing_release(inode, file);
6773 if (file->f_mode & FMODE_READ)
6776 /* If write only, the seq_file is just a stub */
6784 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6785 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6786 size_t count, loff_t *ppos);
6787 static int tracing_buffers_release(struct inode *inode, struct file *file);
6788 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6789 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6791 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6793 struct ftrace_buffer_info *info;
6796 ret = tracing_buffers_open(inode, filp);
6800 info = filp->private_data;
6802 if (info->iter.trace->use_max_tr) {
6803 tracing_buffers_release(inode, filp);
6807 info->iter.snapshot = true;
6808 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6813 #endif /* CONFIG_TRACER_SNAPSHOT */
6816 static const struct file_operations tracing_thresh_fops = {
6817 .open = tracing_open_generic,
6818 .read = tracing_thresh_read,
6819 .write = tracing_thresh_write,
6820 .llseek = generic_file_llseek,
6823 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6824 static const struct file_operations tracing_max_lat_fops = {
6825 .open = tracing_open_generic,
6826 .read = tracing_max_lat_read,
6827 .write = tracing_max_lat_write,
6828 .llseek = generic_file_llseek,
6832 static const struct file_operations set_tracer_fops = {
6833 .open = tracing_open_generic,
6834 .read = tracing_set_trace_read,
6835 .write = tracing_set_trace_write,
6836 .llseek = generic_file_llseek,
6839 static const struct file_operations tracing_pipe_fops = {
6840 .open = tracing_open_pipe,
6841 .poll = tracing_poll_pipe,
6842 .read = tracing_read_pipe,
6843 .splice_read = tracing_splice_read_pipe,
6844 .release = tracing_release_pipe,
6845 .llseek = no_llseek,
6848 static const struct file_operations tracing_entries_fops = {
6849 .open = tracing_open_generic_tr,
6850 .read = tracing_entries_read,
6851 .write = tracing_entries_write,
6852 .llseek = generic_file_llseek,
6853 .release = tracing_release_generic_tr,
6856 static const struct file_operations tracing_total_entries_fops = {
6857 .open = tracing_open_generic_tr,
6858 .read = tracing_total_entries_read,
6859 .llseek = generic_file_llseek,
6860 .release = tracing_release_generic_tr,
6863 static const struct file_operations tracing_free_buffer_fops = {
6864 .open = tracing_open_generic_tr,
6865 .write = tracing_free_buffer_write,
6866 .release = tracing_free_buffer_release,
6869 static const struct file_operations tracing_mark_fops = {
6870 .open = tracing_open_generic_tr,
6871 .write = tracing_mark_write,
6872 .llseek = generic_file_llseek,
6873 .release = tracing_release_generic_tr,
6876 static const struct file_operations tracing_mark_raw_fops = {
6877 .open = tracing_open_generic_tr,
6878 .write = tracing_mark_raw_write,
6879 .llseek = generic_file_llseek,
6880 .release = tracing_release_generic_tr,
6883 static const struct file_operations trace_clock_fops = {
6884 .open = tracing_clock_open,
6886 .llseek = seq_lseek,
6887 .release = tracing_single_release_tr,
6888 .write = tracing_clock_write,
6891 static const struct file_operations trace_time_stamp_mode_fops = {
6892 .open = tracing_time_stamp_mode_open,
6894 .llseek = seq_lseek,
6895 .release = tracing_single_release_tr,
6898 #ifdef CONFIG_TRACER_SNAPSHOT
6899 static const struct file_operations snapshot_fops = {
6900 .open = tracing_snapshot_open,
6902 .write = tracing_snapshot_write,
6903 .llseek = tracing_lseek,
6904 .release = tracing_snapshot_release,
6907 static const struct file_operations snapshot_raw_fops = {
6908 .open = snapshot_raw_open,
6909 .read = tracing_buffers_read,
6910 .release = tracing_buffers_release,
6911 .splice_read = tracing_buffers_splice_read,
6912 .llseek = no_llseek,
6915 #endif /* CONFIG_TRACER_SNAPSHOT */
6917 #define TRACING_LOG_ERRS_MAX 8
6918 #define TRACING_LOG_LOC_MAX 128
6920 #define CMD_PREFIX " Command: "
6923 const char **errs; /* ptr to loc-specific array of err strings */
6924 u8 type; /* index into errs -> specific err string */
6925 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
6929 struct tracing_log_err {
6930 struct list_head list;
6931 struct err_info info;
6932 char loc[TRACING_LOG_LOC_MAX]; /* err location */
6933 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
6936 static DEFINE_MUTEX(tracing_err_log_lock);
6938 struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
6940 struct tracing_log_err *err;
6942 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
6943 err = kzalloc(sizeof(*err), GFP_KERNEL);
6945 err = ERR_PTR(-ENOMEM);
6946 tr->n_err_log_entries++;
6951 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
6952 list_del(&err->list);
6958 * err_pos - find the position of a string within a command for error careting
6959 * @cmd: The tracing command that caused the error
6960 * @str: The string to position the caret at within @cmd
6962 * Finds the position of the first occurrence of @str within @cmd. The
6963 * return value can be passed to tracing_log_err() for caret placement
6966 * Returns the index within @cmd of the first occurrence of @str or 0
6967 * if @str was not found.
6969 unsigned int err_pos(char *cmd, const char *str)
6973 if (WARN_ON(!strlen(cmd)))
6976 found = strstr(cmd, str);
6984 * tracing_log_err - write an error to the tracing error log
6985 * @tr: The associated trace array for the error (NULL for top level array)
6986 * @loc: A string describing where the error occurred
6987 * @cmd: The tracing command that caused the error
6988 * @errs: The array of loc-specific static error strings
6989 * @type: The index into errs[], which produces the specific static err string
6990 * @pos: The position the caret should be placed in the cmd
6992 * Writes an error into tracing/error_log of the form:
6994 * <loc>: error: <text>
6998 * tracing/error_log is a small log file containing the last
6999 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7000 * unless there has been a tracing error, and the error log can be
7001 * cleared and have its memory freed by writing the empty string in
7002 * truncation mode to it i.e. echo > tracing/error_log.
7004 * NOTE: the @errs array along with the @type param are used to
7005 * produce a static error string - this string is not copied and saved
7006 * when the error is logged - only a pointer to it is saved. See
7007 * existing callers for examples of how static strings are typically
7008 * defined for use with tracing_log_err().
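 *
 * As an illustrative example, one logged entry is rendered roughly as:
 *
 *   [ 1034.713962] location: error: Static error text
 *     Command: the command that failed
 *                  ^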
7010 void tracing_log_err(struct trace_array *tr,
7011 const char *loc, const char *cmd,
7012 const char **errs, u8 type, u8 pos)
7014 struct tracing_log_err *err;
7019 mutex_lock(&tracing_err_log_lock);
7020 err = get_tracing_log_err(tr);
7021 if (PTR_ERR(err) == -ENOMEM) {
7022 mutex_unlock(&tracing_err_log_lock);
7026 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7027 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7029 err->info.errs = errs;
7030 err->info.type = type;
7031 err->info.pos = pos;
7032 err->info.ts = local_clock();
7034 list_add_tail(&err->list, &tr->err_log);
7035 mutex_unlock(&tracing_err_log_lock);
7038 static void clear_tracing_err_log(struct trace_array *tr)
7040 struct tracing_log_err *err, *next;
7042 mutex_lock(&tracing_err_log_lock);
7043 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7044 list_del(&err->list);
7048 tr->n_err_log_entries = 0;
7049 mutex_unlock(&tracing_err_log_lock);
7052 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7054 struct trace_array *tr = m->private;
7056 mutex_lock(&tracing_err_log_lock);
7058 return seq_list_start(&tr->err_log, *pos);
7061 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7063 struct trace_array *tr = m->private;
7065 return seq_list_next(v, &tr->err_log, pos);
7068 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7070 mutex_unlock(&tracing_err_log_lock);
7073 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7077 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7079 for (i = 0; i < pos; i++)
7084 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7086 struct tracing_log_err *err = v;
7089 const char *err_text = err->info.errs[err->info.type];
7090 u64 sec = err->info.ts;
7093 nsec = do_div(sec, NSEC_PER_SEC);
7094 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7095 err->loc, err_text);
7096 seq_printf(m, "%s", err->cmd);
7097 tracing_err_log_show_pos(m, err->info.pos);
7103 static const struct seq_operations tracing_err_log_seq_ops = {
7104 .start = tracing_err_log_seq_start,
7105 .next = tracing_err_log_seq_next,
7106 .stop = tracing_err_log_seq_stop,
7107 .show = tracing_err_log_seq_show
7110 static int tracing_err_log_open(struct inode *inode, struct file *file)
7112 struct trace_array *tr = inode->i_private;
7115 if (trace_array_get(tr) < 0)
7118 /* If this file was opened for write, then erase contents */
7119 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7120 clear_tracing_err_log(tr);
7122 if (file->f_mode & FMODE_READ) {
7123 ret = seq_open(file, &tracing_err_log_seq_ops);
7125 struct seq_file *m = file->private_data;
7128 trace_array_put(tr);
7134 static ssize_t tracing_err_log_write(struct file *file,
7135 const char __user *buffer,
7136 size_t count, loff_t *ppos)
7141 static const struct file_operations tracing_err_log_fops = {
7142 .open = tracing_err_log_open,
7143 .write = tracing_err_log_write,
7145 .llseek = seq_lseek,
7146 .release = tracing_release_generic_tr,
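/*
 * Open handler for the per-CPU "trace_pipe_raw" files, which expose
 * ring buffer pages to user space in their binary form.
 */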
7149 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7151 struct trace_array *tr = inode->i_private;
7152 struct ftrace_buffer_info *info;
7155 if (tracing_disabled)
7158 if (trace_array_get(tr) < 0)
7161 info = kzalloc(sizeof(*info), GFP_KERNEL);
7163 trace_array_put(tr);
7167 mutex_lock(&trace_types_lock);
7170 info->iter.cpu_file = tracing_get_cpu(inode);
7171 info->iter.trace = tr->current_trace;
7172 info->iter.trace_buffer = &tr->trace_buffer;
7174 /* Force reading ring buffer for first read */
7175 info->read = (unsigned int)-1;
7177 filp->private_data = info;
7179 tr->current_trace->ref++;
7181 mutex_unlock(&trace_types_lock);
7183 ret = nonseekable_open(inode, filp);
7185 trace_array_put(tr);
7191 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7193 struct ftrace_buffer_info *info = filp->private_data;
7194 struct trace_iterator *iter = &info->iter;
7196 return trace_poll(iter, filp, poll_table);
7200 tracing_buffers_read(struct file *filp, char __user *ubuf,
7201 size_t count, loff_t *ppos)
7203 struct ftrace_buffer_info *info = filp->private_data;
7204 struct trace_iterator *iter = &info->iter;
7211 #ifdef CONFIG_TRACER_MAX_TRACE
7212 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7217 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
7219 if (IS_ERR(info->spare)) {
7220 ret = PTR_ERR(info->spare);
7223 info->spare_cpu = iter->cpu_file;
7229 /* Do we have previous read data to read? */
7230 if (info->read < PAGE_SIZE)
7234 trace_access_lock(iter->cpu_file);
7235 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
7239 trace_access_unlock(iter->cpu_file);
7242 if (trace_empty(iter)) {
7243 if ((filp->f_flags & O_NONBLOCK))
7246 ret = wait_on_pipe(iter, 0);
7257 size = PAGE_SIZE - info->read;
7261 ret = copy_to_user(ubuf, info->spare + info->read, size);
7273 static int tracing_buffers_release(struct inode *inode, struct file *file)
7275 struct ftrace_buffer_info *info = file->private_data;
7276 struct trace_iterator *iter = &info->iter;
7278 mutex_lock(&trace_types_lock);
7280 iter->tr->current_trace->ref--;
7282 __trace_array_put(iter->tr);
7285 ring_buffer_free_read_page(iter->trace_buffer->buffer,
7286 info->spare_cpu, info->spare);
7289 mutex_unlock(&trace_types_lock);
7295 struct ring_buffer *buffer;
7301 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7302 struct pipe_buffer *buf)
7304 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7309 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7314 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7315 struct pipe_buffer *buf)
7317 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7322 /* Pipe buffer operations for a buffer. */
7323 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7324 .confirm = generic_pipe_buf_confirm,
7325 .release = buffer_pipe_buf_release,
7326 .steal = generic_pipe_buf_steal,
7327 .get = buffer_pipe_buf_get,
7331 * Callback from splice_to_pipe(): release any pages left in the spd
7332 * in case we errored out while filling the pipe.
7334 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7336 struct buffer_ref *ref =
7337 (struct buffer_ref *)spd->partial[i].private;
7342 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7344 spd->partial[i].private = 0;
7348 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7349 struct pipe_inode_info *pipe, size_t len,
7352 struct ftrace_buffer_info *info = file->private_data;
7353 struct trace_iterator *iter = &info->iter;
7354 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7355 struct page *pages_def[PIPE_DEF_BUFFERS];
7356 struct splice_pipe_desc spd = {
7358 .partial = partial_def,
7359 .nr_pages_max = PIPE_DEF_BUFFERS,
7360 .ops = &buffer_pipe_buf_ops,
7361 .spd_release = buffer_spd_release,
7363 struct buffer_ref *ref;
7367 #ifdef CONFIG_TRACER_MAX_TRACE
7368 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7372 if (*ppos & (PAGE_SIZE - 1))
7375 if (len & (PAGE_SIZE - 1)) {
7376 if (len < PAGE_SIZE)
7381 if (splice_grow_spd(pipe, &spd))
7385 trace_access_lock(iter->cpu_file);
7386 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7388 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7392 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7399 ref->buffer = iter->trace_buffer->buffer;
7400 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7401 if (IS_ERR(ref->page)) {
7402 ret = PTR_ERR(ref->page);
7407 ref->cpu = iter->cpu_file;
7409 r = ring_buffer_read_page(ref->buffer, &ref->page,
7410 len, iter->cpu_file, 1);
7412 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7418 page = virt_to_page(ref->page);
7420 spd.pages[i] = page;
7421 spd.partial[i].len = PAGE_SIZE;
7422 spd.partial[i].offset = 0;
7423 spd.partial[i].private = (unsigned long)ref;
7427 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7430 trace_access_unlock(iter->cpu_file);
7433 /* did we read anything? */
7434 if (!spd.nr_pages) {
7439 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7442 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7449 ret = splice_to_pipe(pipe, &spd);
7451 splice_shrink_spd(&spd);
7456 static const struct file_operations tracing_buffers_fops = {
7457 .open = tracing_buffers_open,
7458 .read = tracing_buffers_read,
7459 .poll = tracing_buffers_poll,
7460 .release = tracing_buffers_release,
7461 .splice_read = tracing_buffers_splice_read,
7462 .llseek = no_llseek,
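/*
 * A minimal, hypothetical userspace sketch of how trace_pipe_raw is meant
 * to be consumed through the splice path above (paths, sizes and error
 * handling simplified; nothing here is kernel ABI beyond the tracefs file
 * itself):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int dump_cpu0(const char *out)
 *	{
 *		int tfd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *			       O_RDONLY);
 *		int ofd = open(out, O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *		int p[2];
 *		ssize_t n;
 *
 *		if (tfd < 0 || ofd < 0 || pipe(p) < 0)
 *			return -1;
 *		while ((n = splice(tfd, NULL, p[1], NULL, 4096,
 *				   SPLICE_F_MOVE)) > 0)
 *			if (splice(p[0], NULL, ofd, NULL, n, SPLICE_F_MOVE) < 0)
 *				return -1;
 *		return 0;
 *	}
 *
 * The length handed to splice() stays page aligned, matching the
 * PAGE_SIZE checks in tracing_buffers_splice_read() above.
 */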
7466 tracing_stats_read(struct file *filp, char __user *ubuf,
7467 size_t count, loff_t *ppos)
7469 struct inode *inode = file_inode(filp);
7470 struct trace_array *tr = inode->i_private;
7471 struct trace_buffer *trace_buf = &tr->trace_buffer;
7472 int cpu = tracing_get_cpu(inode);
7473 struct trace_seq *s;
7475 unsigned long long t;
7476 unsigned long usec_rem;
7478 s = kmalloc(sizeof(*s), GFP_KERNEL);
7484 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7485 trace_seq_printf(s, "entries: %ld\n", cnt);
7487 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7488 trace_seq_printf(s, "overrun: %ld\n", cnt);
7490 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7491 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7493 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7494 trace_seq_printf(s, "bytes: %ld\n", cnt);
7496 if (trace_clocks[tr->clock_id].in_ns) {
7497 /* local or global for trace_clock */
7498 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7499 usec_rem = do_div(t, USEC_PER_SEC);
7500 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7503 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7504 usec_rem = do_div(t, USEC_PER_SEC);
7505 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7507 /* counter or tsc mode for trace_clock */
7508 trace_seq_printf(s, "oldest event ts: %llu\n",
7509 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7511 trace_seq_printf(s, "now ts: %llu\n",
7512 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7515 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7516 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7518 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7519 trace_seq_printf(s, "read events: %ld\n", cnt);
7521 count = simple_read_from_buffer(ubuf, count, ppos,
7522 s->buffer, trace_seq_used(s));
7529 static const struct file_operations tracing_stats_fops = {
7530 .open = tracing_open_generic_tr,
7531 .read = tracing_stats_read,
7532 .llseek = generic_file_llseek,
7533 .release = tracing_release_generic_tr,
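/*
 * For reference, the per-cpu "stats" file produced above looks roughly
 * like this (values purely illustrative):
 *
 *	entries: 16
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 1052
 *	oldest event ts:  5264.583363
 *	now ts:  5279.909357
 *	dropped events: 0
 *	read events: 16
 */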
7536 #ifdef CONFIG_DYNAMIC_FTRACE
7539 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7540 size_t cnt, loff_t *ppos)
7542 unsigned long *p = filp->private_data;
7543 char buf[64]; /* Not too big for a shallow stack */
7546 r = scnprintf(buf, 63, "%ld", *p);
7549 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7552 static const struct file_operations tracing_dyn_info_fops = {
7553 .open = tracing_open_generic,
7554 .read = tracing_read_dyn_info,
7555 .llseek = generic_file_llseek,
7557 #endif /* CONFIG_DYNAMIC_FTRACE */
7559 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7561 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7562 struct trace_array *tr, struct ftrace_probe_ops *ops,
7565 tracing_snapshot_instance(tr);
7569 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7570 struct trace_array *tr, struct ftrace_probe_ops *ops,
7573 struct ftrace_func_mapper *mapper = data;
7577 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7587 tracing_snapshot_instance(tr);
7591 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7592 struct ftrace_probe_ops *ops, void *data)
7594 struct ftrace_func_mapper *mapper = data;
7597 seq_printf(m, "%ps:", (void *)ip);
7599 seq_puts(m, "snapshot");
7602 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7605 seq_printf(m, ":count=%ld\n", *count);
7607 seq_puts(m, ":unlimited\n");
7613 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7614 unsigned long ip, void *init_data, void **data)
7616 struct ftrace_func_mapper *mapper = *data;
7619 mapper = allocate_ftrace_func_mapper();
7625 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7629 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7630 unsigned long ip, void *data)
7632 struct ftrace_func_mapper *mapper = data;
7637 free_ftrace_func_mapper(mapper, NULL);
7641 ftrace_func_mapper_remove_ip(mapper, ip);
7644 static struct ftrace_probe_ops snapshot_probe_ops = {
7645 .func = ftrace_snapshot,
7646 .print = ftrace_snapshot_print,
7649 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7650 .func = ftrace_count_snapshot,
7651 .print = ftrace_snapshot_print,
7652 .init = ftrace_snapshot_init,
7653 .free = ftrace_snapshot_free,
7657 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7658 char *glob, char *cmd, char *param, int enable)
7660 struct ftrace_probe_ops *ops;
7661 void *count = (void *)-1;
7668 /* hash funcs only work with set_ftrace_filter */
7672 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7675 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7680 number = strsep(&param, ":");
7682 if (!strlen(number))
7686 * We use the callback data field (which is a pointer) as our counter.
7689 ret = kstrtoul(number, 0, (unsigned long *)&count);
7694 ret = tracing_alloc_snapshot_instance(tr);
7698 ret = register_ftrace_function_probe(glob, tr, ops, count);
7701 return ret < 0 ? ret : 0;
7704 static struct ftrace_func_command ftrace_snapshot_cmd = {
7706 .func = ftrace_trace_snapshot_callback,
7709 static __init int register_snapshot_cmd(void)
7711 return register_ftrace_command(&ftrace_snapshot_cmd);
7714 static inline __init int register_snapshot_cmd(void) { return 0; }
7715 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
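/*
 * The "snapshot" command registered above is driven through
 * set_ftrace_filter, e.g. (illustrative function names):
 *
 *	echo 'do_sys_open:snapshot'     > set_ftrace_filter
 *	echo 'do_sys_open:snapshot:5'  >> set_ftrace_filter
 *	echo '!do_sys_open:snapshot:0' >> set_ftrace_filter
 *
 * The first takes a snapshot on every hit of the function, the second
 * only for the first five hits (the ":5" is the count parsed by
 * ftrace_trace_snapshot_callback()), and the last removes the probe.
 */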
7717 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7719 if (WARN_ON(!tr->dir))
7720 return ERR_PTR(-ENODEV);
7722 /* Top directory uses NULL as the parent */
7723 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7726 /* All sub buffers have a descriptor */
7730 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7732 struct dentry *d_tracer;
7735 return tr->percpu_dir;
7737 d_tracer = tracing_get_dentry(tr);
7738 if (IS_ERR(d_tracer))
7741 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7743 WARN_ONCE(!tr->percpu_dir,
7744 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7746 return tr->percpu_dir;
7749 static struct dentry *
7750 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7751 void *data, long cpu, const struct file_operations *fops)
7753 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7755 if (ret) /* See tracing_get_cpu() */
7756 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7761 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7763 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7764 struct dentry *d_cpu;
7765 char cpu_dir[30]; /* 30 characters should be more than enough */
7770 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7771 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7773 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7777 /* per cpu trace_pipe */
7778 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7779 tr, cpu, &tracing_pipe_fops);
7782 trace_create_cpu_file("trace", 0644, d_cpu,
7783 tr, cpu, &tracing_fops);
7785 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7786 tr, cpu, &tracing_buffers_fops);
7788 trace_create_cpu_file("stats", 0444, d_cpu,
7789 tr, cpu, &tracing_stats_fops);
7791 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7792 tr, cpu, &tracing_entries_fops);
7794 #ifdef CONFIG_TRACER_SNAPSHOT
7795 trace_create_cpu_file("snapshot", 0644, d_cpu,
7796 tr, cpu, &snapshot_fops);
7798 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7799 tr, cpu, &snapshot_raw_fops);
7803 #ifdef CONFIG_FTRACE_SELFTEST
7804 /* Let selftest have access to static functions in this file */
7805 #include "trace_selftest.c"
7809 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7812 struct trace_option_dentry *topt = filp->private_data;
7815 if (topt->flags->val & topt->opt->bit)
7820 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7824 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7827 struct trace_option_dentry *topt = filp->private_data;
7831 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7835 if (val != 0 && val != 1)
7838 if (!!(topt->flags->val & topt->opt->bit) != val) {
7839 mutex_lock(&trace_types_lock);
7840 ret = __set_tracer_option(topt->tr, topt->flags,
7842 mutex_unlock(&trace_types_lock);
7853 static const struct file_operations trace_options_fops = {
7854 .open = tracing_open_generic,
7855 .read = trace_options_read,
7856 .write = trace_options_write,
7857 .llseek = generic_file_llseek,
7861 * In order to pass in both the trace_array descriptor as well as the index
7862 * to the flag that the trace option file represents, the trace_array
7863 * has a character array of trace_flags_index[], which holds the index
7864 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7865 * The address of this character array is passed to the flag option file
7866 * read/write callbacks.
7868 * In order to extract both the index and the trace_array descriptor,
7869 * get_tr_index() uses the following algorithm.
7873 * As the pointer itself contains the address of the index (remember index[1] == 1), idx = *ptr recovers the index.
7876 * Then to get the trace_array descriptor, by subtracting that index
7877 * from the ptr, we get to the start of the index itself.
7879 * ptr - idx == &index[0]
7881 * Then a simple container_of() from that pointer gets us to the
7882 * trace_array descriptor.
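 *
 * For example (values purely illustrative): if a flag's option file was
 * created with data == &tr->trace_flags_index[3], then
 *
 *	idx = *(unsigned char *)data;                              idx == 3
 *	data - idx                             == &tr->trace_flags_index[0]
 *	container_of(data - idx, struct trace_array, trace_flags_index) == tr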
7884 static void get_tr_index(void *data, struct trace_array **ptr,
7885 unsigned int *pindex)
7887 *pindex = *(unsigned char *)data;
7889 *ptr = container_of(data - *pindex, struct trace_array,
7894 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7897 void *tr_index = filp->private_data;
7898 struct trace_array *tr;
7902 get_tr_index(tr_index, &tr, &index);
7904 if (tr->trace_flags & (1 << index))
7909 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7913 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7916 void *tr_index = filp->private_data;
7917 struct trace_array *tr;
7922 get_tr_index(tr_index, &tr, &index);
7924 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7928 if (val != 0 && val != 1)
7931 mutex_lock(&trace_types_lock);
7932 ret = set_tracer_flag(tr, 1 << index, val);
7933 mutex_unlock(&trace_types_lock);
7943 static const struct file_operations trace_options_core_fops = {
7944 .open = tracing_open_generic,
7945 .read = trace_options_core_read,
7946 .write = trace_options_core_write,
7947 .llseek = generic_file_llseek,
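/*
 * Both the tracer-specific and the core option fops above end up behind
 * the files in the "options" directory, e.g. (illustrative):
 *
 *	cat  /sys/kernel/tracing/options/print-parent	(prints "0" or "1")
 *	echo 0 > /sys/kernel/tracing/options/print-parent
 *
 * Writes other than 0 or 1 are rejected by the write handlers above.
 */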
7950 struct dentry *trace_create_file(const char *name,
7952 struct dentry *parent,
7954 const struct file_operations *fops)
7958 ret = tracefs_create_file(name, mode, parent, data, fops);
7960 pr_warn("Could not create tracefs '%s' entry\n", name);
7966 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7968 struct dentry *d_tracer;
7973 d_tracer = tracing_get_dentry(tr);
7974 if (IS_ERR(d_tracer))
7977 tr->options = tracefs_create_dir("options", d_tracer);
7979 pr_warn("Could not create tracefs directory 'options'\n");
7987 create_trace_option_file(struct trace_array *tr,
7988 struct trace_option_dentry *topt,
7989 struct tracer_flags *flags,
7990 struct tracer_opt *opt)
7992 struct dentry *t_options;
7994 t_options = trace_options_init_dentry(tr);
7998 topt->flags = flags;
8002 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8003 &trace_options_fops);
8008 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8010 struct trace_option_dentry *topts;
8011 struct trace_options *tr_topts;
8012 struct tracer_flags *flags;
8013 struct tracer_opt *opts;
8020 flags = tracer->flags;
8022 if (!flags || !flags->opts)
8026 * If this is an instance, only create flags for tracers
8027 * the instance may have.
8029 if (!trace_ok_for_array(tracer, tr))
8032 for (i = 0; i < tr->nr_topts; i++) {
8033 /* Make sure there's no duplicate flags. */
8034 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8040 for (cnt = 0; opts[cnt].name; cnt++)
8043 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8047 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8054 tr->topts = tr_topts;
8055 tr->topts[tr->nr_topts].tracer = tracer;
8056 tr->topts[tr->nr_topts].topts = topts;
8059 for (cnt = 0; opts[cnt].name; cnt++) {
8060 create_trace_option_file(tr, &topts[cnt], flags,
8062 WARN_ONCE(topts[cnt].entry == NULL,
8063 "Failed to create trace option: %s",
8068 static struct dentry *
8069 create_trace_option_core_file(struct trace_array *tr,
8070 const char *option, long index)
8072 struct dentry *t_options;
8074 t_options = trace_options_init_dentry(tr);
8078 return trace_create_file(option, 0644, t_options,
8079 (void *)&tr->trace_flags_index[index],
8080 &trace_options_core_fops);
8083 static void create_trace_options_dir(struct trace_array *tr)
8085 struct dentry *t_options;
8086 bool top_level = tr == &global_trace;
8089 t_options = trace_options_init_dentry(tr);
8093 for (i = 0; trace_options[i]; i++) {
8095 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8096 create_trace_option_core_file(tr, trace_options[i], i);
8101 rb_simple_read(struct file *filp, char __user *ubuf,
8102 size_t cnt, loff_t *ppos)
8104 struct trace_array *tr = filp->private_data;
8108 r = tracer_tracing_is_on(tr);
8109 r = sprintf(buf, "%d\n", r);
8111 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8115 rb_simple_write(struct file *filp, const char __user *ubuf,
8116 size_t cnt, loff_t *ppos)
8118 struct trace_array *tr = filp->private_data;
8119 struct ring_buffer *buffer = tr->trace_buffer.buffer;
8123 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8128 mutex_lock(&trace_types_lock);
8129 if (!!val == tracer_tracing_is_on(tr)) {
8130 val = 0; /* do nothing */
8132 tracer_tracing_on(tr);
8133 if (tr->current_trace->start)
8134 tr->current_trace->start(tr);
8136 tracer_tracing_off(tr);
8137 if (tr->current_trace->stop)
8138 tr->current_trace->stop(tr);
8140 mutex_unlock(&trace_types_lock);
8148 static const struct file_operations rb_simple_fops = {
8149 .open = tracing_open_generic_tr,
8150 .read = rb_simple_read,
8151 .write = rb_simple_write,
8152 .release = tracing_release_generic_tr,
8153 .llseek = default_llseek,
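/*
 * rb_simple_fops backs the "tracing_on" file, e.g. (illustrative):
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on		(pause recording)
 *	echo 1 > /sys/kernel/tracing/tracing_on		(resume)
 *
 * Only writes to the ring buffer are gated; the current tracer stays
 * selected, and its ->start()/->stop() callbacks are invoked above.
 */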
8157 buffer_percent_read(struct file *filp, char __user *ubuf,
8158 size_t cnt, loff_t *ppos)
8160 struct trace_array *tr = filp->private_data;
8164 r = tr->buffer_percent;
8165 r = sprintf(buf, "%d\n", r);
8167 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8171 buffer_percent_write(struct file *filp, const char __user *ubuf,
8172 size_t cnt, loff_t *ppos)
8174 struct trace_array *tr = filp->private_data;
8178 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8188 tr->buffer_percent = val;
8195 static const struct file_operations buffer_percent_fops = {
8196 .open = tracing_open_generic_tr,
8197 .read = buffer_percent_read,
8198 .write = buffer_percent_write,
8199 .release = tracing_release_generic_tr,
8200 .llseek = default_llseek,
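/*
 * "buffer_percent" controls how full the ring buffer must be before a
 * blocked reader is woken (see wait_on_pipe() in the splice path above):
 * 0 wakes on any data, 100 waits until the buffer is full, and the
 * default set in init_tracer_tracefs() below is 50, e.g.:
 *
 *	echo 0 > /sys/kernel/tracing/buffer_percent
 */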
8203 struct dentry *trace_instance_dir;
8206 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8209 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
8211 enum ring_buffer_flags rb_flags;
8213 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8217 buf->buffer = ring_buffer_alloc(size, rb_flags);
8221 buf->data = alloc_percpu(struct trace_array_cpu);
8223 ring_buffer_free(buf->buffer);
8228 /* Allocate the first page for all buffers */
8229 set_buffer_entries(&tr->trace_buffer,
8230 ring_buffer_size(tr->trace_buffer.buffer, 0));
8235 static int allocate_trace_buffers(struct trace_array *tr, int size)
8239 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
8243 #ifdef CONFIG_TRACER_MAX_TRACE
8244 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8245 allocate_snapshot ? size : 1);
8247 ring_buffer_free(tr->trace_buffer.buffer);
8248 tr->trace_buffer.buffer = NULL;
8249 free_percpu(tr->trace_buffer.data);
8250 tr->trace_buffer.data = NULL;
8253 tr->allocated_snapshot = allocate_snapshot;
8256 * Only the top level trace array gets its snapshot allocated
8257 * from the kernel command line.
8259 allocate_snapshot = false;
8264 static void free_trace_buffer(struct trace_buffer *buf)
8267 ring_buffer_free(buf->buffer);
8269 free_percpu(buf->data);
8274 static void free_trace_buffers(struct trace_array *tr)
8279 free_trace_buffer(&tr->trace_buffer);
8281 #ifdef CONFIG_TRACER_MAX_TRACE
8282 free_trace_buffer(&tr->max_buffer);
8286 static void init_trace_flags_index(struct trace_array *tr)
8290 /* Used by the trace options files */
8291 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8292 tr->trace_flags_index[i] = i;
8295 static void __update_tracer_options(struct trace_array *tr)
8299 for (t = trace_types; t; t = t->next)
8300 add_tracer_options(tr, t);
8303 static void update_tracer_options(struct trace_array *tr)
8305 mutex_lock(&trace_types_lock);
8306 __update_tracer_options(tr);
8307 mutex_unlock(&trace_types_lock);
8310 struct trace_array *trace_array_create(const char *name)
8312 struct trace_array *tr;
8315 mutex_lock(&event_mutex);
8316 mutex_lock(&trace_types_lock);
8319 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8320 if (tr->name && strcmp(tr->name, name) == 0)
8325 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8329 tr->name = kstrdup(name, GFP_KERNEL);
8333 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8336 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8338 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8340 raw_spin_lock_init(&tr->start_lock);
8342 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8344 tr->current_trace = &nop_trace;
8346 INIT_LIST_HEAD(&tr->systems);
8347 INIT_LIST_HEAD(&tr->events);
8348 INIT_LIST_HEAD(&tr->hist_vars);
8349 INIT_LIST_HEAD(&tr->err_log);
8351 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8354 tr->dir = tracefs_create_dir(name, trace_instance_dir);
8358 ret = event_trace_add_tracer(tr->dir, tr);
8360 tracefs_remove_recursive(tr->dir);
8364 ftrace_init_trace_array(tr);
8366 init_tracer_tracefs(tr, tr->dir);
8367 init_trace_flags_index(tr);
8368 __update_tracer_options(tr);
8370 list_add(&tr->list, &ftrace_trace_arrays);
8372 mutex_unlock(&trace_types_lock);
8373 mutex_unlock(&event_mutex);
8378 free_trace_buffers(tr);
8379 free_cpumask_var(tr->tracing_cpumask);
8384 mutex_unlock(&trace_types_lock);
8385 mutex_unlock(&event_mutex);
8387 return ERR_PTR(ret);
8389 EXPORT_SYMBOL_GPL(trace_array_create);
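/*
 * A minimal sketch (hypothetical module, names invented) of the exported
 * instance API:
 *
 *	static struct trace_array *my_tr;
 *
 *	static int __init my_init(void)
 *	{
 *		my_tr = trace_array_create("my_instance");
 *		return PTR_ERR_OR_ZERO(my_tr);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		trace_array_destroy(my_tr);
 *	}
 *
 * This creates and removes /sys/kernel/tracing/instances/my_instance,
 * just as mkdir/rmdir on that directory (instance_mkdir/instance_rmdir
 * below) would.
 */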
8391 static int instance_mkdir(const char *name)
8393 return PTR_ERR_OR_ZERO(trace_array_create(name));
8396 static int __remove_instance(struct trace_array *tr)
8400 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
8403 list_del(&tr->list);
8405 /* Disable all the flags that were enabled coming in */
8406 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8407 if ((1 << i) & ZEROED_TRACE_FLAGS)
8408 set_tracer_flag(tr, 1 << i, 0);
8411 tracing_set_nop(tr);
8412 clear_ftrace_function_probes(tr);
8413 event_trace_del_tracer(tr);
8414 ftrace_clear_pids(tr);
8415 ftrace_destroy_function_files(tr);
8416 tracefs_remove_recursive(tr->dir);
8417 free_trace_buffers(tr);
8419 for (i = 0; i < tr->nr_topts; i++) {
8420 kfree(tr->topts[i].topts);
8424 free_cpumask_var(tr->tracing_cpumask);
8432 int trace_array_destroy(struct trace_array *tr)
8439 mutex_lock(&event_mutex);
8440 mutex_lock(&trace_types_lock);
8442 ret = __remove_instance(tr);
8444 mutex_unlock(&trace_types_lock);
8445 mutex_unlock(&event_mutex);
8449 EXPORT_SYMBOL_GPL(trace_array_destroy);
8451 static int instance_rmdir(const char *name)
8453 struct trace_array *tr;
8456 mutex_lock(&event_mutex);
8457 mutex_lock(&trace_types_lock);
8460 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8461 if (tr->name && strcmp(tr->name, name) == 0) {
8462 ret = __remove_instance(tr);
8467 mutex_unlock(&trace_types_lock);
8468 mutex_unlock(&event_mutex);
8473 static __init void create_trace_instances(struct dentry *d_tracer)
8475 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8478 if (WARN_ON(!trace_instance_dir))
8483 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8485 struct trace_event_file *file;
8488 trace_create_file("available_tracers", 0444, d_tracer,
8489 tr, &show_traces_fops);
8491 trace_create_file("current_tracer", 0644, d_tracer,
8492 tr, &set_tracer_fops);
8494 trace_create_file("tracing_cpumask", 0644, d_tracer,
8495 tr, &tracing_cpumask_fops);
8497 trace_create_file("trace_options", 0644, d_tracer,
8498 tr, &tracing_iter_fops);
8500 trace_create_file("trace", 0644, d_tracer,
8503 trace_create_file("trace_pipe", 0444, d_tracer,
8504 tr, &tracing_pipe_fops);
8506 trace_create_file("buffer_size_kb", 0644, d_tracer,
8507 tr, &tracing_entries_fops);
8509 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8510 tr, &tracing_total_entries_fops);
8512 trace_create_file("free_buffer", 0200, d_tracer,
8513 tr, &tracing_free_buffer_fops);
8515 trace_create_file("trace_marker", 0220, d_tracer,
8516 tr, &tracing_mark_fops);
8518 file = __find_event_file(tr, "ftrace", "print");
8519 if (file && file->dir)
8520 trace_create_file("trigger", 0644, file->dir, file,
8521 &event_trigger_fops);
8522 tr->trace_marker_file = file;
8524 trace_create_file("trace_marker_raw", 0220, d_tracer,
8525 tr, &tracing_mark_raw_fops);
8527 trace_create_file("trace_clock", 0644, d_tracer, tr,
8530 trace_create_file("tracing_on", 0644, d_tracer,
8531 tr, &rb_simple_fops);
8533 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8534 &trace_time_stamp_mode_fops);
8536 tr->buffer_percent = 50;
8538 trace_create_file("buffer_percent", 0444, d_tracer,
8539 tr, &buffer_percent_fops);
8541 create_trace_options_dir(tr);
8543 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8544 trace_create_file("tracing_max_latency", 0644, d_tracer,
8545 &tr->max_latency, &tracing_max_lat_fops);
8548 if (ftrace_create_function_files(tr, d_tracer))
8549 WARN(1, "Could not allocate function filter files");
8551 #ifdef CONFIG_TRACER_SNAPSHOT
8552 trace_create_file("snapshot", 0644, d_tracer,
8553 tr, &snapshot_fops);
8556 trace_create_file("error_log", 0644, d_tracer,
8557 tr, &tracing_err_log_fops);
8559 for_each_tracing_cpu(cpu)
8560 tracing_init_tracefs_percpu(tr, cpu);
8562 ftrace_init_tracefs(tr, d_tracer);
8565 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
8567 struct vfsmount *mnt;
8568 struct file_system_type *type;
8571 * To maintain backward compatibility for tools that mount
8572 * debugfs to get to the tracing facility, tracefs is automatically
8573 * mounted to the debugfs/tracing directory.
8575 type = get_fs_type("tracefs");
8578 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8579 put_filesystem(type);
8588 * tracing_init_dentry - initialize top level trace array
8590 * This is called when creating files or directories in the tracing
8591 * directory. It is called via fs_initcall() by any of the boot up code
8592 * and expects to return the dentry of the top level tracing directory.
8594 struct dentry *tracing_init_dentry(void)
8596 struct trace_array *tr = &global_trace;
8598 /* The top level trace array uses NULL as parent */
8602 if (WARN_ON(!tracefs_initialized()) ||
8603 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8604 WARN_ON(!debugfs_initialized())))
8605 return ERR_PTR(-ENODEV);
8608 * As there may still be users that expect the tracing
8609 * files to exist in debugfs/tracing, we must automount
8610 * the tracefs file system there, so older tools still
8611 * work with the newer kernel.
8613 tr->dir = debugfs_create_automount("tracing", NULL,
8614 trace_automount, NULL);
8616 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8617 return ERR_PTR(-ENOMEM);
8623 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8624 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8626 static void __init trace_eval_init(void)
8630 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8631 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8634 #ifdef CONFIG_MODULES
8635 static void trace_module_add_evals(struct module *mod)
8637 if (!mod->num_trace_evals)
8641 * Modules with bad taint do not have events created, do
8642 * not bother with enums either.
8644 if (trace_module_has_bad_taint(mod))
8647 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8650 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8651 static void trace_module_remove_evals(struct module *mod)
8653 union trace_eval_map_item *map;
8654 union trace_eval_map_item **last = &trace_eval_maps;
8656 if (!mod->num_trace_evals)
8659 mutex_lock(&trace_eval_mutex);
8661 map = trace_eval_maps;
8664 if (map->head.mod == mod)
8666 map = trace_eval_jmp_to_tail(map);
8667 last = &map->tail.next;
8668 map = map->tail.next;
8673 *last = trace_eval_jmp_to_tail(map)->tail.next;
8676 mutex_unlock(&trace_eval_mutex);
8679 static inline void trace_module_remove_evals(struct module *mod) { }
8680 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8682 static int trace_module_notify(struct notifier_block *self,
8683 unsigned long val, void *data)
8685 struct module *mod = data;
8688 case MODULE_STATE_COMING:
8689 trace_module_add_evals(mod);
8691 case MODULE_STATE_GOING:
8692 trace_module_remove_evals(mod);
8699 static struct notifier_block trace_module_nb = {
8700 .notifier_call = trace_module_notify,
8703 #endif /* CONFIG_MODULES */
8705 static __init int tracer_init_tracefs(void)
8707 struct dentry *d_tracer;
8709 trace_access_lock_init();
8711 d_tracer = tracing_init_dentry();
8712 if (IS_ERR(d_tracer))
8717 init_tracer_tracefs(&global_trace, d_tracer);
8718 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8720 trace_create_file("tracing_thresh", 0644, d_tracer,
8721 &global_trace, &tracing_thresh_fops);
8723 trace_create_file("README", 0444, d_tracer,
8724 NULL, &tracing_readme_fops);
8726 trace_create_file("saved_cmdlines", 0444, d_tracer,
8727 NULL, &tracing_saved_cmdlines_fops);
8729 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8730 NULL, &tracing_saved_cmdlines_size_fops);
8732 trace_create_file("saved_tgids", 0444, d_tracer,
8733 NULL, &tracing_saved_tgids_fops);
8737 trace_create_eval_file(d_tracer);
8739 #ifdef CONFIG_MODULES
8740 register_module_notifier(&trace_module_nb);
8743 #ifdef CONFIG_DYNAMIC_FTRACE
8744 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8745 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8748 create_trace_instances(d_tracer);
8750 update_tracer_options(&global_trace);
8755 static int trace_panic_handler(struct notifier_block *this,
8756 unsigned long event, void *unused)
8758 if (ftrace_dump_on_oops)
8759 ftrace_dump(ftrace_dump_on_oops);
8763 static struct notifier_block trace_panic_notifier = {
8764 .notifier_call = trace_panic_handler,
8766 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8769 static int trace_die_handler(struct notifier_block *self,
8775 if (ftrace_dump_on_oops)
8776 ftrace_dump(ftrace_dump_on_oops);
8784 static struct notifier_block trace_die_notifier = {
8785 .notifier_call = trace_die_handler,
8790 * printk is set to max of 1024, we really don't need it that big.
8791 * Nothing should be printing 1000 characters anyway.
8793 #define TRACE_MAX_PRINT 1000
8796 * Define here KERN_TRACE so that we have one place to modify
8797 * it if we decide to change what log level the ftrace dump
8800 #define KERN_TRACE KERN_EMERG
8803 trace_printk_seq(struct trace_seq *s)
8805 /* Probably should print a warning here. */
8806 if (s->seq.len >= TRACE_MAX_PRINT)
8807 s->seq.len = TRACE_MAX_PRINT;
8810 * More paranoid code. Although the buffer size is set to
8811 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8812 * an extra layer of protection.
8814 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8815 s->seq.len = s->seq.size - 1;
8817 /* should be zero ended, but we are paranoid. */
8818 s->buffer[s->seq.len] = 0;
8820 printk(KERN_TRACE "%s", s->buffer);
8825 void trace_init_global_iter(struct trace_iterator *iter)
8827 iter->tr = &global_trace;
8828 iter->trace = iter->tr->current_trace;
8829 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8830 iter->trace_buffer = &global_trace.trace_buffer;
8832 if (iter->trace && iter->trace->open)
8833 iter->trace->open(iter);
8835 /* Annotate start of buffers if we had overruns */
8836 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8837 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8839 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8840 if (trace_clocks[iter->tr->clock_id].in_ns)
8841 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8844 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8846 /* use static because iter can be a bit big for the stack */
8847 static struct trace_iterator iter;
8848 static atomic_t dump_running;
8849 struct trace_array *tr = &global_trace;
8850 unsigned int old_userobj;
8851 unsigned long flags;
8854 /* Only allow one dump user at a time. */
8855 if (atomic_inc_return(&dump_running) != 1) {
8856 atomic_dec(&dump_running);
8861 * Always turn off tracing when we dump.
8862 * We don't need to show trace output of what happens
8863 * between multiple crashes.
8865 * If the user does a sysrq-z, then they can re-enable
8866 * tracing with echo 1 > tracing_on.
8870 local_irq_save(flags);
8871 printk_nmi_direct_enter();
8873 /* Simulate the iterator */
8874 trace_init_global_iter(&iter);
8876 for_each_tracing_cpu(cpu) {
8877 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8880 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8882 /* don't look at user memory in panic mode */
8883 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8885 switch (oops_dump_mode) {
8887 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8890 iter.cpu_file = raw_smp_processor_id();
8895 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8896 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8899 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8901 /* Did function tracer already get disabled? */
8902 if (ftrace_is_dead()) {
8903 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8904 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8908 * We need to stop all tracing on all CPUS to read the
8909 * next buffer. This is a bit expensive, but is
8910 * not done often. We fill in all that we can read,
8911 * and then release the locks again.
8914 while (!trace_empty(&iter)) {
8917 printk(KERN_TRACE "---------------------------------\n");
8921 /* reset all but tr, trace, and overruns */
8922 memset(&iter.seq, 0,
8923 sizeof(struct trace_iterator) -
8924 offsetof(struct trace_iterator, seq));
8925 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8928 if (trace_find_next_entry_inc(&iter) != NULL) {
8931 ret = print_trace_line(&iter);
8932 if (ret != TRACE_TYPE_NO_CONSUME)
8933 trace_consume(&iter);
8935 touch_nmi_watchdog();
8937 trace_printk_seq(&iter.seq);
8941 printk(KERN_TRACE " (ftrace buffer empty)\n");
8943 printk(KERN_TRACE "---------------------------------\n");
8946 tr->trace_flags |= old_userobj;
8948 for_each_tracing_cpu(cpu) {
8949 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8951 atomic_dec(&dump_running);
8952 printk_nmi_direct_exit();
8953 local_irq_restore(flags);
8955 EXPORT_SYMBOL_GPL(ftrace_dump);
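/*
 * Besides the ftrace_dump_on_oops handling wired through the panic/die
 * notifiers, the dump can be triggered by hand, e.g. via sysrq
 * (illustrative):
 *
 *	echo 1 > /proc/sys/kernel/sysrq
 *	echo z > /proc/sysrq-trigger
 *
 * or from kernel code with ftrace_dump(DUMP_ALL) / ftrace_dump(DUMP_ORIG).
 */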
8957 int trace_run_command(const char *buf, int (*createfn)(int, char **))
8964 argv = argv_split(GFP_KERNEL, buf, &argc);
8969 ret = createfn(argc, argv);
8976 #define WRITE_BUFSIZE 4096
8978 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8979 size_t count, loff_t *ppos,
8980 int (*createfn)(int, char **))
8982 char *kbuf, *buf, *tmp;
8987 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8991 while (done < count) {
8992 size = count - done;
8994 if (size >= WRITE_BUFSIZE)
8995 size = WRITE_BUFSIZE - 1;
8997 if (copy_from_user(kbuf, buffer + done, size)) {
9004 tmp = strchr(buf, '\n');
9007 size = tmp - buf + 1;
9010 if (done + size < count) {
9013 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9014 pr_warn("Line length is too long: Should be less than %d\n",
9022 /* Remove comments */
9023 tmp = strchr(buf, '#');
9028 ret = trace_run_command(buf, createfn);
9033 } while (done < count);
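/*
 * trace_parse_run_command() is the parser behind the dynamic event
 * files; each complete line becomes one createfn() call, e.g. for
 * kprobe_events (illustrative probe name):
 *
 *	echo 'p:myopen do_sys_open' >> /sys/kernel/tracing/kprobe_events
 *	echo '-:myopen'             >> /sys/kernel/tracing/kprobe_events
 */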
9043 __init static int tracer_alloc_buffers(void)
9049 * Make sure we don't accidentally add more trace options
9050 * than we have bits for.
9052 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9054 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9057 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9058 goto out_free_buffer_mask;
9060 /* Only allocate trace_printk buffers if a trace_printk exists */
9061 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
9062 /* Must be called before global_trace.buffer is allocated */
9063 trace_printk_init_buffers();
9065 /* To save memory, keep the ring buffer size to its minimum */
9066 if (ring_buffer_expanded)
9067 ring_buf_size = trace_buf_size;
9071 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9072 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9074 raw_spin_lock_init(&global_trace.start_lock);
9077 * The prepare callbacks allocate some memory for the ring buffer. We
9078 * don't free the buffer if the CPU goes down. If we were to free
9079 * the buffer, then the user would lose any trace that was in the
9080 * buffer. The memory will be removed once the "instance" is removed.
9082 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9083 "trace/RB:preapre", trace_rb_cpu_prepare,
9086 goto out_free_cpumask;
9087 /* Used for event triggers */
9089 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9091 goto out_rm_hp_state;
9093 if (trace_create_savedcmd() < 0)
9094 goto out_free_temp_buffer;
9096 /* TODO: make the number of buffers hot pluggable with CPUS */
9097 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9098 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
9100 goto out_free_savedcmd;
9103 if (global_trace.buffer_disabled)
9106 if (trace_boot_clock) {
9107 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9109 pr_warn("Trace clock %s not defined, going back to default\n",
9114 * register_tracer() might reference current_trace, so it
9115 * needs to be set before we register anything. This is
9116 * just a bootstrap of current_trace anyway.
9118 global_trace.current_trace = &nop_trace;
9120 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9122 ftrace_init_global_array_ops(&global_trace);
9124 init_trace_flags_index(&global_trace);
9126 register_tracer(&nop_trace);
9128 /* Function tracing may start here (via kernel command line) */
9129 init_function_trace();
9131 /* All seems OK, enable tracing */
9132 tracing_disabled = 0;
9134 atomic_notifier_chain_register(&panic_notifier_list,
9135 &trace_panic_notifier);
9137 register_die_notifier(&trace_die_notifier);
9139 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9141 INIT_LIST_HEAD(&global_trace.systems);
9142 INIT_LIST_HEAD(&global_trace.events);
9143 INIT_LIST_HEAD(&global_trace.hist_vars);
9144 INIT_LIST_HEAD(&global_trace.err_log);
9145 list_add(&global_trace.list, &ftrace_trace_arrays);
9147 apply_trace_boot_options();
9149 register_snapshot_cmd();
9154 free_saved_cmdlines_buffer(savedcmd);
9155 out_free_temp_buffer:
9156 ring_buffer_free(temp_buffer);
9158 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9160 free_cpumask_var(global_trace.tracing_cpumask);
9161 out_free_buffer_mask:
9162 free_cpumask_var(tracing_buffer_mask);
9167 void __init early_trace_init(void)
9169 if (tracepoint_printk) {
9170 tracepoint_print_iter =
9171 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9172 if (WARN_ON(!tracepoint_print_iter))
9173 tracepoint_printk = 0;
9175 static_key_enable(&tracepoint_printk_key.key);
9177 tracer_alloc_buffers();
9180 void __init trace_init(void)
9185 __init static int clear_boot_tracer(void)
9188 * The default bootup tracer is stored in an init section.
9189 * This function is called in lateinit. If we did not
9190 * find the boot tracer, then clear it out, to prevent
9191 * later registration from accessing the buffer that is
9192 * about to be freed.
9194 if (!default_bootup_tracer)
9197 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9198 default_bootup_tracer);
9199 default_bootup_tracer = NULL;
9204 fs_initcall(tracer_init_tracefs);
9205 late_initcall_sync(clear_boot_tracer);
9207 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9208 __init static int tracing_set_default_clock(void)
9210 /* sched_clock_stable() is determined in late_initcall */
9211 if (!trace_boot_clock && !sched_clock_stable()) {
9213 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9214 "If you want to keep using the local clock, then add:\n"
9215 " \"trace_clock=local\"\n"
9216 "on the kernel command line\n");
9217 tracing_set_clock(&global_trace, "global");
9222 late_initcall_sync(tracing_set_default_clock);