1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/notifier.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/poll.h>
42 #include <linux/nmi.h>
44 #include <linux/trace.h>
45 #include <linux/sched/clock.h>
46 #include <linux/sched/rt.h>
49 #include "trace_output.h"
52 * On boot up, the ring buffer is set to the minimum size, so that
53 * we do not waste memory on systems that are not using tracing.
55 bool ring_buffer_expanded;
58 * We need to change this state when a selftest is running.
59 * A selftest will look into the ring buffer to count the
60 * entries inserted during the selftest, although concurrent
61 * insertions into the ring buffer, such as trace_printk(), could occur
62 * at the same time, giving false positive or negative results.
64 static bool __read_mostly tracing_selftest_running;
67 * If a tracer is running, we do not want to run SELFTEST.
69 bool __read_mostly tracing_selftest_disabled;
71 /* Pipe tracepoints to printk */
72 struct trace_iterator *tracepoint_print_iter;
73 int tracepoint_printk;
74 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
76 /* For tracers that don't implement custom flags */
77 static struct tracer_opt dummy_tracer_opt[] = {
82 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
88 * To prevent the comm cache from being overwritten when no
89 * tracing is active, only save the comm when a trace event
92 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
95 * Kill all tracing for good (never come back).
96 * It is initialized to 1 but will turn to zero if the initialization
97 * of the tracer is successful. But that is the only place that sets
98 * this back to zero.
100 static int tracing_disabled = 1;
102 cpumask_var_t __read_mostly tracing_buffer_mask;
105 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
108 * is set, then ftrace_dump is called. This will output the contents
109 * of the ftrace buffers to the console. This is very useful for
110 * capturing traces that lead to crashes and outputting them to a
113 * It is off by default, but you can enable it either by specifying
114 * "ftrace_dump_on_oops" on the kernel command line, or by setting
115 * /proc/sys/kernel/ftrace_dump_on_oops
116 * Set 1 if you want to dump buffers of all CPUs
117 * Set 2 if you want to dump the buffer of the CPU that triggered oops
120 enum ftrace_dump_mode ftrace_dump_on_oops;
122 /* When set, tracing will stop when a WARN*() is hit */
123 int __disable_trace_on_warning;
125 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
126 /* Map of enums to their values, for "eval_map" file */
127 struct trace_eval_map_head {
129 unsigned long length;
132 union trace_eval_map_item;
134 struct trace_eval_map_tail {
136 * "end" is first and points to NULL as it must be different
137 * than "mod" or "eval_string"
139 union trace_eval_map_item *next;
140 const char *end; /* points to NULL */
143 static DEFINE_MUTEX(trace_eval_mutex);
146 * The trace_eval_maps are saved in an array with two extra elements,
147 * one at the beginning, and one at the end. The beginning item contains
148 * the count of the saved maps (head.length), and the module they
149 * belong to if not built in (head.mod). The ending item contains a
150 * pointer to the next array of saved eval_map items.
152 union trace_eval_map_item {
153 struct trace_eval_map map;
154 struct trace_eval_map_head head;
155 struct trace_eval_map_tail tail;
158 static union trace_eval_map_item *trace_eval_maps;
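/*
 * Layout sketch of one saved array, assuming N maps were saved:
 *
 *	trace_eval_maps[0]      head (head.length = N, head.mod)
 *	trace_eval_maps[1..N]   map  (the eval string/value pairs)
 *	trace_eval_maps[N+1]    tail (tail.end = NULL, tail.next)
 *
 * Walking all saved maps means stepping from index 1 through
 * head.length, then following tail.next to the next saved array,
 * until next is NULL.
 */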
159 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
161 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
162 static void ftrace_trace_userstack(struct ring_buffer *buffer,
163 unsigned long flags, int pc);
165 #define MAX_TRACER_SIZE 100
166 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
167 static char *default_bootup_tracer;
169 static bool allocate_snapshot;
171 static int __init set_cmdline_ftrace(char *str)
173 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
174 default_bootup_tracer = bootup_tracer_buf;
175 /* We are using ftrace early, expand it */
176 ring_buffer_expanded = true;
179 __setup("ftrace=", set_cmdline_ftrace);
181 static int __init set_ftrace_dump_on_oops(char *str)
183 if (*str++ != '=' || !*str) {
184 ftrace_dump_on_oops = DUMP_ALL;
188 if (!strcmp("orig_cpu", str)) {
189 ftrace_dump_on_oops = DUMP_ORIG;
195 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
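/*
 * Examples of boot parameters the parser above accepts, matching the
 * two documented modes:
 *
 *	ftrace_dump_on_oops           -> DUMP_ALL  (dump every CPU buffer)
 *	ftrace_dump_on_oops=orig_cpu  -> DUMP_ORIG (dump only the oops CPU)
 */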
197 static int __init stop_trace_on_warning(char *str)
199 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
200 __disable_trace_on_warning = 1;
203 __setup("traceoff_on_warning", stop_trace_on_warning);
205 static int __init boot_alloc_snapshot(char *str)
207 allocate_snapshot = true;
208 /* We also need the main ring buffer expanded */
209 ring_buffer_expanded = true;
212 __setup("alloc_snapshot", boot_alloc_snapshot);
215 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217 static int __init set_trace_boot_options(char *str)
219 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
222 __setup("trace_options=", set_trace_boot_options);
224 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
225 static char *trace_boot_clock __initdata;
227 static int __init set_trace_boot_clock(char *str)
229 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
230 trace_boot_clock = trace_boot_clock_buf;
233 __setup("trace_clock=", set_trace_boot_clock);
235 static int __init set_tracepoint_printk(char *str)
237 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
238 tracepoint_printk = 1;
241 __setup("tp_printk", set_tracepoint_printk);
243 unsigned long long ns2usecs(u64 nsec)
250 /* trace_flags holds trace_options default values */
251 #define TRACE_DEFAULT_FLAGS \
252 (FUNCTION_DEFAULT_FLAGS | \
253 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
254 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
255 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
256 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
258 /* trace_options that are only supported by global_trace */
259 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
260 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
262 /* trace_flags that are default zero for instances */
263 #define ZEROED_TRACE_FLAGS \
264 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
267 * The global_trace is the descriptor that holds the top-level tracing
268 * buffers for the live tracing.
270 static struct trace_array global_trace = {
271 .trace_flags = TRACE_DEFAULT_FLAGS,
274 LIST_HEAD(ftrace_trace_arrays);
276 int trace_array_get(struct trace_array *this_tr)
278 struct trace_array *tr;
281 mutex_lock(&trace_types_lock);
282 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
289 mutex_unlock(&trace_types_lock);
294 static void __trace_array_put(struct trace_array *this_tr)
296 WARN_ON(!this_tr->ref);
300 void trace_array_put(struct trace_array *this_tr)
302 mutex_lock(&trace_types_lock);
303 __trace_array_put(this_tr);
304 mutex_unlock(&trace_types_lock);
307 int call_filter_check_discard(struct trace_event_call *call, void *rec,
308 struct ring_buffer *buffer,
309 struct ring_buffer_event *event)
311 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
312 !filter_match_preds(call->filter, rec)) {
313 __trace_event_discard_commit(buffer, event);
320 void trace_free_pid_list(struct trace_pid_list *pid_list)
322 vfree(pid_list->pids);
327 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
328 * @filtered_pids: The list of pids to check
329 * @search_pid: The PID to find in @filtered_pids
331 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
334 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
337 * If pid_max changed after filtered_pids was created, we
338 * by default ignore all pids greater than the previous pid_max.
340 if (search_pid >= filtered_pids->pid_max)
343 return test_bit(search_pid, filtered_pids->pids);
347 * trace_ignore_this_task - should a task be ignored for tracing
348 * @filtered_pids: The list of pids to check
349 * @task: The task that should be ignored if not filtered
351 * Checks if @task should be traced or not from @filtered_pids.
352 * Returns true if @task should *NOT* be traced.
353 * Returns false if @task should be traced.
356 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
359 * Return false, because if filtered_pids does not exist,
360 * all pids are good to trace.
365 return !trace_find_filtered_pid(filtered_pids, task->pid);
369 * trace_filter_add_remove_task - Add or remove a task from a pid_list
370 * @pid_list: The list to modify
371 * @self: The current task for fork or NULL for exit
372 * @task: The task to add or remove
374 * When adding a task, if @self is defined, the task is only added if @self
375 * is also included in @pid_list. This happens on fork, and tasks should
376 * only be added when the parent is listed. If @self is NULL, then the
377 * @task pid will be removed from the list, which would happen on exit
380 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
381 struct task_struct *self,
382 struct task_struct *task)
387 /* For forks, we only add if the forking task is listed */
389 if (!trace_find_filtered_pid(pid_list, self->pid))
393 /* Sorry, but we don't support pid_max changing after setting */
394 if (task->pid >= pid_list->pid_max)
397 /* "self" is set for forks, and NULL for exits */
399 set_bit(task->pid, pid_list->pids);
401 clear_bit(task->pid, pid_list->pids);
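/*
 * Usage sketch for the rules documented above: on fork, pass the parent
 * as @self so the child is only added when the parent is already in the
 * list; on exit, pass NULL so the task is removed.
 *
 *	fork: trace_filter_add_remove_task(pid_list, current, child);
 *	exit: trace_filter_add_remove_task(pid_list, NULL, task);
 */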
405 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
406 * @pid_list: The pid list to show
407 * @v: The last pid that was shown (+1 of the actual pid, to let zero be displayed)
408 * @pos: The position of the file
410 * This is used by the seq_file "next" operation to iterate the pids
411 * listed in a trace_pid_list structure.
413 * Returns the pid+1 as we want to display pid of zero, but NULL would
414 * stop the iteration.
416 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
418 unsigned long pid = (unsigned long)v;
422 /* pid already is +1 of the actual previous bit */
423 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
425 /* Return pid + 1 to allow zero to be represented */
426 if (pid < pid_list->pid_max)
427 return (void *)(pid + 1);
433 * trace_pid_start - Used for seq_file to start reading pid lists
434 * @pid_list: The pid list to show
435 * @pos: The position of the file
437 * This is used by seq_file "start" operation to start the iteration
440 * Returns the pid+1 as we want to display pid of zero, but NULL would
441 * stop the iteration.
443 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
448 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
449 if (pid >= pid_list->pid_max)
452 /* Return pid + 1 so that zero can be the exit value */
453 for (pid++; pid && l < *pos;
454 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
460 * trace_pid_show - show the current pid in seq_file processing
461 * @m: The seq_file structure to write into
462 * @v: A void pointer of the pid (+1) value to display
464 * Can be directly used by seq_file operations to display the current
467 int trace_pid_show(struct seq_file *m, void *v)
469 unsigned long pid = (unsigned long)v - 1;
471 seq_printf(m, "%lu\n", pid);
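/*
 * A minimal sketch of wiring the helpers above into seq_file operations.
 * The p_* wrappers are illustrative (not from this file); they only exist
 * to supply the pid_list that the helpers expect, and p_stop would undo
 * any locking taken in p_start:
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations p_seq_ops = {
 *		.start = p_start,
 *		.next  = p_next,
 *		.stop  = p_stop,
 *		.show  = trace_pid_show,
 *	};
 */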
475 /* 128 should be much more than enough */
476 #define PID_BUF_SIZE 127
478 int trace_pid_write(struct trace_pid_list *filtered_pids,
479 struct trace_pid_list **new_pid_list,
480 const char __user *ubuf, size_t cnt)
482 struct trace_pid_list *pid_list;
483 struct trace_parser parser;
491 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
495 * Always recreate a new array. The write is an all-or-nothing
496 * operation: a new array is always created when the user adds new
497 * pids. If the operation fails, then the current list is
498 * not modified.
500 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
502 trace_parser_put(&parser);
506 pid_list->pid_max = READ_ONCE(pid_max);
508 /* Only truncating will shrink pid_max */
509 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
510 pid_list->pid_max = filtered_pids->pid_max;
512 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
513 if (!pid_list->pids) {
514 trace_parser_put(&parser);
520 /* copy the current bits to the new max */
521 for_each_set_bit(pid, filtered_pids->pids,
522 filtered_pids->pid_max) {
523 set_bit(pid, pid_list->pids);
532 ret = trace_get_user(&parser, ubuf, cnt, &pos);
533 if (ret < 0 || !trace_parser_loaded(&parser))
541 if (kstrtoul(parser.buffer, 0, &val))
543 if (val >= pid_list->pid_max)
548 set_bit(pid, pid_list->pids);
551 trace_parser_clear(&parser);
554 trace_parser_put(&parser);
557 trace_free_pid_list(pid_list);
562 /* Cleared the list of pids */
563 trace_free_pid_list(pid_list);
568 *new_pid_list = pid_list;
573 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
577 /* Early boot up does not have a buffer yet */
579 return trace_clock_local();
581 ts = ring_buffer_time_stamp(buf->buffer, cpu);
582 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
587 u64 ftrace_now(int cpu)
589 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
593 * tracing_is_enabled - Show if global_trace has been disabled
595 * Shows if the global trace has been enabled or not. It uses the
596 * mirror flag "buffer_disabled" to be used in fast paths such as for
597 * the irqsoff tracer. But it may be inaccurate due to races. If you
598 * need to know the accurate state, use tracing_is_on() which is a little
599 * slower, but accurate.
601 int tracing_is_enabled(void)
604 * For quick access (irqsoff uses this in fast path), just
605 * return the mirror variable of the state of the ring buffer.
606 * It's a little racy, but we don't really care.
609 return !global_trace.buffer_disabled;
613 * trace_buf_size is the size in bytes that is allocated
614 * for a buffer. Note, the number of bytes is always rounded
615 * to page size.
617 * This number is purposely set to a low number of 16384.
618 * If a dump on oops happens, it is much appreciated not to
619 * have to wait for all that output. This can be configured at
620 * both boot time and run time anyway.
622 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
624 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
626 /* trace_types holds a link list of available tracers. */
627 static struct tracer *trace_types __read_mostly;
630 * trace_types_lock is used to protect the trace_types list.
632 DEFINE_MUTEX(trace_types_lock);
635 * serialize the access of the ring buffer
637 * The ring buffer serializes readers, but it is only low level protection.
638 * The validity of the events (which are returned by ring_buffer_peek(), etc.)
639 * is not protected by the ring buffer.
641 * The content of events may become garbage if we allow other processes to
642 * consume these events concurrently:
643 * A) the page of the consumed events may become a normal page
644 * (not a reader page) in the ring buffer, and this page will be rewritten
645 * by the events producer.
646 * B) The page of the consumed events may become a page for splice_read,
647 * and this page will be returned to the system.
649 * These primitives allow multi-process access to different cpu ring buffers
652 * These primitives don't distinguish read-only and read-consume access.
653 * Multiple read-only accesses are also serialized.
657 static DECLARE_RWSEM(all_cpu_access_lock);
658 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
660 static inline void trace_access_lock(int cpu)
662 if (cpu == RING_BUFFER_ALL_CPUS) {
663 /* gain it for accessing the whole ring buffer. */
664 down_write(&all_cpu_access_lock);
666 /* gain it for accessing a cpu ring buffer. */
668 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
669 down_read(&all_cpu_access_lock);
671 /* Secondly block other access to this @cpu ring buffer. */
672 mutex_lock(&per_cpu(cpu_access_lock, cpu));
676 static inline void trace_access_unlock(int cpu)
678 if (cpu == RING_BUFFER_ALL_CPUS) {
679 up_write(&all_cpu_access_lock);
681 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
682 up_read(&all_cpu_access_lock);
686 static inline void trace_access_lock_init(void)
690 for_each_possible_cpu(cpu)
691 mutex_init(&per_cpu(cpu_access_lock, cpu));
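/*
 * Usage sketch for the primitives above: a consuming reader wraps its
 * read in the lock for that cpu, while RING_BUFFER_ALL_CPUS takes the
 * write side of all_cpu_access_lock and excludes everyone:
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	trace_access_unlock(cpu);
 */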
696 static DEFINE_MUTEX(access_lock);
698 static inline void trace_access_lock(int cpu)
701 mutex_lock(&access_lock);
704 static inline void trace_access_unlock(int cpu)
707 mutex_unlock(&access_lock);
710 static inline void trace_access_lock_init(void)
716 #ifdef CONFIG_STACKTRACE
717 static void __ftrace_trace_stack(struct ring_buffer *buffer,
719 int skip, int pc, struct pt_regs *regs);
720 static inline void ftrace_trace_stack(struct trace_array *tr,
721 struct ring_buffer *buffer,
723 int skip, int pc, struct pt_regs *regs);
726 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
728 int skip, int pc, struct pt_regs *regs)
731 static inline void ftrace_trace_stack(struct trace_array *tr,
732 struct ring_buffer *buffer,
734 int skip, int pc, struct pt_regs *regs)
740 static __always_inline void
741 trace_event_setup(struct ring_buffer_event *event,
742 int type, unsigned long flags, int pc)
744 struct trace_entry *ent = ring_buffer_event_data(event);
746 tracing_generic_entry_update(ent, type, flags, pc);
749 static __always_inline struct ring_buffer_event *
750 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
753 unsigned long flags, int pc)
755 struct ring_buffer_event *event;
757 event = ring_buffer_lock_reserve(buffer, len);
759 trace_event_setup(event, type, flags, pc);
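/*
 * The reserve/commit pattern built on the helper above, as a sketch
 * (this is what the __trace_puts()/__trace_bputs() paths below do):
 *
 *	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
 *					    irq_flags, pc);
 *	if (!event)
 *		return 0;
 *	entry = ring_buffer_event_data(event);
 *	... fill in *entry ...
 *	__buffer_unlock_commit(buffer, event);
 */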
764 void tracer_tracing_on(struct trace_array *tr)
766 if (tr->trace_buffer.buffer)
767 ring_buffer_record_on(tr->trace_buffer.buffer);
769 * This flag is looked at when buffers haven't been allocated
770 * yet, or by some tracers (like irqsoff), that just want to
771 * know if the ring buffer has been disabled, but it can handle
772 * races where it gets disabled while we still do a record.
773 * As the check is in the fast path of the tracers, it is more
774 * important to be fast than accurate.
776 tr->buffer_disabled = 0;
777 /* Make the flag seen by readers */
782 * tracing_on - enable tracing buffers
784 * This function enables tracing buffers that may have been
785 * disabled with tracing_off.
787 void tracing_on(void)
789 tracer_tracing_on(&global_trace);
791 EXPORT_SYMBOL_GPL(tracing_on);
794 static __always_inline void
795 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
797 __this_cpu_write(trace_taskinfo_save, true);
799 /* If this is the temp buffer, we need to commit fully */
800 if (this_cpu_read(trace_buffered_event) == event) {
801 /* Length is in event->array[0] */
802 ring_buffer_write(buffer, event->array[0], &event->array[1]);
803 /* Release the temp buffer */
804 this_cpu_dec(trace_buffered_event_cnt);
806 ring_buffer_unlock_commit(buffer, event);
810 * __trace_puts - write a constant string into the trace buffer.
811 * @ip: The address of the caller
812 * @str: The constant string to write
813 * @size: The size of the string.
815 int __trace_puts(unsigned long ip, const char *str, int size)
817 struct ring_buffer_event *event;
818 struct ring_buffer *buffer;
819 struct print_entry *entry;
820 unsigned long irq_flags;
824 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
827 pc = preempt_count();
829 if (unlikely(tracing_selftest_running || tracing_disabled))
832 alloc = sizeof(*entry) + size + 2; /* possible \n added */
834 local_save_flags(irq_flags);
835 buffer = global_trace.trace_buffer.buffer;
836 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
841 entry = ring_buffer_event_data(event);
844 memcpy(&entry->buf, str, size);
846 /* Add a newline if necessary */
847 if (entry->buf[size - 1] != '\n') {
848 entry->buf[size] = '\n';
849 entry->buf[size + 1] = '\0';
851 entry->buf[size] = '\0';
853 __buffer_unlock_commit(buffer, event);
854 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
858 EXPORT_SYMBOL_GPL(__trace_puts);
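/*
 * Callers normally reach __trace_puts() through the trace_puts() macro,
 * which supplies the caller address and the string size, e.g.:
 *
 *	trace_puts("reached the fast path\n");
 */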
861 * __trace_bputs - write the pointer to a constant string into trace buffer
862 * @ip: The address of the caller
863 * @str: The constant string to write to the buffer
865 int __trace_bputs(unsigned long ip, const char *str)
867 struct ring_buffer_event *event;
868 struct ring_buffer *buffer;
869 struct bputs_entry *entry;
870 unsigned long irq_flags;
871 int size = sizeof(struct bputs_entry);
874 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
877 pc = preempt_count();
879 if (unlikely(tracing_selftest_running || tracing_disabled))
882 local_save_flags(irq_flags);
883 buffer = global_trace.trace_buffer.buffer;
884 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
889 entry = ring_buffer_event_data(event);
893 __buffer_unlock_commit(buffer, event);
894 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
898 EXPORT_SYMBOL_GPL(__trace_bputs);
900 #ifdef CONFIG_TRACER_SNAPSHOT
901 void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
903 struct tracer *tracer = tr->current_trace;
907 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
908 internal_trace_puts("*** snapshot is being ignored ***\n");
912 if (!tr->allocated_snapshot) {
913 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
914 internal_trace_puts("*** stopping trace here! ***\n");
919 /* Note, the snapshot cannot be used when the tracer itself uses it */
920 if (tracer->use_max_tr) {
921 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
922 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
926 local_irq_save(flags);
927 update_max_tr(tr, current, smp_processor_id(), cond_data);
928 local_irq_restore(flags);
931 void tracing_snapshot_instance(struct trace_array *tr)
933 tracing_snapshot_instance_cond(tr, NULL);
937 * tracing_snapshot - take a snapshot of the current buffer.
939 * This causes a swap between the snapshot buffer and the current live
940 * tracing buffer. You can use this to take snapshots of the live
941 * trace when some condition is triggered, but continue to trace.
943 * Note, make sure to allocate the snapshot either with
944 * tracing_snapshot_alloc(), or by doing it manually
945 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
947 * If the snapshot buffer is not allocated, this will stop tracing,
948 * basically making a permanent snapshot.
950 void tracing_snapshot(void)
952 struct trace_array *tr = &global_trace;
954 tracing_snapshot_instance(tr);
956 EXPORT_SYMBOL_GPL(tracing_snapshot);
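/*
 * Typical usage, per the note above: allocate the snapshot buffer first
 * from a context that can sleep, then snapshot when the condition hits.
 *
 *	tracing_alloc_snapshot();
 *	...
 *	tracing_snapshot();
 */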
959 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
960 * @tr: The tracing instance to snapshot
961 * @cond_data: The data to be tested conditionally, and possibly saved
963 * This is the same as tracing_snapshot() except that the snapshot is
964 * conditional - the snapshot will only happen if the
965 * cond_snapshot.update() implementation receiving the cond_data
966 * returns true, which means that the trace array's cond_snapshot
967 * update() operation used the cond_data to determine whether the
968 * snapshot should be taken, and if it was, presumably saved it along
969 * with the snapshot.
971 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
973 tracing_snapshot_instance_cond(tr, cond_data);
975 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
978 * tracing_snapshot_cond_data - get the user data associated with a snapshot
979 * @tr: The tracing instance
981 * When the user enables a conditional snapshot using
982 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
983 * with the snapshot. This accessor is used to retrieve it.
985 * Should not be called from cond_snapshot.update(), since it takes
986 * tr->max_lock, which the code calling
987 * cond_snapshot.update() has already taken.
989 * Returns the cond_data associated with the trace array's snapshot.
991 void *tracing_cond_snapshot_data(struct trace_array *tr)
993 void *cond_data = NULL;
995 arch_spin_lock(&tr->max_lock);
997 if (tr->cond_snapshot)
998 cond_data = tr->cond_snapshot->cond_data;
1000 arch_spin_unlock(&tr->max_lock);
1004 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1006 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1007 struct trace_buffer *size_buf, int cpu_id);
1008 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1010 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1014 if (!tr->allocated_snapshot) {
1016 /* allocate spare buffer */
1017 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1018 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
1022 tr->allocated_snapshot = true;
1028 static void free_snapshot(struct trace_array *tr)
1031 * We don't free the ring buffer; instead, we resize it because
1032 * the max_tr ring buffer has some state (e.g. ring->clock) and
1033 * we want to preserve it.
1035 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1036 set_buffer_entries(&tr->max_buffer, 1);
1037 tracing_reset_online_cpus(&tr->max_buffer);
1038 tr->allocated_snapshot = false;
1042 * tracing_alloc_snapshot - allocate snapshot buffer.
1044 * This only allocates the snapshot buffer if it isn't already
1045 * allocated - it doesn't also take a snapshot.
1047 * This is meant to be used in cases where the snapshot buffer needs
1048 * to be set up for events that can't sleep but need to be able to
1049 * trigger a snapshot.
1051 int tracing_alloc_snapshot(void)
1053 struct trace_array *tr = &global_trace;
1056 ret = tracing_alloc_snapshot_instance(tr);
1061 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1064 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1066 * This is similar to tracing_snapshot(), but it will allocate the
1067 * snapshot buffer if it isn't already allocated. Use this only
1068 * where it is safe to sleep, as the allocation may sleep.
1070 * This causes a swap between the snapshot buffer and the current live
1071 * tracing buffer. You can use this to take snapshots of the live
1072 * trace when some condition is triggered, but continue to trace.
1074 void tracing_snapshot_alloc(void)
1078 ret = tracing_alloc_snapshot();
1084 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1087 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1088 * @tr: The tracing instance
1089 * @cond_data: User data to associate with the snapshot
1090 * @update: Implementation of the cond_snapshot update function
1092 * Check whether the conditional snapshot for the given instance has
1093 * already been enabled, or if the current tracer is already using a
1094 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1095 * save the cond_data and update function inside.
1097 * Returns 0 if successful, error otherwise.
1099 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1100 cond_update_fn_t update)
1102 struct cond_snapshot *cond_snapshot;
1105 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1109 cond_snapshot->cond_data = cond_data;
1110 cond_snapshot->update = update;
1112 mutex_lock(&trace_types_lock);
1114 ret = tracing_alloc_snapshot_instance(tr);
1118 if (tr->current_trace->use_max_tr) {
1124 * The cond_snapshot can only change to NULL without the
1125 * trace_types_lock. We don't care if we race with it going
1126 * to NULL, but we want to make sure that it's not set to
1127 * something other than NULL when we get here, which we can
1128 * do safely with only holding the trace_types_lock and not
1129 * having to take the max_lock.
1131 if (tr->cond_snapshot) {
1136 arch_spin_lock(&tr->max_lock);
1137 tr->cond_snapshot = cond_snapshot;
1138 arch_spin_unlock(&tr->max_lock);
1140 mutex_unlock(&trace_types_lock);
1145 mutex_unlock(&trace_types_lock);
1146 kfree(cond_snapshot);
1149 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
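/*
 * A sketch of a conditional snapshot user; my_update() and my_threshold
 * are illustrative names. The update callback receives the cond_data
 * passed at snapshot time and returns true if the snapshot should be
 * taken:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		return *(unsigned long *)cond_data > my_threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, NULL, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &measured_value);
 */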
1152 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1153 * @tr: The tracing instance
1155 * Check whether the conditional snapshot for the given instance is
1156 * enabled; if so, free the cond_snapshot associated with it,
1157 * otherwise return -EINVAL.
1159 * Returns 0 if successful, error otherwise.
1161 int tracing_snapshot_cond_disable(struct trace_array *tr)
1165 arch_spin_lock(&tr->max_lock);
1167 if (!tr->cond_snapshot)
1170 kfree(tr->cond_snapshot);
1171 tr->cond_snapshot = NULL;
1174 arch_spin_unlock(&tr->max_lock);
1178 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1180 void tracing_snapshot(void)
1182 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1184 EXPORT_SYMBOL_GPL(tracing_snapshot);
1185 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1187 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1189 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1190 int tracing_alloc_snapshot(void)
1192 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1195 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1196 void tracing_snapshot_alloc(void)
1201 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1202 void *tracing_cond_snapshot_data(struct trace_array *tr)
1206 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1207 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1211 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1212 int tracing_snapshot_cond_disable(struct trace_array *tr)
1216 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1217 #endif /* CONFIG_TRACER_SNAPSHOT */
1219 void tracer_tracing_off(struct trace_array *tr)
1221 if (tr->trace_buffer.buffer)
1222 ring_buffer_record_off(tr->trace_buffer.buffer);
1224 * This flag is looked at when buffers haven't been allocated
1225 * yet, or by some tracers (like irqsoff), that just want to
1226 * know if the ring buffer has been disabled, but it can handle
1227 * races where it gets disabled while we still do a record.
1228 * As the check is in the fast path of the tracers, it is more
1229 * important to be fast than accurate.
1231 tr->buffer_disabled = 1;
1232 /* Make the flag seen by readers */
1237 * tracing_off - turn off tracing buffers
1239 * This function stops the tracing buffers from recording data.
1240 * It does not disable any overhead the tracers themselves may
1241 * be causing. This function simply causes all recording to
1242 * the ring buffers to fail.
1244 void tracing_off(void)
1246 tracer_tracing_off(&global_trace);
1248 EXPORT_SYMBOL_GPL(tracing_off);
1250 void disable_trace_on_warning(void)
1252 if (__disable_trace_on_warning)
1257 * tracer_tracing_is_on - show real state of ring buffer enabled
1258 * @tr: the trace array whose ring buffer state to report
1260 * Shows the real state of the ring buffer: whether it is enabled or not.
1262 bool tracer_tracing_is_on(struct trace_array *tr)
1264 if (tr->trace_buffer.buffer)
1265 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1266 return !tr->buffer_disabled;
1270 * tracing_is_on - show state of ring buffers enabled
1272 int tracing_is_on(void)
1274 return tracer_tracing_is_on(&global_trace);
1276 EXPORT_SYMBOL_GPL(tracing_is_on);
1278 static int __init set_buf_size(char *str)
1280 unsigned long buf_size;
1284 buf_size = memparse(str, &str);
1285 /* nr_entries cannot be zero */
1288 trace_buf_size = buf_size;
1291 __setup("trace_buf_size=", set_buf_size);
1293 static int __init set_tracing_thresh(char *str)
1295 unsigned long threshold;
1300 ret = kstrtoul(str, 0, &threshold);
1303 tracing_thresh = threshold * 1000;
1306 __setup("tracing_thresh=", set_tracing_thresh);
1308 unsigned long nsecs_to_usecs(unsigned long nsecs)
1310 return nsecs / 1000;
1314 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1315 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1316 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1317 * of strings in the order that the evals (enum) were defined.
1322 /* These must match the bit positions in trace_iterator_flags */
1323 static const char *trace_options[] = {
1331 int in_ns; /* is this clock in nanoseconds? */
1332 } trace_clocks[] = {
1333 { trace_clock_local, "local", 1 },
1334 { trace_clock_global, "global", 1 },
1335 { trace_clock_counter, "counter", 0 },
1336 { trace_clock_jiffies, "uptime", 0 },
1337 { trace_clock, "perf", 1 },
1338 { ktime_get_mono_fast_ns, "mono", 1 },
1339 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1340 { ktime_get_boot_fast_ns, "boot", 1 },
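/*
 * The clock is selected by name, either with the "trace_clock=" boot
 * parameter handled above or at run time, e.g.:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */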
1344 bool trace_clock_in_ns(struct trace_array *tr)
1346 if (trace_clocks[tr->clock_id].in_ns)
1353 * trace_parser_get_init - gets the buffer for trace parser
1355 int trace_parser_get_init(struct trace_parser *parser, int size)
1357 memset(parser, 0, sizeof(*parser));
1359 parser->buffer = kmalloc(size, GFP_KERNEL);
1360 if (!parser->buffer)
1363 parser->size = size;
1368 * trace_parser_put - frees the buffer for trace parser
1370 void trace_parser_put(struct trace_parser *parser)
1372 kfree(parser->buffer);
1373 parser->buffer = NULL;
1377 * trace_get_user - reads the user input string separated by space
1378 * (matched by isspace(ch))
1380 * For each string found, the 'struct trace_parser' is updated,
1381 * and the function returns.
1383 * Returns number of bytes read.
1385 * See kernel/trace/trace.h for 'struct trace_parser' details.
1387 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1388 size_t cnt, loff_t *ppos)
1395 trace_parser_clear(parser);
1397 ret = get_user(ch, ubuf++);
1405 * The parser is not finished with the last write,
1406 * continue reading the user input without skipping spaces.
1408 if (!parser->cont) {
1409 /* skip white space */
1410 while (cnt && isspace(ch)) {
1411 ret = get_user(ch, ubuf++);
1420 /* only spaces were written */
1421 if (isspace(ch) || !ch) {
1428 /* read the non-space input */
1429 while (cnt && !isspace(ch) && ch) {
1430 if (parser->idx < parser->size - 1)
1431 parser->buffer[parser->idx++] = ch;
1436 ret = get_user(ch, ubuf++);
1443 /* We either got finished input or we have to wait for another call. */
1444 if (isspace(ch) || !ch) {
1445 parser->buffer[parser->idx] = 0;
1446 parser->cont = false;
1447 } else if (parser->idx < parser->size - 1) {
1448 parser->cont = true;
1449 parser->buffer[parser->idx++] = ch;
1450 /* Make sure the parsed string always terminates with '\0'. */
1451 parser->buffer[parser->idx] = 0;
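/*
 * Worked example of the loop above: writing "123 456" returns the token
 * "123" on the first call (cont == false), then "456" on the next. A
 * token that is cut off by the end of the write sets cont == true, so
 * the following call appends to it instead of skipping leading spaces.
 */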
1464 /* TODO add a seq_buf_to_buffer() */
1465 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1469 if (trace_seq_used(s) <= s->seq.readpos)
1472 len = trace_seq_used(s) - s->seq.readpos;
1475 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1477 s->seq.readpos += cnt;
1481 unsigned long __read_mostly tracing_thresh;
1483 #ifdef CONFIG_TRACER_MAX_TRACE
1485 * Copy the new maximum trace into the separate maximum-trace
1486 * structure. (This way the maximum trace is permanently saved,
1487 * for later retrieval via /sys/kernel/tracing/tracing_max_latency.)
1490 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1492 struct trace_buffer *trace_buf = &tr->trace_buffer;
1493 struct trace_buffer *max_buf = &tr->max_buffer;
1494 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1495 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1498 max_buf->time_start = data->preempt_timestamp;
1500 max_data->saved_latency = tr->max_latency;
1501 max_data->critical_start = data->critical_start;
1502 max_data->critical_end = data->critical_end;
1504 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1505 max_data->pid = tsk->pid;
1507 * If tsk == current, then use current_uid(), as that does not use
1508 * RCU. The irq tracer can be called out of RCU scope.
1511 max_data->uid = current_uid();
1513 max_data->uid = task_uid(tsk);
1515 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1516 max_data->policy = tsk->policy;
1517 max_data->rt_priority = tsk->rt_priority;
1519 /* record this task's comm */
1520 tracing_record_cmdline(tsk);
1524 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1526 * @tsk: the task with the latency
1527 * @cpu: The cpu that initiated the trace.
1528 * @cond_data: User data associated with a conditional snapshot
1530 * Flip the buffers between the @tr and the max_tr and record information
1531 * about which task was the cause of this latency.
1534 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1540 WARN_ON_ONCE(!irqs_disabled());
1542 if (!tr->allocated_snapshot) {
1543 /* Only the nop tracer should hit this when disabling */
1544 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1548 arch_spin_lock(&tr->max_lock);
1550 /* Inherit the recordable setting from trace_buffer */
1551 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1552 ring_buffer_record_on(tr->max_buffer.buffer);
1554 ring_buffer_record_off(tr->max_buffer.buffer);
1556 #ifdef CONFIG_TRACER_SNAPSHOT
1557 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1560 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1562 __update_max_tr(tr, tsk, cpu);
1565 arch_spin_unlock(&tr->max_lock);
1569 * update_max_tr_single - only copy one trace over, and reset the rest
1571 * @tsk: The task with the latency
1572 * @cpu: The cpu of the buffer to copy.
1574 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1577 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1584 WARN_ON_ONCE(!irqs_disabled());
1585 if (!tr->allocated_snapshot) {
1586 /* Only the nop tracer should hit this when disabling */
1587 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1591 arch_spin_lock(&tr->max_lock);
1593 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1595 if (ret == -EBUSY) {
1597 * We failed to swap the buffer due to a commit taking
1598 * place on this CPU. We fail to record, but we reset
1599 * the max trace buffer (no one writes directly to it)
1600 * and flag that it failed.
1602 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1603 "Failed to swap buffers due to commit in progress\n");
1606 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1608 __update_max_tr(tr, tsk, cpu);
1609 arch_spin_unlock(&tr->max_lock);
1611 #endif /* CONFIG_TRACER_MAX_TRACE */
1613 static int wait_on_pipe(struct trace_iterator *iter, int full)
1615 /* Iterators are static, they should be filled or empty */
1616 if (trace_buffer_iter(iter, iter->cpu_file))
1619 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1623 #ifdef CONFIG_FTRACE_STARTUP_TEST
1624 static bool selftests_can_run;
1626 struct trace_selftests {
1627 struct list_head list;
1628 struct tracer *type;
1631 static LIST_HEAD(postponed_selftests);
1633 static int save_selftest(struct tracer *type)
1635 struct trace_selftests *selftest;
1637 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1641 selftest->type = type;
1642 list_add(&selftest->list, &postponed_selftests);
1646 static int run_tracer_selftest(struct tracer *type)
1648 struct trace_array *tr = &global_trace;
1649 struct tracer *saved_tracer = tr->current_trace;
1652 if (!type->selftest || tracing_selftest_disabled)
1656 * If a tracer registers early in boot up (before scheduling is
1657 * initialized and such), then do not run its selftests yet.
1658 * Instead, run it a little later in the boot process.
1660 if (!selftests_can_run)
1661 return save_selftest(type);
1664 * Run a selftest on this tracer.
1665 * Here we reset the trace buffer, and set the current
1666 * tracer to be this tracer. The tracer can then run some
1667 * internal tracing to verify that everything is in order.
1668 * If we fail, we do not register this tracer.
1670 tracing_reset_online_cpus(&tr->trace_buffer);
1672 tr->current_trace = type;
1674 #ifdef CONFIG_TRACER_MAX_TRACE
1675 if (type->use_max_tr) {
1676 /* If we expanded the buffers, make sure the max is expanded too */
1677 if (ring_buffer_expanded)
1678 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1679 RING_BUFFER_ALL_CPUS);
1680 tr->allocated_snapshot = true;
1684 /* the test is responsible for initializing and enabling */
1685 pr_info("Testing tracer %s: ", type->name);
1686 ret = type->selftest(type, tr);
1687 /* the test is responsible for resetting too */
1688 tr->current_trace = saved_tracer;
1690 printk(KERN_CONT "FAILED!\n");
1691 /* Add the warning after printing 'FAILED' */
1695 /* Only reset on passing, to avoid touching corrupted buffers */
1696 tracing_reset_online_cpus(&tr->trace_buffer);
1698 #ifdef CONFIG_TRACER_MAX_TRACE
1699 if (type->use_max_tr) {
1700 tr->allocated_snapshot = false;
1702 /* Shrink the max buffer again */
1703 if (ring_buffer_expanded)
1704 ring_buffer_resize(tr->max_buffer.buffer, 1,
1705 RING_BUFFER_ALL_CPUS);
1709 printk(KERN_CONT "PASSED\n");
1713 static __init int init_trace_selftests(void)
1715 struct trace_selftests *p, *n;
1716 struct tracer *t, **last;
1719 selftests_can_run = true;
1721 mutex_lock(&trace_types_lock);
1723 if (list_empty(&postponed_selftests))
1726 pr_info("Running postponed tracer tests:\n");
1728 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1729 /* This loop can take minutes when sanitizers are enabled, so
1730 * let's make sure we allow RCU processing.
1733 ret = run_tracer_selftest(p->type);
1734 /* If the test fails, then warn and remove from available_tracers */
1736 WARN(1, "tracer: %s failed selftest, disabling\n",
1738 last = &trace_types;
1739 for (t = trace_types; t; t = t->next) {
1752 mutex_unlock(&trace_types_lock);
1756 core_initcall(init_trace_selftests);
1758 static inline int run_tracer_selftest(struct tracer *type)
1762 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1764 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1766 static void __init apply_trace_boot_options(void);
1769 * register_tracer - register a tracer with the ftrace system.
1770 * @type: the plugin for the tracer
1772 * Register a new plugin tracer.
1774 int __init register_tracer(struct tracer *type)
1780 pr_info("Tracer must have a name\n");
1784 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1785 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1789 mutex_lock(&trace_types_lock);
1791 tracing_selftest_running = true;
1793 for (t = trace_types; t; t = t->next) {
1794 if (strcmp(type->name, t->name) == 0) {
1796 pr_info("Tracer %s already registered\n",
1803 if (!type->set_flag)
1804 type->set_flag = &dummy_set_flag;
1806 /* Allocate a dummy tracer_flags */
1807 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1812 type->flags->val = 0;
1813 type->flags->opts = dummy_tracer_opt;
1815 if (!type->flags->opts)
1816 type->flags->opts = dummy_tracer_opt;
1818 /* store the tracer for __set_tracer_option */
1819 type->flags->trace = type;
1821 ret = run_tracer_selftest(type);
1825 type->next = trace_types;
1827 add_tracer_options(&global_trace, type);
1830 tracing_selftest_running = false;
1831 mutex_unlock(&trace_types_lock);
1833 if (ret || !default_bootup_tracer)
1836 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1839 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1840 /* Do we want this tracer to start on bootup? */
1841 tracing_set_tracer(&global_trace, type->name);
1842 default_bootup_tracer = NULL;
1844 apply_trace_boot_options();
1846 /* disable other selftests, since this will break them. */
1847 tracing_selftest_disabled = true;
1848 #ifdef CONFIG_FTRACE_STARTUP_TEST
1849 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1857 void tracing_reset(struct trace_buffer *buf, int cpu)
1859 struct ring_buffer *buffer = buf->buffer;
1864 ring_buffer_record_disable(buffer);
1866 /* Make sure all commits have finished */
1868 ring_buffer_reset_cpu(buffer, cpu);
1870 ring_buffer_record_enable(buffer);
1873 void tracing_reset_online_cpus(struct trace_buffer *buf)
1875 struct ring_buffer *buffer = buf->buffer;
1881 ring_buffer_record_disable(buffer);
1883 /* Make sure all commits have finished */
1886 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1888 for_each_online_cpu(cpu)
1889 ring_buffer_reset_cpu(buffer, cpu);
1891 ring_buffer_record_enable(buffer);
1894 /* Must have trace_types_lock held */
1895 void tracing_reset_all_online_cpus(void)
1897 struct trace_array *tr;
1899 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1900 if (!tr->clear_trace)
1902 tr->clear_trace = false;
1903 tracing_reset_online_cpus(&tr->trace_buffer);
1904 #ifdef CONFIG_TRACER_MAX_TRACE
1905 tracing_reset_online_cpus(&tr->max_buffer);
1910 static int *tgid_map;
1912 #define SAVED_CMDLINES_DEFAULT 128
1913 #define NO_CMDLINE_MAP UINT_MAX
1914 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1915 struct saved_cmdlines_buffer {
1916 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1917 unsigned *map_cmdline_to_pid;
1918 unsigned cmdline_num;
1920 char *saved_cmdlines;
1922 static struct saved_cmdlines_buffer *savedcmd;
1924 /* temporarily disable recording */
1925 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1927 static inline char *get_saved_cmdlines(int idx)
1929 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1932 static inline void set_cmdline(int idx, const char *cmdline)
1934 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1937 static int allocate_cmdlines_buffer(unsigned int val,
1938 struct saved_cmdlines_buffer *s)
1940 s->map_cmdline_to_pid = kmalloc_array(val,
1941 sizeof(*s->map_cmdline_to_pid),
1943 if (!s->map_cmdline_to_pid)
1946 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1947 if (!s->saved_cmdlines) {
1948 kfree(s->map_cmdline_to_pid);
1953 s->cmdline_num = val;
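/*
 * NO_CMDLINE_MAP is UINT_MAX, so every byte of it is 0xff, which is
 * what makes the byte-wise memset()s below a valid way to fill the
 * maps with NO_CMDLINE_MAP.
 */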
1954 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1955 sizeof(s->map_pid_to_cmdline));
1956 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1957 val * sizeof(*s->map_cmdline_to_pid));
1962 static int trace_create_savedcmd(void)
1966 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1970 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1980 int is_tracing_stopped(void)
1982 return global_trace.stop_count;
1986 * tracing_start - quick start of the tracer
1988 * If tracing is enabled but was stopped by tracing_stop,
1989 * this will start the tracer back up.
1991 void tracing_start(void)
1993 struct ring_buffer *buffer;
1994 unsigned long flags;
1996 if (tracing_disabled)
1999 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2000 if (--global_trace.stop_count) {
2001 if (global_trace.stop_count < 0) {
2002 /* Someone screwed up their debugging */
2004 global_trace.stop_count = 0;
2009 /* Prevent the buffers from switching */
2010 arch_spin_lock(&global_trace.max_lock);
2012 buffer = global_trace.trace_buffer.buffer;
2014 ring_buffer_record_enable(buffer);
2016 #ifdef CONFIG_TRACER_MAX_TRACE
2017 buffer = global_trace.max_buffer.buffer;
2019 ring_buffer_record_enable(buffer);
2022 arch_spin_unlock(&global_trace.max_lock);
2025 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2028 static void tracing_start_tr(struct trace_array *tr)
2030 struct ring_buffer *buffer;
2031 unsigned long flags;
2033 if (tracing_disabled)
2036 /* If global, we need to also start the max tracer */
2037 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2038 return tracing_start();
2040 raw_spin_lock_irqsave(&tr->start_lock, flags);
2042 if (--tr->stop_count) {
2043 if (tr->stop_count < 0) {
2044 /* Someone screwed up their debugging */
2051 buffer = tr->trace_buffer.buffer;
2053 ring_buffer_record_enable(buffer);
2056 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2060 * tracing_stop - quick stop of the tracer
2062 * Light weight way to stop tracing. Use in conjunction with
2063 * tracing_start.
2065 void tracing_stop(void)
2067 struct ring_buffer *buffer;
2068 unsigned long flags;
2070 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2071 if (global_trace.stop_count++)
2074 /* Prevent the buffers from switching */
2075 arch_spin_lock(&global_trace.max_lock);
2077 buffer = global_trace.trace_buffer.buffer;
2079 ring_buffer_record_disable(buffer);
2081 #ifdef CONFIG_TRACER_MAX_TRACE
2082 buffer = global_trace.max_buffer.buffer;
2084 ring_buffer_record_disable(buffer);
2087 arch_spin_unlock(&global_trace.max_lock);
2090 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2093 static void tracing_stop_tr(struct trace_array *tr)
2095 struct ring_buffer *buffer;
2096 unsigned long flags;
2098 /* If global, we need to also stop the max tracer */
2099 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2100 return tracing_stop();
2102 raw_spin_lock_irqsave(&tr->start_lock, flags);
2103 if (tr->stop_count++)
2106 buffer = tr->trace_buffer.buffer;
2108 ring_buffer_record_disable(buffer);
2111 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2114 static int trace_save_cmdline(struct task_struct *tsk)
2118 /* treat recording of idle task as a success */
2122 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2126 * It's not the end of the world if we don't get
2127 * the lock, but we also don't want to spin
2128 * nor do we want to disable interrupts,
2129 * so if we miss here, then better luck next time.
2131 if (!arch_spin_trylock(&trace_cmdline_lock))
2134 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2135 if (idx == NO_CMDLINE_MAP) {
2136 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2139 * Check whether the cmdline buffer at idx has a pid
2140 * mapped. We are going to overwrite that entry so we
2141 * need to clear the map_pid_to_cmdline. Otherwise we
2142 * would read the new comm for the old pid.
2144 pid = savedcmd->map_cmdline_to_pid[idx];
2145 if (pid != NO_CMDLINE_MAP)
2146 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2148 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2149 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2151 savedcmd->cmdline_idx = idx;
2154 set_cmdline(idx, tsk->comm);
2156 arch_spin_unlock(&trace_cmdline_lock);
2161 static void __trace_find_cmdline(int pid, char comm[])
2166 strcpy(comm, "<idle>");
2170 if (WARN_ON_ONCE(pid < 0)) {
2171 strcpy(comm, "<XXX>");
2175 if (pid > PID_MAX_DEFAULT) {
2176 strcpy(comm, "<...>");
2180 map = savedcmd->map_pid_to_cmdline[pid];
2181 if (map != NO_CMDLINE_MAP)
2182 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2184 strcpy(comm, "<...>");
2187 void trace_find_cmdline(int pid, char comm[])
2190 arch_spin_lock(&trace_cmdline_lock);
2192 __trace_find_cmdline(pid, comm);
2194 arch_spin_unlock(&trace_cmdline_lock);
2198 int trace_find_tgid(int pid)
2200 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2203 return tgid_map[pid];
2206 static int trace_save_tgid(struct task_struct *tsk)
2208 /* treat recording of idle task as a success */
2212 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2215 tgid_map[tsk->pid] = tsk->tgid;
2219 static bool tracing_record_taskinfo_skip(int flags)
2221 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2223 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2225 if (!__this_cpu_read(trace_taskinfo_save))
2231 * tracing_record_taskinfo - record the task info of a task
2233 * @task: task to record
2234 * @flags: TRACE_RECORD_CMDLINE for recording comm
2235 *         TRACE_RECORD_TGID for recording tgid
2237 void tracing_record_taskinfo(struct task_struct *task, int flags)
2241 if (tracing_record_taskinfo_skip(flags))
2245 * Record as much task information as possible. If some fail, continue
2246 * to try to record the others.
2248 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2249 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2251 /* If recording any information failed, retry again soon. */
2255 __this_cpu_write(trace_taskinfo_save, false);
2259 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2261 * @prev: previous task during sched_switch
2262 * @next: next task during sched_switch
2263 * @flags: TRACE_RECORD_CMDLINE for recording comm
2264 *         TRACE_RECORD_TGID for recording tgid
2266 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2267 struct task_struct *next, int flags)
2271 if (tracing_record_taskinfo_skip(flags))
2275 * Record as much task information as possible. If some fail, continue
2276 * to try to record the others.
2278 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2279 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2280 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2281 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2283 /* If recording any information failed, retry again soon. */
2287 __this_cpu_write(trace_taskinfo_save, false);
2290 /* Helpers to record a specific task information */
2291 void tracing_record_cmdline(struct task_struct *task)
2293 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2296 void tracing_record_tgid(struct task_struct *task)
2298 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2302 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2303 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2304 * simplifies those functions and keeps them in sync.
2306 enum print_line_t trace_handle_return(struct trace_seq *s)
2308 return trace_seq_has_overflowed(s) ?
2309 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2311 EXPORT_SYMBOL_GPL(trace_handle_return);
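/*
 * Typical use in an event's output callback, per the comment above
 * (foo_trace_print is an illustrative name):
 *
 *	static enum print_line_t foo_trace_print(struct trace_iterator *iter,
 *						 int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "...");
 *		return trace_handle_return(&iter->seq);
 *	}
 */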
2314 tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2315 unsigned long flags, int pc)
2317 struct task_struct *tsk = current;
2319 entry->preempt_count = pc & 0xff;
2320 entry->pid = (tsk) ? tsk->pid : 0;
2323 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2324 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2326 TRACE_FLAG_IRQS_NOSUPPORT |
2328 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2329 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2330 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2331 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2332 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2334 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2336 struct ring_buffer_event *
2337 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2340 unsigned long flags, int pc)
2342 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2345 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2346 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2347 static int trace_buffered_event_ref;
2350 * trace_buffered_event_enable - enable buffering events
2352 * When events are being filtered, it is quicker to use a temporary
2353 * buffer to write the event data into if there's a likely chance
2354 * that it will not be committed. The discard of the ring buffer
2355 * is not as fast as committing, and is much slower than copying
2356 * to the temp buffer.
2358 * When an event is to be filtered, allocate per cpu buffers to
2359 * write the event data into, and if the event is filtered and discarded
2360 * it is simply dropped; otherwise, the entire data is committed
2361 * in one shot.
2363 void trace_buffered_event_enable(void)
2365 struct ring_buffer_event *event;
2369 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2371 if (trace_buffered_event_ref++)
2374 for_each_tracing_cpu(cpu) {
2375 page = alloc_pages_node(cpu_to_node(cpu),
2376 GFP_KERNEL | __GFP_NORETRY, 0);
2380 event = page_address(page);
2381 memset(event, 0, sizeof(*event));
2383 per_cpu(trace_buffered_event, cpu) = event;
2386 if (cpu == smp_processor_id() &&
2387 this_cpu_read(trace_buffered_event) !=
2388 per_cpu(trace_buffered_event, cpu))
2395 trace_buffered_event_disable();
2398 static void enable_trace_buffered_event(void *data)
2400 /* Probably not needed, but do it anyway */
2402 this_cpu_dec(trace_buffered_event_cnt);
2405 static void disable_trace_buffered_event(void *data)
2407 this_cpu_inc(trace_buffered_event_cnt);
2411 * trace_buffered_event_disable - disable buffering events
2413 * When a filter is removed, it is faster to not use the buffered
2414 * events, and to commit directly into the ring buffer. Free up
2415 * the temp buffers when there are no more users. This requires
2416 * special synchronization with current events.
2418 void trace_buffered_event_disable(void)
2422 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2424 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2427 if (--trace_buffered_event_ref)
2431 /* For each CPU, set the buffer as used. */
2432 smp_call_function_many(tracing_buffer_mask,
2433 disable_trace_buffered_event, NULL, 1);
2436 /* Wait for all current users to finish */
2439 for_each_tracing_cpu(cpu) {
2440 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2441 per_cpu(trace_buffered_event, cpu) = NULL;
2444 * Make sure trace_buffered_event is NULL before clearing
2445 * trace_buffered_event_cnt.
2450 /* Do the work on each cpu */
2451 smp_call_function_many(tracing_buffer_mask,
2452 enable_trace_buffered_event, NULL, 1);
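/*
 * Editor's illustrative sketch (not part of the original file): the
 * enable/disable pair above is reference counted and, per the
 * WARN_ON_ONCE() checks, must run under event_mutex. A filter
 * add/remove path would bracket its lifetime roughly like this:
 */
#if 0
static void example_filter_lifetime(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();		/* on filter add */
	mutex_unlock(&event_mutex);

	/* ... events are now staged in the per-CPU event page ... */

	mutex_lock(&event_mutex);
	trace_buffered_event_disable();		/* on filter removal */
	mutex_unlock(&event_mutex);
}
#endif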
2456 static struct ring_buffer *temp_buffer;
2458 struct ring_buffer_event *
2459 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2460 struct trace_event_file *trace_file,
2461 int type, unsigned long len,
2462 unsigned long flags, int pc)
2464 struct ring_buffer_event *entry;
2467 *current_rb = trace_file->tr->trace_buffer.buffer;
2469 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2470 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2471 (entry = this_cpu_read(trace_buffered_event))) {
2472 /* Try to use the per cpu buffer first */
2473 val = this_cpu_inc_return(trace_buffered_event_cnt);
2475 trace_event_setup(entry, type, flags, pc);
2476 entry->array[0] = len;
2479 this_cpu_dec(trace_buffered_event_cnt);
2482 entry = __trace_buffer_lock_reserve(*current_rb,
2483 type, len, flags, pc);
2485 * If tracing is off, but we have triggers enabled
2486 * we still need to look at the event data. Use the temp_buffer
2487 * to store the trace event for the trigger to use. It's recursive
2488 * safe and will not be recorded anywhere.
2490 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2491 *current_rb = temp_buffer;
2492 entry = __trace_buffer_lock_reserve(*current_rb,
2493 type, len, flags, pc);
2497 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
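/*
 * Editor's illustrative sketch (not part of the original file): the
 * reserve/fill/commit shape that generated trace_event_raw_event_*()
 * functions build around trace_event_buffer_lock_reserve(). The names
 * below are illustrative only.
 */
#if 0
static void example_emit(struct trace_event_file *file, int type,
			 unsigned long len, unsigned long irq_flags, int pc)
{
	struct ring_buffer *buffer;
	struct ring_buffer_event *event;

	event = trace_event_buffer_lock_reserve(&buffer, file, type,
						len, irq_flags, pc);
	if (!event)
		return;		/* buffer full, or event soft disabled */

	/* ... fill in ring_buffer_event_data(event) here ... */

	event_trigger_unlock_commit(file, buffer, event,
				    ring_buffer_event_data(event),
				    irq_flags, pc);
}
#endif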
2499 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2500 static DEFINE_MUTEX(tracepoint_printk_mutex);
2502 static void output_printk(struct trace_event_buffer *fbuffer)
2504 struct trace_event_call *event_call;
2505 struct trace_event *event;
2506 unsigned long flags;
2507 struct trace_iterator *iter = tracepoint_print_iter;
2509 /* We should never get here if iter is NULL */
2510 if (WARN_ON_ONCE(!iter))
2513 event_call = fbuffer->trace_file->event_call;
2514 if (!event_call || !event_call->event.funcs ||
2515 !event_call->event.funcs->trace)
2518 event = &fbuffer->trace_file->event_call->event;
2520 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2521 trace_seq_init(&iter->seq);
2522 iter->ent = fbuffer->entry;
2523 event_call->event.funcs->trace(iter, 0, event);
2524 trace_seq_putc(&iter->seq, 0);
2525 printk("%s", iter->seq.buffer);
2527 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2530 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2531 void __user *buffer, size_t *lenp,
2534 int save_tracepoint_printk;
2537 mutex_lock(&tracepoint_printk_mutex);
2538 save_tracepoint_printk = tracepoint_printk;
2540 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2543 * This will force exiting early, as tracepoint_printk
2544 * is always zero when tracepoint_print_iter is not allocated
2546 if (!tracepoint_print_iter)
2547 tracepoint_printk = 0;
2549 if (save_tracepoint_printk == tracepoint_printk)
2552 if (tracepoint_printk)
2553 static_key_enable(&tracepoint_printk_key.key);
2555 static_key_disable(&tracepoint_printk_key.key);
2558 mutex_unlock(&tracepoint_printk_mutex);
2563 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2565 if (static_key_false(&tracepoint_printk_key.key))
2566 output_printk(fbuffer);
2568 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2569 fbuffer->event, fbuffer->entry,
2570 fbuffer->flags, fbuffer->pc);
2572 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2577 * trace_buffer_unlock_commit_regs()
2578 * trace_event_buffer_commit()
2579 * trace_event_raw_event_xxx()
2581 # define STACK_SKIP 3
2583 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2584 struct ring_buffer *buffer,
2585 struct ring_buffer_event *event,
2586 unsigned long flags, int pc,
2587 struct pt_regs *regs)
2589 __buffer_unlock_commit(buffer, event);
2592 * If regs is not set, then skip the necessary functions.
2593 * Note, we can still get here via blktrace, wakeup tracer
2594 * and mmiotrace, but that's ok if they lose a function or
2595 * two. They are not that meaningful.
2597 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2598 ftrace_trace_userstack(buffer, flags, pc);
2602 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2605 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2606 struct ring_buffer_event *event)
2608 __buffer_unlock_commit(buffer, event);
2612 trace_process_export(struct trace_export *export,
2613 struct ring_buffer_event *event)
2615 struct trace_entry *entry;
2616 unsigned int size = 0;
2618 entry = ring_buffer_event_data(event);
2619 size = ring_buffer_event_length(event);
2620 export->write(export, entry, size);
2623 static DEFINE_MUTEX(ftrace_export_lock);
2625 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2627 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2629 static inline void ftrace_exports_enable(void)
2631 static_branch_enable(&ftrace_exports_enabled);
2634 static inline void ftrace_exports_disable(void)
2636 static_branch_disable(&ftrace_exports_enabled);
2639 static void ftrace_exports(struct ring_buffer_event *event)
2641 struct trace_export *export;
2643 preempt_disable_notrace();
2645 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2647 trace_process_export(export, event);
2648 export = rcu_dereference_raw_notrace(export->next);
2651 preempt_enable_notrace();
2655 add_trace_export(struct trace_export **list, struct trace_export *export)
2657 rcu_assign_pointer(export->next, *list);
2659 * We are entering export into the list but another
2660 * CPU might be walking that list. We need to make sure
2661 * the export->next pointer is valid before another CPU sees
2662 * the export pointer included into the list.
2664 rcu_assign_pointer(*list, export);
2668 rm_trace_export(struct trace_export **list, struct trace_export *export)
2670 struct trace_export **p;
2672 for (p = list; *p != NULL; p = &(*p)->next)
2679 rcu_assign_pointer(*p, (*p)->next);
2685 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2688 ftrace_exports_enable();
2690 add_trace_export(list, export);
2694 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2698 ret = rm_trace_export(list, export);
2700 ftrace_exports_disable();
2705 int register_ftrace_export(struct trace_export *export)
2707 if (WARN_ON_ONCE(!export->write))
2710 mutex_lock(&ftrace_export_lock);
2712 add_ftrace_export(&ftrace_exports_list, export);
2714 mutex_unlock(&ftrace_export_lock);
2718 EXPORT_SYMBOL_GPL(register_ftrace_export);
2720 int unregister_ftrace_export(struct trace_export *export)
2724 mutex_lock(&ftrace_export_lock);
2726 ret = rm_ftrace_export(&ftrace_exports_list, export);
2728 mutex_unlock(&ftrace_export_lock);
2732 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
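/*
 * Editor's illustrative sketch (not part of the original file): a
 * minimal trace_export consumer. Once registered, every exported
 * function-trace event is handed to ->write(); the callback runs with
 * preemption disabled (see ftrace_exports() above) and so must not
 * sleep. All names here are illustrative.
 */
#if 0
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* forward @size bytes of @entry to another channel, atomically */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}
#endif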
2735 trace_function(struct trace_array *tr,
2736 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2739 struct trace_event_call *call = &event_function;
2740 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2741 struct ring_buffer_event *event;
2742 struct ftrace_entry *entry;
2744 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2748 entry = ring_buffer_event_data(event);
2750 entry->parent_ip = parent_ip;
2752 if (!call_filter_check_discard(call, entry, buffer, event)) {
2753 if (static_branch_unlikely(&ftrace_exports_enabled))
2754 ftrace_exports(event);
2755 __buffer_unlock_commit(buffer, event);
2759 #ifdef CONFIG_STACKTRACE
2761 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2762 #define FTRACE_KSTACK_NESTING 4
2764 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2766 struct ftrace_stack {
2767 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2771 struct ftrace_stacks {
2772 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2775 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2776 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2778 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2779 unsigned long flags,
2780 int skip, int pc, struct pt_regs *regs)
2782 struct trace_event_call *call = &event_kernel_stack;
2783 struct ring_buffer_event *event;
2784 unsigned int size, nr_entries;
2785 struct ftrace_stack *fstack;
2786 struct stack_entry *entry;
2790 * Add one, for this function and the call to save_stack_trace()
2791 * If regs is set, then these functions will not be in the way.
2793 #ifndef CONFIG_UNWINDER_ORC
2799 * Since events can happen in NMIs there's no safe way to
2800 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2801 * or NMI comes in, it will just have to use the default
2802 * FTRACE_STACK_SIZE.
2804 preempt_disable_notrace();
2806 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2808 /* This should never happen. If it does, yell once and skip */
2809 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2813 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2814 * interrupt will either see the value pre increment or post
2815 * increment. If the interrupt happens pre increment it will have
2816 * restored the counter when it returns. We just need a barrier to
2817 * keep gcc from moving things around.
2821 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2822 size = ARRAY_SIZE(fstack->calls);
2825 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2828 nr_entries = stack_trace_save(fstack->calls, size, skip);
2831 size = nr_entries * sizeof(unsigned long);
2832 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2833 sizeof(*entry) + size, flags, pc);
2836 entry = ring_buffer_event_data(event);
2838 memcpy(&entry->caller, fstack->calls, size);
2839 entry->size = nr_entries;
2841 if (!call_filter_check_discard(call, entry, buffer, event))
2842 __buffer_unlock_commit(buffer, event);
2845 /* Again, don't let gcc optimize things here */
2847 __this_cpu_dec(ftrace_stack_reserve);
2848 preempt_enable_notrace();
2852 static inline void ftrace_trace_stack(struct trace_array *tr,
2853 struct ring_buffer *buffer,
2854 unsigned long flags,
2855 int skip, int pc, struct pt_regs *regs)
2857 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2860 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2863 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2866 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2868 if (rcu_is_watching()) {
2869 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2874 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2875 * but if the above rcu_is_watching() failed, then the NMI
2876 * triggered someplace critical, and rcu_irq_enter() should
2877 * not be called from NMI.
2879 if (unlikely(in_nmi()))
2882 rcu_irq_enter_irqson();
2883 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2884 rcu_irq_exit_irqson();
2888 * trace_dump_stack - record a stack back trace in the trace buffer
2889 * @skip: Number of functions to skip (helper handlers)
2891 void trace_dump_stack(int skip)
2893 unsigned long flags;
2895 if (tracing_disabled || tracing_selftest_running)
2898 local_save_flags(flags);
2900 #ifndef CONFIG_UNWINDER_ORC
2901 /* Skip 1 to skip this function. */
2904 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2905 flags, skip, preempt_count(), NULL);
2907 EXPORT_SYMBOL_GPL(trace_dump_stack);
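/*
 * Editor's illustrative sketch (not part of the original file):
 * trace_dump_stack() is intended for ad-hoc debugging. Dropping it into
 * a suspect code path records the call chain in the trace buffer
 * instead of spamming the console the way dump_stack() would.
 */
#if 0
static void example_debug_hook(void)
{
	trace_dump_stack(0);	/* 0: do not skip any caller frames */
}
#endif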
2909 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
2910 static DEFINE_PER_CPU(int, user_stack_count);
2913 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2915 struct trace_event_call *call = &event_user_stack;
2916 struct ring_buffer_event *event;
2917 struct userstack_entry *entry;
2919 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2923 * NMIs can not handle page faults, even with fixups.
2924 * Saving the user stack can (and often does) fault.
2926 if (unlikely(in_nmi()))
2930 * prevent recursion, since the user stack tracing may
2931 * trigger other kernel events.
2934 if (__this_cpu_read(user_stack_count))
2937 __this_cpu_inc(user_stack_count);
2939 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2940 sizeof(*entry), flags, pc);
2942 goto out_drop_count;
2943 entry = ring_buffer_event_data(event);
2945 entry->tgid = current->tgid;
2946 memset(&entry->caller, 0, sizeof(entry->caller));
2948 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
2949 if (!call_filter_check_discard(call, entry, buffer, event))
2950 __buffer_unlock_commit(buffer, event);
2953 __this_cpu_dec(user_stack_count);
2957 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
2958 static void ftrace_trace_userstack(struct ring_buffer *buffer,
2959 unsigned long flags, int pc)
2962 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
2964 #endif /* CONFIG_STACKTRACE */
2966 /* created for use with alloc_percpu */
2967 struct trace_buffer_struct {
2968 int nesting;
2969 char buffer[4][TRACE_BUF_SIZE];
2972 static struct trace_buffer_struct *trace_percpu_buffer;
2975 * This allows for lockless recording. If we're nested too deeply, then
2976 * this returns NULL.
2978 static char *get_trace_buf(void)
2980 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2982 if (!buffer || buffer->nesting >= 4)
2987 /* Interrupts must see nesting incremented before we use the buffer */
2989 return &buffer->buffer[buffer->nesting][0];
2992 static void put_trace_buf(void)
2994 /* Don't let the decrement of nesting leak before this */
2996 this_cpu_dec(trace_percpu_buffer->nesting);
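/*
 * Editor's illustrative sketch (not part of the original file): the
 * get/put pairing that trace_vbprintk()/trace_vprintk() below rely on.
 * Preemption must be disabled so the nesting counter and the returned
 * buffer stay on one CPU; up to four contexts (task, softirq, irq, NMI)
 * can nest on that CPU at once.
 */
#if 0
static void example_use_trace_buf(void)
{
	char *tbuffer;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer) {
		/* format at most TRACE_BUF_SIZE bytes into tbuffer */
		put_trace_buf();
	}
	preempt_enable_notrace();
}
#endif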
2999 static int alloc_percpu_trace_buffer(void)
3001 struct trace_buffer_struct *buffers;
3003 buffers = alloc_percpu(struct trace_buffer_struct);
3004 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3007 trace_percpu_buffer = buffers;
3011 static int buffers_allocated;
3013 void trace_printk_init_buffers(void)
3015 if (buffers_allocated)
3018 if (alloc_percpu_trace_buffer())
3021 /* trace_printk() is for debug use only. Don't use it in production. */
3024 pr_warn("**********************************************************\n");
3025 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3027 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3029 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3030 pr_warn("** unsafe for production use. **\n");
3032 pr_warn("** If you see this message and you are not debugging **\n");
3033 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3035 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3036 pr_warn("**********************************************************\n");
3038 /* Expand the buffers to set size */
3039 tracing_update_buffers();
3041 buffers_allocated = 1;
3044 * trace_printk_init_buffers() can be called by modules.
3045 * If that happens, then we need to start cmdline recording
3046 * directly here. If the global_trace.buffer is already
3047 * allocated here, then this was called by module code.
3049 if (global_trace.trace_buffer.buffer)
3050 tracing_start_cmdline_record();
3052 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3054 void trace_printk_start_comm(void)
3056 /* Start tracing comms if trace printk is set */
3057 if (!buffers_allocated)
3059 tracing_start_cmdline_record();
3062 static void trace_printk_start_stop_comm(int enabled)
3064 if (!buffers_allocated)
3068 tracing_start_cmdline_record();
3070 tracing_stop_cmdline_record();
3074 * trace_vbprintk - write binary msg to tracing buffer
3077 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3079 struct trace_event_call *call = &event_bprint;
3080 struct ring_buffer_event *event;
3081 struct ring_buffer *buffer;
3082 struct trace_array *tr = &global_trace;
3083 struct bprint_entry *entry;
3084 unsigned long flags;
3086 int len = 0, size, pc;
3088 if (unlikely(tracing_selftest_running || tracing_disabled))
3091 /* Don't pollute graph traces with trace_vprintk internals */
3092 pause_graph_tracing();
3094 pc = preempt_count();
3095 preempt_disable_notrace();
3097 tbuffer = get_trace_buf();
3103 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3105 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3108 local_save_flags(flags);
3109 size = sizeof(*entry) + sizeof(u32) * len;
3110 buffer = tr->trace_buffer.buffer;
3111 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3115 entry = ring_buffer_event_data(event);
3119 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3120 if (!call_filter_check_discard(call, entry, buffer, event)) {
3121 __buffer_unlock_commit(buffer, event);
3122 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3129 preempt_enable_notrace();
3130 unpause_graph_tracing();
3134 EXPORT_SYMBOL_GPL(trace_vbprintk);
3138 __trace_array_vprintk(struct ring_buffer *buffer,
3139 unsigned long ip, const char *fmt, va_list args)
3141 struct trace_event_call *call = &event_print;
3142 struct ring_buffer_event *event;
3143 int len = 0, size, pc;
3144 struct print_entry *entry;
3145 unsigned long flags;
3148 if (tracing_disabled || tracing_selftest_running)
3151 /* Don't pollute graph traces with trace_vprintk internals */
3152 pause_graph_tracing();
3154 pc = preempt_count();
3155 preempt_disable_notrace();
3158 tbuffer = get_trace_buf();
3164 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3166 local_save_flags(flags);
3167 size = sizeof(*entry) + len + 1;
3168 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3172 entry = ring_buffer_event_data(event);
3175 memcpy(&entry->buf, tbuffer, len + 1);
3176 if (!call_filter_check_discard(call, entry, buffer, event)) {
3177 __buffer_unlock_commit(buffer, event);
3178 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3185 preempt_enable_notrace();
3186 unpause_graph_tracing();
3192 int trace_array_vprintk(struct trace_array *tr,
3193 unsigned long ip, const char *fmt, va_list args)
3195 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3199 int trace_array_printk(struct trace_array *tr,
3200 unsigned long ip, const char *fmt, ...)
3205 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3209 ret = trace_array_vprintk(tr, ip, fmt, ap);
3213 EXPORT_SYMBOL_GPL(trace_array_printk);
3216 int trace_array_printk_buf(struct ring_buffer *buffer,
3217 unsigned long ip, const char *fmt, ...)
3222 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3226 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3232 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3234 return trace_array_vprintk(&global_trace, ip, fmt, args);
3236 EXPORT_SYMBOL_GPL(trace_vprintk);
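/*
 * Editor's illustrative sketch (not part of the original file):
 * trace_vprintk() is the va_list backend behind the trace_printk()
 * macro, so a varargs wrapper reduces to the usual va_start/va_end
 * dance:
 */
#if 0
static int example_trace_printf(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vprintk(ip, fmt, ap);
	va_end(ap);

	return ret;
}
#endif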
3238 static void trace_iterator_increment(struct trace_iterator *iter)
3240 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3244 ring_buffer_read(buf_iter, NULL);
3247 static struct trace_entry *
3248 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3249 unsigned long *lost_events)
3251 struct ring_buffer_event *event;
3252 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3255 event = ring_buffer_iter_peek(buf_iter, ts);
3257 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3261 iter->ent_size = ring_buffer_event_length(event);
3262 return ring_buffer_event_data(event);
3268 static struct trace_entry *
3269 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3270 unsigned long *missing_events, u64 *ent_ts)
3272 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3273 struct trace_entry *ent, *next = NULL;
3274 unsigned long lost_events = 0, next_lost = 0;
3275 int cpu_file = iter->cpu_file;
3276 u64 next_ts = 0, ts;
3282 * If we are in a per_cpu trace file, don't bother iterating over
3283 * all the CPUs; peek at that one CPU directly.
3285 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3286 if (ring_buffer_empty_cpu(buffer, cpu_file))
3288 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3290 *ent_cpu = cpu_file;
3295 for_each_tracing_cpu(cpu) {
3297 if (ring_buffer_empty_cpu(buffer, cpu))
3300 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3303 * Pick the entry with the smallest timestamp:
3305 if (ent && (!next || ts < next_ts)) {
3309 next_lost = lost_events;
3310 next_size = iter->ent_size;
3314 iter->ent_size = next_size;
3317 *ent_cpu = next_cpu;
3323 *missing_events = next_lost;
3328 /* Find the next real entry, without updating the iterator itself */
3329 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3330 int *ent_cpu, u64 *ent_ts)
3332 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3335 /* Find the next real entry, and increment the iterator to the next entry */
3336 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3338 iter->ent = __find_next_entry(iter, &iter->cpu,
3339 &iter->lost_events, &iter->ts);
3342 trace_iterator_increment(iter);
3344 return iter->ent ? iter : NULL;
3347 static void trace_consume(struct trace_iterator *iter)
3349 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3350 &iter->lost_events);
3353 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3355 struct trace_iterator *iter = m->private;
3359 WARN_ON_ONCE(iter->leftover);
3363 /* can't go backwards */
3368 ent = trace_find_next_entry_inc(iter);
3372 while (ent && iter->idx < i)
3373 ent = trace_find_next_entry_inc(iter);
3380 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3382 struct ring_buffer_event *event;
3383 struct ring_buffer_iter *buf_iter;
3384 unsigned long entries = 0;
3387 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3389 buf_iter = trace_buffer_iter(iter, cpu);
3393 ring_buffer_iter_reset(buf_iter);
3396 * We could have the case with the max latency tracers
3397 * that a reset never took place on a cpu. This is evident
3398 * by the timestamp being before the start of the buffer.
3400 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3401 if (ts >= iter->trace_buffer->time_start)
3404 ring_buffer_read(buf_iter, NULL);
3407 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3411 * The current tracer is copied to avoid using a global lock
3412 * all around.
3414 static void *s_start(struct seq_file *m, loff_t *pos)
3416 struct trace_iterator *iter = m->private;
3417 struct trace_array *tr = iter->tr;
3418 int cpu_file = iter->cpu_file;
3424 * copy the tracer to avoid using a global lock all around.
3425 * iter->trace is a copy of current_trace, the pointer to the
3426 * name may be used instead of a strcmp(), as iter->trace->name
3427 * will point to the same string as current_trace->name.
3429 mutex_lock(&trace_types_lock);
3430 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3431 *iter->trace = *tr->current_trace;
3432 mutex_unlock(&trace_types_lock);
3434 #ifdef CONFIG_TRACER_MAX_TRACE
3435 if (iter->snapshot && iter->trace->use_max_tr)
3436 return ERR_PTR(-EBUSY);
3439 if (!iter->snapshot)
3440 atomic_inc(&trace_record_taskinfo_disabled);
3442 if (*pos != iter->pos) {
3447 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3448 for_each_tracing_cpu(cpu)
3449 tracing_iter_reset(iter, cpu);
3451 tracing_iter_reset(iter, cpu_file);
3454 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3459 * If we overflowed the seq_file before, then we want
3460 * to just reuse the trace_seq buffer again.
3466 p = s_next(m, p, &l);
3470 trace_event_read_lock();
3471 trace_access_lock(cpu_file);
3475 static void s_stop(struct seq_file *m, void *p)
3477 struct trace_iterator *iter = m->private;
3479 #ifdef CONFIG_TRACER_MAX_TRACE
3480 if (iter->snapshot && iter->trace->use_max_tr)
3484 if (!iter->snapshot)
3485 atomic_dec(&trace_record_taskinfo_disabled);
3487 trace_access_unlock(iter->cpu_file);
3488 trace_event_read_unlock();
3492 get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3493 unsigned long *entries, int cpu)
3495 unsigned long count;
3497 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3499 * If this buffer has skipped entries, then we hold all
3500 * entries for the trace and we need to ignore the
3501 * ones before the time stamp.
3503 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3504 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3505 /* total is the same as the entries */
3509 ring_buffer_overrun_cpu(buf->buffer, cpu);
3514 get_total_entries(struct trace_buffer *buf,
3515 unsigned long *total, unsigned long *entries)
3523 for_each_tracing_cpu(cpu) {
3524 get_total_entries_cpu(buf, &t, &e, cpu);
3530 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3532 unsigned long total, entries;
3537 get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3542 unsigned long trace_total_entries(struct trace_array *tr)
3544 unsigned long total, entries;
3549 get_total_entries(&tr->trace_buffer, &total, &entries);
3554 static void print_lat_help_header(struct seq_file *m)
3556 seq_puts(m, "#                  _------=> CPU#            \n"
3557 "#                 / _-----=> irqs-off        \n"
3558 "#                | / _----=> need-resched    \n"
3559 "#                || / _---=> hardirq/softirq \n"
3560 "#                ||| / _--=> preempt-depth   \n"
3561 "#                |||| /     delay            \n"
3562 "#  cmd     pid   ||||| time  |   caller      \n"
3563 "#     \\   /      |||||  \\    |   /         \n");
3566 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3568 unsigned long total;
3569 unsigned long entries;
3571 get_total_entries(buf, &total, &entries);
3572 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3573 entries, total, num_online_cpus());
3577 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3580 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3582 print_event_info(buf, m);
3584 seq_printf(m, "#           TASK-PID   %s  CPU#   TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
3585 seq_printf(m, "#              | |     %s    |       |         |\n", tgid ? "  |      " : "");
3588 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3591 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3592 const char *space = " ";
3593 int prec = tgid ? 10 : 2;
3595 print_event_info(buf, m);
3597 seq_printf(m, "#                          %.*s  _-----=> irqs-off\n", prec, space);
3598 seq_printf(m, "#                          %.*s / _----=> need-resched\n", prec, space);
3599 seq_printf(m, "#                          %.*s| / _---=> hardirq/softirq\n", prec, space);
3600 seq_printf(m, "#                          %.*s|| / _--=> preempt-depth\n", prec, space);
3601 seq_printf(m, "#                          %.*s||| /     delay\n", prec, space);
3602 seq_printf(m, "#           TASK-PID %.*sCPU#  ||||    TIMESTAMP  FUNCTION\n", prec, "   TGID   ");
3603 seq_printf(m, "#              | |   %.*s  |   ||||       |         |\n", prec, "     |    ");
3607 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3609 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3610 struct trace_buffer *buf = iter->trace_buffer;
3611 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3612 struct tracer *type = iter->trace;
3613 unsigned long entries;
3614 unsigned long total;
3615 const char *name = "preemption";
3619 get_total_entries(buf, &total, &entries);
3621 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3623 seq_puts(m, "# -----------------------------------"
3624 "---------------------------------\n");
3625 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3626 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3627 nsecs_to_usecs(data->saved_latency),
3631 #if defined(CONFIG_PREEMPT_NONE)
3633 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3635 #elif defined(CONFIG_PREEMPT)
3640 /* These are reserved for later use */
3643 seq_printf(m, " #P:%d)\n", num_online_cpus());
3647 seq_puts(m, "# -----------------\n");
3648 seq_printf(m, "# | task: %.16s-%d "
3649 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3650 data->comm, data->pid,
3651 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3652 data->policy, data->rt_priority);
3653 seq_puts(m, "# -----------------\n");
3655 if (data->critical_start) {
3656 seq_puts(m, "# => started at: ");
3657 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3658 trace_print_seq(m, &iter->seq);
3659 seq_puts(m, "\n# => ended at: ");
3660 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3661 trace_print_seq(m, &iter->seq);
3662 seq_puts(m, "\n#\n");
3668 static void test_cpu_buff_start(struct trace_iterator *iter)
3670 struct trace_seq *s = &iter->seq;
3671 struct trace_array *tr = iter->tr;
3673 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3676 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3679 if (cpumask_available(iter->started) &&
3680 cpumask_test_cpu(iter->cpu, iter->started))
3683 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3686 if (cpumask_available(iter->started))
3687 cpumask_set_cpu(iter->cpu, iter->started);
3689 /* Don't print started cpu buffer for the first entry of the trace */
3691 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3695 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3697 struct trace_array *tr = iter->tr;
3698 struct trace_seq *s = &iter->seq;
3699 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3700 struct trace_entry *entry;
3701 struct trace_event *event;
3705 test_cpu_buff_start(iter);
3707 event = ftrace_find_event(entry->type);
3709 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3710 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3711 trace_print_lat_context(iter);
3713 trace_print_context(iter);
3716 if (trace_seq_has_overflowed(s))
3717 return TRACE_TYPE_PARTIAL_LINE;
3720 return event->funcs->trace(iter, sym_flags, event);
3722 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3724 return trace_handle_return(s);
3727 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3729 struct trace_array *tr = iter->tr;
3730 struct trace_seq *s = &iter->seq;
3731 struct trace_entry *entry;
3732 struct trace_event *event;
3736 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3737 trace_seq_printf(s, "%d %d %llu ",
3738 entry->pid, iter->cpu, iter->ts);
3740 if (trace_seq_has_overflowed(s))
3741 return TRACE_TYPE_PARTIAL_LINE;
3743 event = ftrace_find_event(entry->type);
3745 return event->funcs->raw(iter, 0, event);
3747 trace_seq_printf(s, "%d ?\n", entry->type);
3749 return trace_handle_return(s);
3752 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3754 struct trace_array *tr = iter->tr;
3755 struct trace_seq *s = &iter->seq;
3756 unsigned char newline = '\n';
3757 struct trace_entry *entry;
3758 struct trace_event *event;
3762 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3763 SEQ_PUT_HEX_FIELD(s, entry->pid);
3764 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3765 SEQ_PUT_HEX_FIELD(s, iter->ts);
3766 if (trace_seq_has_overflowed(s))
3767 return TRACE_TYPE_PARTIAL_LINE;
3770 event = ftrace_find_event(entry->type);
3772 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3773 if (ret != TRACE_TYPE_HANDLED)
3777 SEQ_PUT_FIELD(s, newline);
3779 return trace_handle_return(s);
3782 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3784 struct trace_array *tr = iter->tr;
3785 struct trace_seq *s = &iter->seq;
3786 struct trace_entry *entry;
3787 struct trace_event *event;
3791 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3792 SEQ_PUT_FIELD(s, entry->pid);
3793 SEQ_PUT_FIELD(s, iter->cpu);
3794 SEQ_PUT_FIELD(s, iter->ts);
3795 if (trace_seq_has_overflowed(s))
3796 return TRACE_TYPE_PARTIAL_LINE;
3799 event = ftrace_find_event(entry->type);
3800 return event ? event->funcs->binary(iter, 0, event) :
3804 int trace_empty(struct trace_iterator *iter)
3806 struct ring_buffer_iter *buf_iter;
3809 /* If we are looking at one CPU buffer, only check that one */
3810 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3811 cpu = iter->cpu_file;
3812 buf_iter = trace_buffer_iter(iter, cpu);
3814 if (!ring_buffer_iter_empty(buf_iter))
3817 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3823 for_each_tracing_cpu(cpu) {
3824 buf_iter = trace_buffer_iter(iter, cpu);
3826 if (!ring_buffer_iter_empty(buf_iter))
3829 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3837 /* Called with trace_event_read_lock() held. */
3838 enum print_line_t print_trace_line(struct trace_iterator *iter)
3840 struct trace_array *tr = iter->tr;
3841 unsigned long trace_flags = tr->trace_flags;
3842 enum print_line_t ret;
3844 if (iter->lost_events) {
3845 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3846 iter->cpu, iter->lost_events);
3847 if (trace_seq_has_overflowed(&iter->seq))
3848 return TRACE_TYPE_PARTIAL_LINE;
3851 if (iter->trace && iter->trace->print_line) {
3852 ret = iter->trace->print_line(iter);
3853 if (ret != TRACE_TYPE_UNHANDLED)
3857 if (iter->ent->type == TRACE_BPUTS &&
3858 trace_flags & TRACE_ITER_PRINTK &&
3859 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3860 return trace_print_bputs_msg_only(iter);
3862 if (iter->ent->type == TRACE_BPRINT &&
3863 trace_flags & TRACE_ITER_PRINTK &&
3864 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3865 return trace_print_bprintk_msg_only(iter);
3867 if (iter->ent->type == TRACE_PRINT &&
3868 trace_flags & TRACE_ITER_PRINTK &&
3869 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3870 return trace_print_printk_msg_only(iter);
3872 if (trace_flags & TRACE_ITER_BIN)
3873 return print_bin_fmt(iter);
3875 if (trace_flags & TRACE_ITER_HEX)
3876 return print_hex_fmt(iter);
3878 if (trace_flags & TRACE_ITER_RAW)
3879 return print_raw_fmt(iter);
3881 return print_trace_fmt(iter);
3884 void trace_latency_header(struct seq_file *m)
3886 struct trace_iterator *iter = m->private;
3887 struct trace_array *tr = iter->tr;
3889 /* print nothing if the buffers are empty */
3890 if (trace_empty(iter))
3893 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3894 print_trace_header(m, iter);
3896 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3897 print_lat_help_header(m);
3900 void trace_default_header(struct seq_file *m)
3902 struct trace_iterator *iter = m->private;
3903 struct trace_array *tr = iter->tr;
3904 unsigned long trace_flags = tr->trace_flags;
3906 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3909 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3910 /* print nothing if the buffers are empty */
3911 if (trace_empty(iter))
3913 print_trace_header(m, iter);
3914 if (!(trace_flags & TRACE_ITER_VERBOSE))
3915 print_lat_help_header(m);
3917 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3918 if (trace_flags & TRACE_ITER_IRQ_INFO)
3919 print_func_help_header_irq(iter->trace_buffer,
3922 print_func_help_header(iter->trace_buffer, m,
3928 static void test_ftrace_alive(struct seq_file *m)
3930 if (!ftrace_is_dead())
3932 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3933 "#          MAY BE MISSING FUNCTION EVENTS\n");
3936 #ifdef CONFIG_TRACER_MAX_TRACE
3937 static void show_snapshot_main_help(struct seq_file *m)
3939 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3940 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3941 "# Takes a snapshot of the main buffer.\n"
3942 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3943 "# (Doesn't have to be '2' works with any number that\n"
3944 "# is not a '0' or '1')\n");
3947 static void show_snapshot_percpu_help(struct seq_file *m)
3949 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3950 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3951 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3952 "# Takes a snapshot of the main buffer for this cpu.\n");
3954 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3955 "# Must use main snapshot file to allocate.\n");
3957 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3958 "# (Doesn't have to be '2' works with any number that\n"
3959 "# is not a '0' or '1')\n");
3962 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3964 if (iter->tr->allocated_snapshot)
3965 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3967 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3969 seq_puts(m, "# Snapshot commands:\n");
3970 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3971 show_snapshot_main_help(m);
3973 show_snapshot_percpu_help(m);
3976 /* Should never be called */
3977 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3980 static int s_show(struct seq_file *m, void *v)
3982 struct trace_iterator *iter = v;
3985 if (iter->ent == NULL) {
3987 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3989 test_ftrace_alive(m);
3991 if (iter->snapshot && trace_empty(iter))
3992 print_snapshot_help(m, iter);
3993 else if (iter->trace && iter->trace->print_header)
3994 iter->trace->print_header(m);
3996 trace_default_header(m);
3998 } else if (iter->leftover) {
4000 * If we filled the seq_file buffer earlier, we
4001 * want to just show it now.
4003 ret = trace_print_seq(m, &iter->seq);
4005 /* ret should this time be zero, but you never know */
4006 iter->leftover = ret;
4009 print_trace_line(iter);
4010 ret = trace_print_seq(m, &iter->seq);
4012 * If we overflow the seq_file buffer, then it will
4013 * ask us for this data again at start up.
4015 * ret is 0 if seq_file write succeeded.
4018 iter->leftover = ret;
4025 * Should be used after trace_array_get(), trace_types_lock
4026 * ensures that i_cdev was already initialized.
4028 static inline int tracing_get_cpu(struct inode *inode)
4030 if (inode->i_cdev) /* See trace_create_cpu_file() */
4031 return (long)inode->i_cdev - 1;
4032 return RING_BUFFER_ALL_CPUS;
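/*
 * Editor's illustrative sketch (not part of the original file): the
 * producer side of the i_cdev trick decoded above. trace_create_cpu_file()
 * (later in this file) stores cpu + 1 in inode->i_cdev so that 0 keeps
 * meaning "not a per-cpu file"; tracing_get_cpu() removes the bias.
 */
#if 0
static void example_tag_cpu_inode(struct inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);	/* 0 stays reserved */
}
#endif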
4035 static const struct seq_operations tracer_seq_ops = {
4042 static struct trace_iterator *
4043 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4045 struct trace_array *tr = inode->i_private;
4046 struct trace_iterator *iter;
4049 if (tracing_disabled)
4050 return ERR_PTR(-ENODEV);
4052 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4054 return ERR_PTR(-ENOMEM);
4056 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4058 if (!iter->buffer_iter)
4062 * We make a copy of the current tracer to avoid concurrent
4063 * changes on it while we are reading.
4065 mutex_lock(&trace_types_lock);
4066 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4070 *iter->trace = *tr->current_trace;
4072 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4077 #ifdef CONFIG_TRACER_MAX_TRACE
4078 /* Currently only the top directory has a snapshot */
4079 if (tr->current_trace->print_max || snapshot)
4080 iter->trace_buffer = &tr->max_buffer;
4083 iter->trace_buffer = &tr->trace_buffer;
4084 iter->snapshot = snapshot;
4086 iter->cpu_file = tracing_get_cpu(inode);
4087 mutex_init(&iter->mutex);
4089 /* Notify the tracer early; before we stop tracing. */
4090 if (iter->trace && iter->trace->open)
4091 iter->trace->open(iter);
4093 /* Annotate start of buffers if we had overruns */
4094 if (ring_buffer_overruns(iter->trace_buffer->buffer))
4095 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4097 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4098 if (trace_clocks[tr->clock_id].in_ns)
4099 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4101 /* stop the trace while dumping if we are not opening "snapshot" */
4102 if (!iter->snapshot)
4103 tracing_stop_tr(tr);
4105 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4106 for_each_tracing_cpu(cpu) {
4107 iter->buffer_iter[cpu] =
4108 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4111 ring_buffer_read_prepare_sync();
4112 for_each_tracing_cpu(cpu) {
4113 ring_buffer_read_start(iter->buffer_iter[cpu]);
4114 tracing_iter_reset(iter, cpu);
4117 cpu = iter->cpu_file;
4118 iter->buffer_iter[cpu] =
4119 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4121 ring_buffer_read_prepare_sync();
4122 ring_buffer_read_start(iter->buffer_iter[cpu]);
4123 tracing_iter_reset(iter, cpu);
4126 mutex_unlock(&trace_types_lock);
4131 mutex_unlock(&trace_types_lock);
4133 kfree(iter->buffer_iter);
4135 seq_release_private(inode, file);
4136 return ERR_PTR(-ENOMEM);
4139 int tracing_open_generic(struct inode *inode, struct file *filp)
4141 if (tracing_disabled)
4144 filp->private_data = inode->i_private;
4148 bool tracing_is_disabled(void)
4150 return (tracing_disabled) ? true : false;
4154 * Open and update trace_array ref count.
4155 * Must have the current trace_array passed to it.
4157 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4159 struct trace_array *tr = inode->i_private;
4161 if (tracing_disabled)
4164 if (trace_array_get(tr) < 0)
4167 filp->private_data = inode->i_private;
4172 static int tracing_release(struct inode *inode, struct file *file)
4174 struct trace_array *tr = inode->i_private;
4175 struct seq_file *m = file->private_data;
4176 struct trace_iterator *iter;
4179 if (!(file->f_mode & FMODE_READ)) {
4180 trace_array_put(tr);
4184 /* Writes do not use seq_file */
4186 mutex_lock(&trace_types_lock);
4188 for_each_tracing_cpu(cpu) {
4189 if (iter->buffer_iter[cpu])
4190 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4193 if (iter->trace && iter->trace->close)
4194 iter->trace->close(iter);
4196 if (!iter->snapshot)
4197 /* reenable tracing if it was previously enabled */
4198 tracing_start_tr(tr);
4200 __trace_array_put(tr);
4202 mutex_unlock(&trace_types_lock);
4204 mutex_destroy(&iter->mutex);
4205 free_cpumask_var(iter->started);
4207 kfree(iter->buffer_iter);
4208 seq_release_private(inode, file);
4213 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4215 struct trace_array *tr = inode->i_private;
4217 trace_array_put(tr);
4221 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4223 struct trace_array *tr = inode->i_private;
4225 trace_array_put(tr);
4227 return single_release(inode, file);
4230 static int tracing_open(struct inode *inode, struct file *file)
4232 struct trace_array *tr = inode->i_private;
4233 struct trace_iterator *iter;
4236 if (trace_array_get(tr) < 0)
4239 /* If this file was open for write, then erase contents */
4240 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4241 int cpu = tracing_get_cpu(inode);
4242 struct trace_buffer *trace_buf = &tr->trace_buffer;
4244 #ifdef CONFIG_TRACER_MAX_TRACE
4245 if (tr->current_trace->print_max)
4246 trace_buf = &tr->max_buffer;
4249 if (cpu == RING_BUFFER_ALL_CPUS)
4250 tracing_reset_online_cpus(trace_buf);
4252 tracing_reset(trace_buf, cpu);
4255 if (file->f_mode & FMODE_READ) {
4256 iter = __tracing_open(inode, file, false);
4258 ret = PTR_ERR(iter);
4259 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4260 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4264 trace_array_put(tr);
4270 * Some tracers are not suitable for instance buffers.
4271 * A tracer is always available for the global array (toplevel)
4272 * or if it explicitly states that it is.
4275 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4277 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4280 /* Find the next tracer that this trace array may use */
4281 static struct tracer *
4282 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4284 while (t && !trace_ok_for_array(t, tr))
4291 t_next(struct seq_file *m, void *v, loff_t *pos)
4293 struct trace_array *tr = m->private;
4294 struct tracer *t = v;
4299 t = get_tracer_for_array(tr, t->next);
4304 static void *t_start(struct seq_file *m, loff_t *pos)
4306 struct trace_array *tr = m->private;
4310 mutex_lock(&trace_types_lock);
4312 t = get_tracer_for_array(tr, trace_types);
4313 for (; t && l < *pos; t = t_next(m, t, &l))
4319 static void t_stop(struct seq_file *m, void *p)
4321 mutex_unlock(&trace_types_lock);
4324 static int t_show(struct seq_file *m, void *v)
4326 struct tracer *t = v;
4331 seq_puts(m, t->name);
4340 static const struct seq_operations show_traces_seq_ops = {
4347 static int show_traces_open(struct inode *inode, struct file *file)
4349 struct trace_array *tr = inode->i_private;
4353 if (tracing_disabled)
4356 ret = seq_open(file, &show_traces_seq_ops);
4360 m = file->private_data;
4367 tracing_write_stub(struct file *filp, const char __user *ubuf,
4368 size_t count, loff_t *ppos)
4373 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4377 if (file->f_mode & FMODE_READ)
4378 ret = seq_lseek(file, offset, whence);
4380 file->f_pos = ret = 0;
4385 static const struct file_operations tracing_fops = {
4386 .open = tracing_open,
4388 .write = tracing_write_stub,
4389 .llseek = tracing_lseek,
4390 .release = tracing_release,
4393 static const struct file_operations show_traces_fops = {
4394 .open = show_traces_open,
4396 .release = seq_release,
4397 .llseek = seq_lseek,
4401 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4402 size_t count, loff_t *ppos)
4404 struct trace_array *tr = file_inode(filp)->i_private;
4408 len = snprintf(NULL, 0, "%*pb\n",
4409 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4410 mask_str = kmalloc(len, GFP_KERNEL);
4414 len = snprintf(mask_str, len, "%*pb\n",
4415 cpumask_pr_args(tr->tracing_cpumask));
4420 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4429 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4430 size_t count, loff_t *ppos)
4432 struct trace_array *tr = file_inode(filp)->i_private;
4433 cpumask_var_t tracing_cpumask_new;
4436 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4439 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4443 local_irq_disable();
4444 arch_spin_lock(&tr->max_lock);
4445 for_each_tracing_cpu(cpu) {
4447 * Increase/decrease the disabled counter if we are
4448 * about to flip a bit in the cpumask:
4450 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4451 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4452 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4453 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4455 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4456 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4457 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4458 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4461 arch_spin_unlock(&tr->max_lock);
4464 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4465 free_cpumask_var(tracing_cpumask_new);
4470 free_cpumask_var(tracing_cpumask_new);
4475 static const struct file_operations tracing_cpumask_fops = {
4476 .open = tracing_open_generic_tr,
4477 .read = tracing_cpumask_read,
4478 .write = tracing_cpumask_write,
4479 .release = tracing_release_generic_tr,
4480 .llseek = generic_file_llseek,
4483 static int tracing_trace_options_show(struct seq_file *m, void *v)
4485 struct tracer_opt *trace_opts;
4486 struct trace_array *tr = m->private;
4490 mutex_lock(&trace_types_lock);
4491 tracer_flags = tr->current_trace->flags->val;
4492 trace_opts = tr->current_trace->flags->opts;
4494 for (i = 0; trace_options[i]; i++) {
4495 if (tr->trace_flags & (1 << i))
4496 seq_printf(m, "%s\n", trace_options[i]);
4498 seq_printf(m, "no%s\n", trace_options[i]);
4501 for (i = 0; trace_opts[i].name; i++) {
4502 if (tracer_flags & trace_opts[i].bit)
4503 seq_printf(m, "%s\n", trace_opts[i].name);
4505 seq_printf(m, "no%s\n", trace_opts[i].name);
4507 mutex_unlock(&trace_types_lock);
4512 static int __set_tracer_option(struct trace_array *tr,
4513 struct tracer_flags *tracer_flags,
4514 struct tracer_opt *opts, int neg)
4516 struct tracer *trace = tracer_flags->trace;
4519 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4524 tracer_flags->val &= ~opts->bit;
4526 tracer_flags->val |= opts->bit;
4530 /* Try to assign a tracer specific option */
4531 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4533 struct tracer *trace = tr->current_trace;
4534 struct tracer_flags *tracer_flags = trace->flags;
4535 struct tracer_opt *opts = NULL;
4538 for (i = 0; tracer_flags->opts[i].name; i++) {
4539 opts = &tracer_flags->opts[i];
4541 if (strcmp(cmp, opts->name) == 0)
4542 return __set_tracer_option(tr, trace->flags, opts, neg);
4548 /* Some tracers require overwrite to stay enabled */
4549 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4551 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4557 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4559 /* do nothing if flag is already set */
4560 if (!!(tr->trace_flags & mask) == !!enabled)
4563 /* Give the tracer a chance to approve the change */
4564 if (tr->current_trace->flag_changed)
4565 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4569 tr->trace_flags |= mask;
4571 tr->trace_flags &= ~mask;
4573 if (mask == TRACE_ITER_RECORD_CMD)
4574 trace_event_enable_cmd_record(enabled);
4576 if (mask == TRACE_ITER_RECORD_TGID) {
4578 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4582 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4586 trace_event_enable_tgid_record(enabled);
4589 if (mask == TRACE_ITER_EVENT_FORK)
4590 trace_event_follow_fork(tr, enabled);
4592 if (mask == TRACE_ITER_FUNC_FORK)
4593 ftrace_pid_follow_fork(tr, enabled);
4595 if (mask == TRACE_ITER_OVERWRITE) {
4596 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4597 #ifdef CONFIG_TRACER_MAX_TRACE
4598 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4602 if (mask == TRACE_ITER_PRINTK) {
4603 trace_printk_start_stop_comm(enabled);
4604 trace_printk_control(enabled);
4610 static int trace_set_options(struct trace_array *tr, char *option)
4615 size_t orig_len = strlen(option);
4618 cmp = strstrip(option);
4620 len = str_has_prefix(cmp, "no");
4626 mutex_lock(&trace_types_lock);
4628 ret = match_string(trace_options, -1, cmp);
4629 /* If no option could be set, test the specific tracer options */
4631 ret = set_tracer_option(tr, cmp, neg);
4633 ret = set_tracer_flag(tr, 1 << ret, !neg);
4635 mutex_unlock(&trace_types_lock);
4638 * If the first trailing whitespace is replaced with '\0' by strstrip,
4639 * turn it back into a space.
4641 if (orig_len > strlen(option))
4642 option[strlen(option)] = ' ';
4647 static void __init apply_trace_boot_options(void)
4649 char *buf = trace_boot_options_buf;
4653 option = strsep(&buf, ",");
4659 trace_set_options(&global_trace, option);
4661 /* Put back the comma to allow this to be called again */
4668 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4669 size_t cnt, loff_t *ppos)
4671 struct seq_file *m = filp->private_data;
4672 struct trace_array *tr = m->private;
4676 if (cnt >= sizeof(buf))
4679 if (copy_from_user(buf, ubuf, cnt))
4684 ret = trace_set_options(tr, buf);
4693 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4695 struct trace_array *tr = inode->i_private;
4698 if (tracing_disabled)
4701 if (trace_array_get(tr) < 0)
4704 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4706 trace_array_put(tr);
4711 static const struct file_operations tracing_iter_fops = {
4712 .open = tracing_trace_options_open,
4714 .llseek = seq_lseek,
4715 .release = tracing_single_release_tr,
4716 .write = tracing_trace_options_write,
4719 static const char readme_msg[] =
4720 "tracing mini-HOWTO:\n\n"
4721 "# echo 0 > tracing_on : quick way to disable tracing\n"
4722 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4723 " Important files:\n"
4724 " trace\t\t\t- The static contents of the buffer\n"
4725 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4726 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4727 " current_tracer\t- function and latency tracers\n"
4728 " available_tracers\t- list of configured tracers for current_tracer\n"
4729 " error_log\t- error log for failed commands (that support it)\n"
4730 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4731 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4732 " trace_clock\t\t-change the clock used to order events\n"
4733 " local: Per cpu clock but may not be synced across CPUs\n"
4734 " global: Synced across CPUs but slows tracing down.\n"
4735 " counter: Not a clock, but just an increment\n"
4736 " uptime: Jiffy counter from time of boot\n"
4737 " perf: Same clock that perf events use\n"
4738 #ifdef CONFIG_X86_64
4739 " x86-tsc: TSC cycle counter\n"
4741 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4742 " delta: Delta difference against a buffer-wide timestamp\n"
4743 " absolute: Absolute (standalone) timestamp\n"
4744 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4745 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4746 " tracing_cpumask\t- Limit which CPUs to trace\n"
4747 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4748 "\t\t\t Remove sub-buffer with rmdir\n"
4749 " trace_options\t\t- Set format or modify how tracing happens\n"
4750 "\t\t\t Disable an option by prefixing 'no' to the\n"
4751 "\t\t\t option name\n"
4752 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4753 #ifdef CONFIG_DYNAMIC_FTRACE
4754 "\n available_filter_functions - list of functions that can be filtered on\n"
4755 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4756 "\t\t\t functions\n"
4757 "\t accepts: func_full_name or glob-matching-pattern\n"
4758 "\t modules: Can select a group via module\n"
4759 "\t Format: :mod:<module-name>\n"
4760 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4761 "\t triggers: a command to perform when function is hit\n"
4762 "\t Format: <function>:<trigger>[:count]\n"
4763 "\t trigger: traceon, traceoff\n"
4764 "\t\t enable_event:<system>:<event>\n"
4765 "\t\t disable_event:<system>:<event>\n"
4766 #ifdef CONFIG_STACKTRACE
4769 #ifdef CONFIG_TRACER_SNAPSHOT
4774 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4775 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4776 "\t The first one will disable tracing every time do_fault is hit\n"
4777 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4778 "\t The first time do trap is hit and it disables tracing, the\n"
4779 "\t counter will decrement to 2. If tracing is already disabled,\n"
4780 "\t the counter will not decrement. It only decrements when the\n"
4781 "\t trigger did work\n"
4782 "\t To remove trigger without count:\n"
4783 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4784 "\t To remove trigger with a count:\n"
4785 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4786 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4787 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4788 "\t modules: Can select a group via module command :mod:\n"
4789 "\t Does not accept triggers\n"
4790 #endif /* CONFIG_DYNAMIC_FTRACE */
4791 #ifdef CONFIG_FUNCTION_TRACER
4792 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4795 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4796 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4797 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4798 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4800 #ifdef CONFIG_TRACER_SNAPSHOT
4801 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4802 "\t\t\t snapshot buffer. Read the contents for more\n"
4803 "\t\t\t information\n"
4805 #ifdef CONFIG_STACK_TRACER
4806 " stack_trace\t\t- Shows the max stack trace when active\n"
4807 " stack_max_size\t- Shows current max stack size that was traced\n"
4808 "\t\t\t Write into this file to reset the max size (trigger a\n"
4809 "\t\t\t new trace)\n"
4810 #ifdef CONFIG_DYNAMIC_FTRACE
4811 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4814 #endif /* CONFIG_STACK_TRACER */
4815 #ifdef CONFIG_DYNAMIC_EVENTS
4816 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4817 "\t\t\t Write into this file to define/undefine new trace events.\n"
4819 #ifdef CONFIG_KPROBE_EVENTS
4820 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4821 "\t\t\t Write into this file to define/undefine new trace events.\n"
4823 #ifdef CONFIG_UPROBE_EVENTS
4824 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4825 "\t\t\t Write into this file to define/undefine new trace events.\n"
4827 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4828 "\t accepts: event-definitions (one definition per line)\n"
4829 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4830 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4831 #ifdef CONFIG_HIST_TRIGGERS
4832 "\t s:[synthetic/]<event> <field> [<field>]\n"
4834 "\t -:[<group>/]<event>\n"
4835 #ifdef CONFIG_KPROBE_EVENTS
4836 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4837 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4839 #ifdef CONFIG_UPROBE_EVENTS
4840 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4842 "\t args: <name>=fetcharg[:type]\n"
4843 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4844 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4845 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
4847 "\t $stack<index>, $stack, $retval, $comm,\n"
4849 "\t +|-[u]<offset>(<fetcharg>)\n"
4850 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4851 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
4852 "\t <type>\\[<array-size>\\]\n"
4853 #ifdef CONFIG_HIST_TRIGGERS
4854 "\t field: <stype> <name>;\n"
4855 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4856 "\t [unsigned] char/int/long\n"
4859 " events/\t\t- Directory containing all trace event subsystems:\n"
4860 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4861 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4862 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4864 " filter\t\t- If set, only events passing filter are traced\n"
4865 " events/<system>/<event>/\t- Directory containing control files for\n"
4867 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4868 " filter\t\t- If set, only events passing filter are traced\n"
4869 " trigger\t\t- If set, a command to perform when event is hit\n"
4870 "\t Format: <trigger>[:count][if <filter>]\n"
4871 "\t trigger: traceon, traceoff\n"
4872 "\t enable_event:<system>:<event>\n"
4873 "\t disable_event:<system>:<event>\n"
4874 #ifdef CONFIG_HIST_TRIGGERS
4875 "\t enable_hist:<system>:<event>\n"
4876 "\t disable_hist:<system>:<event>\n"
4878 #ifdef CONFIG_STACKTRACE
4881 #ifdef CONFIG_TRACER_SNAPSHOT
4884 #ifdef CONFIG_HIST_TRIGGERS
4885 "\t\t hist (see below)\n"
4887 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4888 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4889 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4890 "\t events/block/block_unplug/trigger\n"
4891 "\t The first disables tracing every time block_unplug is hit.\n"
4892 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4893 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4894 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4895 "\t Like function triggers, the counter is only decremented if it\n"
4896 "\t enabled or disabled tracing.\n"
4897 "\t To remove a trigger without a count:\n"
4898 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4899 "\t To remove a trigger with a count:\n"
4900 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4901 "\t Filters can be ignored when removing a trigger.\n"
4902 #ifdef CONFIG_HIST_TRIGGERS
4903 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4904 "\t Format: hist:keys=<field1[,field2,...]>\n"
4905 "\t [:values=<field1[,field2,...]>]\n"
4906 "\t [:sort=<field1[,field2,...]>]\n"
4907 "\t [:size=#entries]\n"
4908 "\t [:pause][:continue][:clear]\n"
4909 "\t [:name=histname1]\n"
4910 "\t [:<handler>.<action>]\n"
4911 "\t [if <filter>]\n\n"
4912 "\t When a matching event is hit, an entry is added to a hash\n"
4913 "\t table using the key(s) and value(s) named, and the value of a\n"
4914 "\t sum called 'hitcount' is incremented. Keys and values\n"
4915 "\t correspond to fields in the event's format description. Keys\n"
4916 "\t can be any field, or the special string 'stacktrace'.\n"
4917 "\t Compound keys consisting of up to two fields can be specified\n"
4918 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4919 "\t fields. Sort keys consisting of up to two fields can be\n"
4920 "\t specified using the 'sort' keyword. The sort direction can\n"
4921 "\t be modified by appending '.descending' or '.ascending' to a\n"
4922 "\t sort field. The 'size' parameter can be used to specify more\n"
4923 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4924 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4925 "\t its histogram data will be shared with other triggers of the\n"
4926 "\t same name, and trigger hits will update this common data.\n\n"
4927 "\t Reading the 'hist' file for the event will dump the hash\n"
4928 "\t table in its entirety to stdout. If there are multiple hist\n"
4929 "\t triggers attached to an event, there will be a table for each\n"
4930 "\t trigger in the output. The table displayed for a named\n"
4931 "\t trigger will be the same as any other instance having the\n"
4932 "\t same name. The default format used to display a given field\n"
4933 "\t can be modified by appending any of the following modifiers\n"
4934 "\t to the field name, as applicable:\n\n"
4935 "\t .hex display a number as a hex value\n"
4936 "\t .sym display an address as a symbol\n"
4937 "\t .sym-offset display an address as a symbol and offset\n"
4938 "\t .execname display a common_pid as a program name\n"
4939 "\t .syscall display a syscall id as a syscall name\n"
4940 "\t .log2 display log2 value rather than raw number\n"
4941 "\t .usecs display a common_timestamp in microseconds\n\n"
4942 "\t The 'pause' parameter can be used to pause an existing hist\n"
4943 "\t trigger or to start a hist trigger but not log any events\n"
4944 "\t until told to do so. 'continue' can be used to start or\n"
4945 "\t restart a paused hist trigger.\n\n"
4946 "\t The 'clear' parameter will clear the contents of a running\n"
4947 "\t hist trigger and leave its current paused/active state\n"
4949 "\t The enable_hist and disable_hist triggers can be used to\n"
4950 "\t have one event conditionally start and stop another event's\n"
4951 "\t already-attached hist trigger. The syntax is analogous to\n"
4952 "\t the enable_event and disable_event triggers.\n\n"
4953 "\t Hist trigger handlers and actions are executed whenever a\n"
4954 "\t a histogram entry is added or updated. They take the form:\n\n"
4955 "\t <handler>.<action>\n\n"
4956 "\t The available handlers are:\n\n"
4957 "\t onmatch(matching.event) - invoke on addition or update\n"
4958 "\t onmax(var) - invoke if var exceeds current max\n"
4959 "\t onchange(var) - invoke action if var changes\n\n"
4960 "\t The available actions are:\n\n"
4961 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
4962 "\t save(field,...) - save current event fields\n"
4963 #ifdef CONFIG_TRACER_SNAPSHOT
4964 "\t snapshot() - snapshot the trace buffer\n"
4970 tracing_readme_read(struct file *filp, char __user *ubuf,
4971 size_t cnt, loff_t *ppos)
4973 return simple_read_from_buffer(ubuf, cnt, ppos,
4974 readme_msg, strlen(readme_msg));
4977 static const struct file_operations tracing_readme_fops = {
4978 .open = tracing_open_generic,
4979 .read = tracing_readme_read,
4980 .llseek = generic_file_llseek,
4983 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4987 if (*pos || m->count)
4992 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4993 if (trace_find_tgid(*ptr))
5000 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5010 v = saved_tgids_next(m, v, &l);
5018 static void saved_tgids_stop(struct seq_file *m, void *v)
5022 static int saved_tgids_show(struct seq_file *m, void *v)
5024 int pid = (int *)v - tgid_map;
5026 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5030 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5031 .start = saved_tgids_start,
5032 .stop = saved_tgids_stop,
5033 .next = saved_tgids_next,
5034 .show = saved_tgids_show,
5037 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5039 if (tracing_disabled)
5042 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5046 static const struct file_operations tracing_saved_tgids_fops = {
5047 .open = tracing_saved_tgids_open,
5049 .llseek = seq_lseek,
5050 .release = seq_release,
5053 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5055 unsigned int *ptr = v;
5057 if (*pos || m->count)
5062 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5064 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5073 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5079 arch_spin_lock(&trace_cmdline_lock);
5081 v = &savedcmd->map_cmdline_to_pid[0];
5083 v = saved_cmdlines_next(m, v, &l);
5091 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5093 arch_spin_unlock(&trace_cmdline_lock);
5097 static int saved_cmdlines_show(struct seq_file *m, void *v)
5099 char buf[TASK_COMM_LEN];
5100 unsigned int *pid = v;
5102 __trace_find_cmdline(*pid, buf);
5103 seq_printf(m, "%d %s\n", *pid, buf);
5107 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5108 .start = saved_cmdlines_start,
5109 .next = saved_cmdlines_next,
5110 .stop = saved_cmdlines_stop,
5111 .show = saved_cmdlines_show,
5114 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5116 if (tracing_disabled)
5119 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5122 static const struct file_operations tracing_saved_cmdlines_fops = {
5123 .open = tracing_saved_cmdlines_open,
5125 .llseek = seq_lseek,
5126 .release = seq_release,
5130 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5131 size_t cnt, loff_t *ppos)
5136 arch_spin_lock(&trace_cmdline_lock);
5137 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5138 arch_spin_unlock(&trace_cmdline_lock);
5140 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5143 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5145 kfree(s->saved_cmdlines);
5146 kfree(s->map_cmdline_to_pid);
5150 static int tracing_resize_saved_cmdlines(unsigned int val)
5152 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5154 s = kmalloc(sizeof(*s), GFP_KERNEL);
5158 if (allocate_cmdlines_buffer(val, s) < 0) {
5163 arch_spin_lock(&trace_cmdline_lock);
5164 savedcmd_temp = savedcmd;
5166 arch_spin_unlock(&trace_cmdline_lock);
5167 free_saved_cmdlines_buffer(savedcmd_temp);
5173 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5174 size_t cnt, loff_t *ppos)
5179 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5183 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5184 if (!val || val > PID_MAX_DEFAULT)
5187 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5196 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5197 .open = tracing_open_generic,
5198 .read = tracing_saved_cmdlines_size_read,
5199 .write = tracing_saved_cmdlines_size_write,
5202 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5203 static union trace_eval_map_item *
5204 update_eval_map(union trace_eval_map_item *ptr)
5206 if (!ptr->map.eval_string) {
5207 if (ptr->tail.next) {
5208 ptr = ptr->tail.next;
5209 /* Set ptr to the next real item (skip head) */
5217 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5219 union trace_eval_map_item *ptr = v;
5222 * Paranoid! If ptr points to end, we don't want to increment past it.
5223 * This really should never happen.
5225 ptr = update_eval_map(ptr);
5226 if (WARN_ON_ONCE(!ptr))
5233 ptr = update_eval_map(ptr);
5238 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5240 union trace_eval_map_item *v;
5243 mutex_lock(&trace_eval_mutex);
5245 v = trace_eval_maps;
5249 while (v && l < *pos) {
5250 v = eval_map_next(m, v, &l);
5256 static void eval_map_stop(struct seq_file *m, void *v)
5258 mutex_unlock(&trace_eval_mutex);
5261 static int eval_map_show(struct seq_file *m, void *v)
5263 union trace_eval_map_item *ptr = v;
5265 seq_printf(m, "%s %ld (%s)\n",
5266 ptr->map.eval_string, ptr->map.eval_value,
5272 static const struct seq_operations tracing_eval_map_seq_ops = {
5273 .start = eval_map_start,
5274 .next = eval_map_next,
5275 .stop = eval_map_stop,
5276 .show = eval_map_show,
5279 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5281 if (tracing_disabled)
5284 return seq_open(filp, &tracing_eval_map_seq_ops);
5287 static const struct file_operations tracing_eval_map_fops = {
5288 .open = tracing_eval_map_open,
5290 .llseek = seq_lseek,
5291 .release = seq_release,
5294 static inline union trace_eval_map_item *
5295 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5297 /* Return tail of array given the head */
5298 return ptr + ptr->head.length + 1;
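/*
 * A sketch of the layout assumed here and built by
 * trace_insert_eval_map_file() below: each allocation brackets the real
 * maps with a head and a tail item:
 *
 *	[ head (mod, length) ][ map 0 ] ... [ map len-1 ][ tail (next) ]
 */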
5302 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5305 struct trace_eval_map **stop;
5306 struct trace_eval_map **map;
5307 union trace_eval_map_item *map_array;
5308 union trace_eval_map_item *ptr;
5313 * The trace_eval_maps contains the map plus a head and tail item,
5314 * where the head holds the module and length of array, and the
5315 * tail holds a pointer to the next list.
5317 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5319 pr_warn("Unable to allocate trace eval mapping\n");
5323 mutex_lock(&trace_eval_mutex);
5325 if (!trace_eval_maps)
5326 trace_eval_maps = map_array;
5328 ptr = trace_eval_maps;
5330 ptr = trace_eval_jmp_to_tail(ptr);
5331 if (!ptr->tail.next)
5333 ptr = ptr->tail.next;
5336 ptr->tail.next = map_array;
5338 map_array->head.mod = mod;
5339 map_array->head.length = len;
5342 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5343 map_array->map = **map;
5346 memset(map_array, 0, sizeof(*map_array));
5348 mutex_unlock(&trace_eval_mutex);
5351 static void trace_create_eval_file(struct dentry *d_tracer)
5353 trace_create_file("eval_map", 0444, d_tracer,
5354 NULL, &tracing_eval_map_fops);
5357 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5358 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5359 static inline void trace_insert_eval_map_file(struct module *mod,
5360 struct trace_eval_map **start, int len) { }
5361 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5363 static void trace_insert_eval_map(struct module *mod,
5364 struct trace_eval_map **start, int len)
5366 struct trace_eval_map **map;
5373 trace_event_eval_update(map, len);
5375 trace_insert_eval_map_file(mod, start, len);
5379 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5380 size_t cnt, loff_t *ppos)
5382 struct trace_array *tr = filp->private_data;
5383 char buf[MAX_TRACER_SIZE+2];
5386 mutex_lock(&trace_types_lock);
5387 r = sprintf(buf, "%s\n", tr->current_trace->name);
5388 mutex_unlock(&trace_types_lock);
5390 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5393 int tracer_init(struct tracer *t, struct trace_array *tr)
5395 tracing_reset_online_cpus(&tr->trace_buffer);
5399 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5403 for_each_tracing_cpu(cpu)
5404 per_cpu_ptr(buf->data, cpu)->entries = val;
5407 #ifdef CONFIG_TRACER_MAX_TRACE
5408 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5409 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5410 struct trace_buffer *size_buf, int cpu_id)
5414 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5415 for_each_tracing_cpu(cpu) {
5416 ret = ring_buffer_resize(trace_buf->buffer,
5417 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5420 per_cpu_ptr(trace_buf->data, cpu)->entries =
5421 per_cpu_ptr(size_buf->data, cpu)->entries;
5424 ret = ring_buffer_resize(trace_buf->buffer,
5425 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5427 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5428 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5433 #endif /* CONFIG_TRACER_MAX_TRACE */
5435 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5436 unsigned long size, int cpu)
5441 * If kernel or user changes the size of the ring buffer
5442 * we use the size that was given, and we can forget about
5443 * expanding it later.
5445 ring_buffer_expanded = true;
5447 /* May be called before buffers are initialized */
5448 if (!tr->trace_buffer.buffer)
5451 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5455 #ifdef CONFIG_TRACER_MAX_TRACE
5456 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5457 !tr->current_trace->use_max_tr)
5460 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5462 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5463 &tr->trace_buffer, cpu);
5466 * AARGH! We are left with different
5467 * size max buffer!!!!
5468 * The max buffer is our "snapshot" buffer.
5469 * When a tracer needs a snapshot (one of the
5470 * latency tracers), it swaps the max buffer
5471 * with the saved snapshot. We succeeded in updating
5472 * the size of the main buffer, but failed to
5473 * update the size of the max buffer. But when we tried
5474 * to reset the main buffer to the original size, we
5475 * failed there too. This is very unlikely to
5476 * happen, but if it does, warn and kill all
5480 tracing_disabled = 1;
5485 if (cpu == RING_BUFFER_ALL_CPUS)
5486 set_buffer_entries(&tr->max_buffer, size);
5488 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5491 #endif /* CONFIG_TRACER_MAX_TRACE */
5493 if (cpu == RING_BUFFER_ALL_CPUS)
5494 set_buffer_entries(&tr->trace_buffer, size);
5496 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5501 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5502 unsigned long size, int cpu_id)
5506 mutex_lock(&trace_types_lock);
5508 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5509 /* make sure this cpu is enabled in the mask */
5510 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5516 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5521 mutex_unlock(&trace_types_lock);
5528 * tracing_update_buffers - used by tracing facility to expand ring buffers
5530 * To save memory when tracing is never used on a system with it
5531 * configured in, the ring buffers are set to a minimum size. Once
5532 * a user starts to use the tracing facility, they need to grow
5533 * to their default size.
5535 * This function is to be called when a tracer is about to be used.
5537 int tracing_update_buffers(void)
5541 mutex_lock(&trace_types_lock);
5542 if (!ring_buffer_expanded)
5543 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5544 RING_BUFFER_ALL_CPUS);
5545 mutex_unlock(&trace_types_lock);
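/*
 * A minimal caller sketch (hypothetical, but this is the intended usage):
 * anything about to enable tracing expands the buffers first and bails
 * out on failure:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */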
5550 struct trace_option_dentry;
5553 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5556 * Used to clear out the tracer before deletion of an instance.
5557 * Must have trace_types_lock held.
5559 static void tracing_set_nop(struct trace_array *tr)
5561 if (tr->current_trace == &nop_trace)
5564 tr->current_trace->enabled--;
5566 if (tr->current_trace->reset)
5567 tr->current_trace->reset(tr);
5569 tr->current_trace = &nop_trace;
5572 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5574 /* Only enable if the directory has been created already. */
5578 create_trace_option_files(tr, t);
5581 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5584 #ifdef CONFIG_TRACER_MAX_TRACE
5589 mutex_lock(&trace_types_lock);
5591 if (!ring_buffer_expanded) {
5592 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5593 RING_BUFFER_ALL_CPUS);
5599 for (t = trace_types; t; t = t->next) {
5600 if (strcmp(t->name, buf) == 0)
5607 if (t == tr->current_trace)
5610 #ifdef CONFIG_TRACER_SNAPSHOT
5611 if (t->use_max_tr) {
5612 arch_spin_lock(&tr->max_lock);
5613 if (tr->cond_snapshot)
5615 arch_spin_unlock(&tr->max_lock);
5620 /* Some tracers won't work on kernel command line */
5621 if (system_state < SYSTEM_RUNNING && t->noboot) {
5622 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5627 /* Some tracers are only allowed for the top level buffer */
5628 if (!trace_ok_for_array(t, tr)) {
5633 /* If trace pipe files are being read, we can't change the tracer */
5634 if (tr->current_trace->ref) {
5639 trace_branch_disable();
5641 tr->current_trace->enabled--;
5643 if (tr->current_trace->reset)
5644 tr->current_trace->reset(tr);
5646 /* Current trace needs to be nop_trace before synchronize_rcu */
5647 tr->current_trace = &nop_trace;
5649 #ifdef CONFIG_TRACER_MAX_TRACE
5650 had_max_tr = tr->allocated_snapshot;
5652 if (had_max_tr && !t->use_max_tr) {
5654 * We need to make sure that the update_max_tr sees that
5655 * current_trace changed to nop_trace to keep it from
5656 * swapping the buffers after we resize it.
5657 * The update_max_tr is called with interrupts disabled,
5658 * so a synchronize_rcu() is sufficient.
5665 #ifdef CONFIG_TRACER_MAX_TRACE
5666 if (t->use_max_tr && !had_max_tr) {
5667 ret = tracing_alloc_snapshot_instance(tr);
5674 ret = tracer_init(t, tr);
5679 tr->current_trace = t;
5680 tr->current_trace->enabled++;
5681 trace_branch_enable(tr);
5683 mutex_unlock(&trace_types_lock);
5689 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5690 size_t cnt, loff_t *ppos)
5692 struct trace_array *tr = filp->private_data;
5693 char buf[MAX_TRACER_SIZE+1];
5700 if (cnt > MAX_TRACER_SIZE)
5701 cnt = MAX_TRACER_SIZE;
5703 if (copy_from_user(buf, ubuf, cnt))
5708 /* strip trailing whitespace. */
5709 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5712 err = tracing_set_tracer(tr, buf);
5722 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5723 size_t cnt, loff_t *ppos)
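/* A stored value of -1 means "unset"; report it as-is rather than converting to usecs. */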
5728 r = snprintf(buf, sizeof(buf), "%ld\n",
5729 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5730 if (r > sizeof(buf))
5732 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5736 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5737 size_t cnt, loff_t *ppos)
5742 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5752 tracing_thresh_read(struct file *filp, char __user *ubuf,
5753 size_t cnt, loff_t *ppos)
5755 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5759 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5760 size_t cnt, loff_t *ppos)
5762 struct trace_array *tr = filp->private_data;
5765 mutex_lock(&trace_types_lock);
5766 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5770 if (tr->current_trace->update_thresh) {
5771 ret = tr->current_trace->update_thresh(tr);
5778 mutex_unlock(&trace_types_lock);
5783 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5786 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5787 size_t cnt, loff_t *ppos)
5789 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5793 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5794 size_t cnt, loff_t *ppos)
5796 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5801 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5803 struct trace_array *tr = inode->i_private;
5804 struct trace_iterator *iter;
5807 if (tracing_disabled)
5810 if (trace_array_get(tr) < 0)
5813 mutex_lock(&trace_types_lock);
5815 /* create a buffer to store the information to pass to userspace */
5816 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5819 __trace_array_put(tr);
5823 trace_seq_init(&iter->seq);
5824 iter->trace = tr->current_trace;
5826 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5831 /* trace pipe does not show start of buffer */
5832 cpumask_setall(iter->started);
5834 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5835 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5837 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5838 if (trace_clocks[tr->clock_id].in_ns)
5839 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5842 iter->trace_buffer = &tr->trace_buffer;
5843 iter->cpu_file = tracing_get_cpu(inode);
5844 mutex_init(&iter->mutex);
5845 filp->private_data = iter;
5847 if (iter->trace->pipe_open)
5848 iter->trace->pipe_open(iter);
5850 nonseekable_open(inode, filp);
5852 tr->current_trace->ref++;
5854 mutex_unlock(&trace_types_lock);
5859 __trace_array_put(tr);
5860 mutex_unlock(&trace_types_lock);
5864 static int tracing_release_pipe(struct inode *inode, struct file *file)
5866 struct trace_iterator *iter = file->private_data;
5867 struct trace_array *tr = inode->i_private;
5869 mutex_lock(&trace_types_lock);
5871 tr->current_trace->ref--;
5873 if (iter->trace->pipe_close)
5874 iter->trace->pipe_close(iter);
5876 mutex_unlock(&trace_types_lock);
5878 free_cpumask_var(iter->started);
5879 mutex_destroy(&iter->mutex);
5882 trace_array_put(tr);
5888 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5890 struct trace_array *tr = iter->tr;
5892 /* Iterators are static, they should be filled or empty */
5893 if (trace_buffer_iter(iter, iter->cpu_file))
5894 return EPOLLIN | EPOLLRDNORM;
5896 if (tr->trace_flags & TRACE_ITER_BLOCK)
5898 * Always select as readable when in blocking mode
5900 return EPOLLIN | EPOLLRDNORM;
5902 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5907 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5909 struct trace_iterator *iter = filp->private_data;
5911 return trace_poll(iter, filp, poll_table);
5914 /* Must be called with iter->mutex held. */
5915 static int tracing_wait_pipe(struct file *filp)
5917 struct trace_iterator *iter = filp->private_data;
5920 while (trace_empty(iter)) {
5922 if ((filp->f_flags & O_NONBLOCK)) {
5927 * We block until we read something and tracing is disabled.
5928 * We still block if tracing is disabled, but we have never
5929 * read anything. This allows a user to cat this file, and
5930 * then enable tracing. But after we have read something,
5931 * we give an EOF when tracing is again disabled.
5933 * iter->pos will be 0 if we haven't read anything.
5935 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5938 mutex_unlock(&iter->mutex);
5940 ret = wait_on_pipe(iter, 0);
5942 mutex_lock(&iter->mutex);
5955 tracing_read_pipe(struct file *filp, char __user *ubuf,
5956 size_t cnt, loff_t *ppos)
5958 struct trace_iterator *iter = filp->private_data;
5962 * Avoid more than one consumer on a single file descriptor
5963 * This is just a matter of trace coherency; the ring buffer itself is protected.
5966 mutex_lock(&iter->mutex);
5968 /* return any leftover data */
5969 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5973 trace_seq_init(&iter->seq);
5975 if (iter->trace->read) {
5976 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5982 sret = tracing_wait_pipe(filp);
5986 /* stop when tracing is finished */
5987 if (trace_empty(iter)) {
5992 if (cnt >= PAGE_SIZE)
5993 cnt = PAGE_SIZE - 1;
5995 /* reset all but tr, trace, and overruns */
5996 memset(&iter->seq, 0,
5997 sizeof(struct trace_iterator) -
5998 offsetof(struct trace_iterator, seq));
5999 cpumask_clear(iter->started);
6002 trace_event_read_lock();
6003 trace_access_lock(iter->cpu_file);
6004 while (trace_find_next_entry_inc(iter) != NULL) {
6005 enum print_line_t ret;
6006 int save_len = iter->seq.seq.len;
6008 ret = print_trace_line(iter);
6009 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6010 /* don't print partial lines */
6011 iter->seq.seq.len = save_len;
6014 if (ret != TRACE_TYPE_NO_CONSUME)
6015 trace_consume(iter);
6017 if (trace_seq_used(&iter->seq) >= cnt)
6021 * Setting the full flag means we reached the trace_seq buffer
6022 * size, so we should have left via the partial-output condition above.
6023 * One of the trace_seq_* functions is not used properly.
6025 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6028 trace_access_unlock(iter->cpu_file);
6029 trace_event_read_unlock();
6031 /* Now copy what we have to the user */
6032 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6033 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6034 trace_seq_init(&iter->seq);
6037 * If there was nothing to send to the user, in spite of consuming trace
6038 * entries, go back to wait for more entries.
6044 mutex_unlock(&iter->mutex);
6049 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6052 __free_page(spd->pages[idx]);
6055 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
6056 .confirm = generic_pipe_buf_confirm,
6057 .release = generic_pipe_buf_release,
6058 .steal = generic_pipe_buf_steal,
6059 .get = generic_pipe_buf_get,
6063 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6069 /* Seq buffer is page-sized, exactly what we need. */
6071 save_len = iter->seq.seq.len;
6072 ret = print_trace_line(iter);
6074 if (trace_seq_has_overflowed(&iter->seq)) {
6075 iter->seq.seq.len = save_len;
6080 * This should not be hit, because it should only
6081 * be set if the iter->seq overflowed. But check it
6082 * anyway to be safe.
6084 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6085 iter->seq.seq.len = save_len;
6089 count = trace_seq_used(&iter->seq) - save_len;
6092 iter->seq.seq.len = save_len;
6096 if (ret != TRACE_TYPE_NO_CONSUME)
6097 trace_consume(iter);
6099 if (!trace_find_next_entry_inc(iter)) {
6109 static ssize_t tracing_splice_read_pipe(struct file *filp,
6111 struct pipe_inode_info *pipe,
6115 struct page *pages_def[PIPE_DEF_BUFFERS];
6116 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6117 struct trace_iterator *iter = filp->private_data;
6118 struct splice_pipe_desc spd = {
6120 .partial = partial_def,
6121 .nr_pages = 0, /* This gets updated below. */
6122 .nr_pages_max = PIPE_DEF_BUFFERS,
6123 .ops = &tracing_pipe_buf_ops,
6124 .spd_release = tracing_spd_release_pipe,
6130 if (splice_grow_spd(pipe, &spd))
6133 mutex_lock(&iter->mutex);
6135 if (iter->trace->splice_read) {
6136 ret = iter->trace->splice_read(iter, filp,
6137 ppos, pipe, len, flags);
6142 ret = tracing_wait_pipe(filp);
6146 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6151 trace_event_read_lock();
6152 trace_access_lock(iter->cpu_file);
6154 /* Fill as many pages as possible. */
6155 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6156 spd.pages[i] = alloc_page(GFP_KERNEL);
6160 rem = tracing_fill_pipe_page(rem, iter);
6162 /* Copy the data into the page, so we can start over. */
6163 ret = trace_seq_to_buffer(&iter->seq,
6164 page_address(spd.pages[i]),
6165 trace_seq_used(&iter->seq));
6167 __free_page(spd.pages[i]);
6170 spd.partial[i].offset = 0;
6171 spd.partial[i].len = trace_seq_used(&iter->seq);
6173 trace_seq_init(&iter->seq);
6176 trace_access_unlock(iter->cpu_file);
6177 trace_event_read_unlock();
6178 mutex_unlock(&iter->mutex);
6183 ret = splice_to_pipe(pipe, &spd);
6187 splice_shrink_spd(&spd);
6191 mutex_unlock(&iter->mutex);
6196 tracing_entries_read(struct file *filp, char __user *ubuf,
6197 size_t cnt, loff_t *ppos)
6199 struct inode *inode = file_inode(filp);
6200 struct trace_array *tr = inode->i_private;
6201 int cpu = tracing_get_cpu(inode);
6206 mutex_lock(&trace_types_lock);
6208 if (cpu == RING_BUFFER_ALL_CPUS) {
6209 int cpu, buf_size_same;
6214 /* check if all cpu sizes are same */
6215 for_each_tracing_cpu(cpu) {
6216 /* fill in the size from first enabled cpu */
6218 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6219 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
6225 if (buf_size_same) {
6226 if (!ring_buffer_expanded)
6227 r = sprintf(buf, "%lu (expanded: %lu)\n",
6229 trace_buf_size >> 10);
6231 r = sprintf(buf, "%lu\n", size >> 10);
6233 r = sprintf(buf, "X\n");
6235 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6237 mutex_unlock(&trace_types_lock);
6239 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6244 tracing_entries_write(struct file *filp, const char __user *ubuf,
6245 size_t cnt, loff_t *ppos)
6247 struct inode *inode = file_inode(filp);
6248 struct trace_array *tr = inode->i_private;
6252 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6256 /* must have at least 1 entry */
6260 /* value is in KB */
6262 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6272 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6273 size_t cnt, loff_t *ppos)
6275 struct trace_array *tr = filp->private_data;
6278 unsigned long size = 0, expanded_size = 0;
6280 mutex_lock(&trace_types_lock);
6281 for_each_tracing_cpu(cpu) {
6282 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6283 if (!ring_buffer_expanded)
6284 expanded_size += trace_buf_size >> 10;
6286 if (ring_buffer_expanded)
6287 r = sprintf(buf, "%lu\n", size);
6289 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6290 mutex_unlock(&trace_types_lock);
6292 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6296 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6297 size_t cnt, loff_t *ppos)
6300 * There is no need to read what the user has written; this function
6301 * just makes sure that there is no error when "echo" is used
6310 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6312 struct trace_array *tr = inode->i_private;
6314 /* disable tracing? */
6315 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6316 tracer_tracing_off(tr);
6317 /* resize the ring buffer to 0 */
6318 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6320 trace_array_put(tr);
6326 tracing_mark_write(struct file *filp, const char __user *ubuf,
6327 size_t cnt, loff_t *fpos)
6329 struct trace_array *tr = filp->private_data;
6330 struct ring_buffer_event *event;
6331 enum event_trigger_type tt = ETT_NONE;
6332 struct ring_buffer *buffer;
6333 struct print_entry *entry;
6334 unsigned long irq_flags;
6339 /* Used in tracing_mark_raw_write() as well */
6340 #define FAULTED_STR "<faulted>"
6341 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6343 if (tracing_disabled)
6346 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6349 if (cnt > TRACE_BUF_SIZE)
6350 cnt = TRACE_BUF_SIZE;
6352 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6354 local_save_flags(irq_flags);
6355 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6357 /* If less than "<faulted>", then make sure we can still add that */
6358 if (cnt < FAULTED_SIZE)
6359 size += FAULTED_SIZE - cnt;
6361 buffer = tr->trace_buffer.buffer;
6362 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6363 irq_flags, preempt_count());
6364 if (unlikely(!event))
6365 /* Ring buffer disabled, return as if not open for write */
6368 entry = ring_buffer_event_data(event);
6369 entry->ip = _THIS_IP_;
6371 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6373 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6380 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6381 /* do not add \n before testing triggers, but add \0 */
6382 entry->buf[cnt] = '\0';
6383 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6386 if (entry->buf[cnt - 1] != '\n') {
6387 entry->buf[cnt] = '\n';
6388 entry->buf[cnt + 1] = '\0';
6390 entry->buf[cnt] = '\0';
6392 __buffer_unlock_commit(buffer, event);
6395 event_triggers_post_call(tr->trace_marker_file, tt);
6403 /* Limit it for now to 3K (including tag) */
6404 #define RAW_DATA_MAX_SIZE (1024*3)
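/*
 * A minimal (illustrative) userspace writer for trace_marker_raw: the
 * record must start with an unsigned int tag id, followed by the raw
 * payload bytes, as checked in tracing_mark_raw_write() below:
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *	write(fd, &rec, sizeof(rec));
 */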
6407 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6408 size_t cnt, loff_t *fpos)
6410 struct trace_array *tr = filp->private_data;
6411 struct ring_buffer_event *event;
6412 struct ring_buffer *buffer;
6413 struct raw_data_entry *entry;
6414 unsigned long irq_flags;
6419 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6421 if (tracing_disabled)
6424 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6427 /* The marker must at least have a tag id */
6428 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6431 if (cnt > TRACE_BUF_SIZE)
6432 cnt = TRACE_BUF_SIZE;
6434 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6436 local_save_flags(irq_flags);
6437 size = sizeof(*entry) + cnt;
6438 if (cnt < FAULT_SIZE_ID)
6439 size += FAULT_SIZE_ID - cnt;
6441 buffer = tr->trace_buffer.buffer;
6442 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6443 irq_flags, preempt_count());
6445 /* Ring buffer disabled, return as if not open for write */
6448 entry = ring_buffer_event_data(event);
6450 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6453 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6458 __buffer_unlock_commit(buffer, event);
6466 static int tracing_clock_show(struct seq_file *m, void *v)
6468 struct trace_array *tr = m->private;
6471 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6473 "%s%s%s%s", i ? " " : "",
6474 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6475 i == tr->clock_id ? "]" : "");
6481 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6485 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6486 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6489 if (i == ARRAY_SIZE(trace_clocks))
6492 mutex_lock(&trace_types_lock);
6496 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6499 * New clock may not be consistent with the previous clock.
6500 * Reset the buffer so that it doesn't have incomparable timestamps.
6502 tracing_reset_online_cpus(&tr->trace_buffer);
6504 #ifdef CONFIG_TRACER_MAX_TRACE
6505 if (tr->max_buffer.buffer)
6506 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6507 tracing_reset_online_cpus(&tr->max_buffer);
6510 mutex_unlock(&trace_types_lock);
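/*
 * From tracefs this is driven by writing one of the names listed in the
 * trace_clock file, e.g. (sketch):
 *
 *	echo mono > trace_clock
 */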
6515 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6516 size_t cnt, loff_t *fpos)
6518 struct seq_file *m = filp->private_data;
6519 struct trace_array *tr = m->private;
6521 const char *clockstr;
6524 if (cnt >= sizeof(buf))
6527 if (copy_from_user(buf, ubuf, cnt))
6532 clockstr = strstrip(buf);
6534 ret = tracing_set_clock(tr, clockstr);
6543 static int tracing_clock_open(struct inode *inode, struct file *file)
6545 struct trace_array *tr = inode->i_private;
6548 if (tracing_disabled)
6551 if (trace_array_get(tr))
6554 ret = single_open(file, tracing_clock_show, inode->i_private);
6556 trace_array_put(tr);
6561 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6563 struct trace_array *tr = m->private;
6565 mutex_lock(&trace_types_lock);
6567 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6568 seq_puts(m, "delta [absolute]\n");
6570 seq_puts(m, "[delta] absolute\n");
6572 mutex_unlock(&trace_types_lock);
6577 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6579 struct trace_array *tr = inode->i_private;
6582 if (tracing_disabled)
6585 if (trace_array_get(tr))
6588 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6590 trace_array_put(tr);
6595 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6599 mutex_lock(&trace_types_lock);
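/* Reference-counted: only the first enable and the last disable actually flip the timestamp mode. */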
6601 if (abs && tr->time_stamp_abs_ref++)
6605 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6610 if (--tr->time_stamp_abs_ref)
6614 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6616 #ifdef CONFIG_TRACER_MAX_TRACE
6617 if (tr->max_buffer.buffer)
6618 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6621 mutex_unlock(&trace_types_lock);
6626 struct ftrace_buffer_info {
6627 struct trace_iterator iter;
6629 unsigned int spare_cpu;
6633 #ifdef CONFIG_TRACER_SNAPSHOT
6634 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6636 struct trace_array *tr = inode->i_private;
6637 struct trace_iterator *iter;
6641 if (trace_array_get(tr) < 0)
6644 if (file->f_mode & FMODE_READ) {
6645 iter = __tracing_open(inode, file, true);
6647 ret = PTR_ERR(iter);
6649 /* Writes still need the seq_file to hold the private data */
6651 m = kzalloc(sizeof(*m), GFP_KERNEL);
6654 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6662 iter->trace_buffer = &tr->max_buffer;
6663 iter->cpu_file = tracing_get_cpu(inode);
6665 file->private_data = m;
6669 trace_array_put(tr);
6675 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6678 struct seq_file *m = filp->private_data;
6679 struct trace_iterator *iter = m->private;
6680 struct trace_array *tr = iter->tr;
6684 ret = tracing_update_buffers();
6688 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6692 mutex_lock(&trace_types_lock);
6694 if (tr->current_trace->use_max_tr) {
6699 arch_spin_lock(&tr->max_lock);
6700 if (tr->cond_snapshot)
6702 arch_spin_unlock(&tr->max_lock);
6708 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6712 if (tr->allocated_snapshot)
6716 /* Only allow per-cpu swap if the ring buffer supports it */
6717 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6718 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6723 if (tr->allocated_snapshot)
6724 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6725 &tr->trace_buffer, iter->cpu_file);
6727 ret = tracing_alloc_snapshot_instance(tr);
6730 local_irq_disable();
6731 /* Now, we're going to swap */
6732 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6733 update_max_tr(tr, current, smp_processor_id(), NULL);
6735 update_max_tr_single(tr, current, iter->cpu_file);
6739 if (tr->allocated_snapshot) {
6740 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6741 tracing_reset_online_cpus(&tr->max_buffer);
6743 tracing_reset(&tr->max_buffer, iter->cpu_file);
6753 mutex_unlock(&trace_types_lock);
6757 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6759 struct seq_file *m = file->private_data;
6762 ret = tracing_release(inode, file);
6764 if (file->f_mode & FMODE_READ)
6767 /* If write only, the seq_file is just a stub */
6775 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6776 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6777 size_t count, loff_t *ppos);
6778 static int tracing_buffers_release(struct inode *inode, struct file *file);
6779 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6780 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6782 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6784 struct ftrace_buffer_info *info;
6787 ret = tracing_buffers_open(inode, filp);
6791 info = filp->private_data;
6793 if (info->iter.trace->use_max_tr) {
6794 tracing_buffers_release(inode, filp);
6798 info->iter.snapshot = true;
6799 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6804 #endif /* CONFIG_TRACER_SNAPSHOT */
6807 static const struct file_operations tracing_thresh_fops = {
6808 .open = tracing_open_generic,
6809 .read = tracing_thresh_read,
6810 .write = tracing_thresh_write,
6811 .llseek = generic_file_llseek,
6814 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6815 static const struct file_operations tracing_max_lat_fops = {
6816 .open = tracing_open_generic,
6817 .read = tracing_max_lat_read,
6818 .write = tracing_max_lat_write,
6819 .llseek = generic_file_llseek,
6823 static const struct file_operations set_tracer_fops = {
6824 .open = tracing_open_generic,
6825 .read = tracing_set_trace_read,
6826 .write = tracing_set_trace_write,
6827 .llseek = generic_file_llseek,
6830 static const struct file_operations tracing_pipe_fops = {
6831 .open = tracing_open_pipe,
6832 .poll = tracing_poll_pipe,
6833 .read = tracing_read_pipe,
6834 .splice_read = tracing_splice_read_pipe,
6835 .release = tracing_release_pipe,
6836 .llseek = no_llseek,
6839 static const struct file_operations tracing_entries_fops = {
6840 .open = tracing_open_generic_tr,
6841 .read = tracing_entries_read,
6842 .write = tracing_entries_write,
6843 .llseek = generic_file_llseek,
6844 .release = tracing_release_generic_tr,
6847 static const struct file_operations tracing_total_entries_fops = {
6848 .open = tracing_open_generic_tr,
6849 .read = tracing_total_entries_read,
6850 .llseek = generic_file_llseek,
6851 .release = tracing_release_generic_tr,
6854 static const struct file_operations tracing_free_buffer_fops = {
6855 .open = tracing_open_generic_tr,
6856 .write = tracing_free_buffer_write,
6857 .release = tracing_free_buffer_release,
6860 static const struct file_operations tracing_mark_fops = {
6861 .open = tracing_open_generic_tr,
6862 .write = tracing_mark_write,
6863 .llseek = generic_file_llseek,
6864 .release = tracing_release_generic_tr,
6867 static const struct file_operations tracing_mark_raw_fops = {
6868 .open = tracing_open_generic_tr,
6869 .write = tracing_mark_raw_write,
6870 .llseek = generic_file_llseek,
6871 .release = tracing_release_generic_tr,
6874 static const struct file_operations trace_clock_fops = {
6875 .open = tracing_clock_open,
6877 .llseek = seq_lseek,
6878 .release = tracing_single_release_tr,
6879 .write = tracing_clock_write,
6882 static const struct file_operations trace_time_stamp_mode_fops = {
6883 .open = tracing_time_stamp_mode_open,
6885 .llseek = seq_lseek,
6886 .release = tracing_single_release_tr,
6889 #ifdef CONFIG_TRACER_SNAPSHOT
6890 static const struct file_operations snapshot_fops = {
6891 .open = tracing_snapshot_open,
6893 .write = tracing_snapshot_write,
6894 .llseek = tracing_lseek,
6895 .release = tracing_snapshot_release,
6898 static const struct file_operations snapshot_raw_fops = {
6899 .open = snapshot_raw_open,
6900 .read = tracing_buffers_read,
6901 .release = tracing_buffers_release,
6902 .splice_read = tracing_buffers_splice_read,
6903 .llseek = no_llseek,
6906 #endif /* CONFIG_TRACER_SNAPSHOT */
6908 #define TRACING_LOG_ERRS_MAX 8
6909 #define TRACING_LOG_LOC_MAX 128
6911 #define CMD_PREFIX " Command: "
6914 const char **errs; /* ptr to loc-specific array of err strings */
6915 u8 type; /* index into errs -> specific err string */
6916 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
6920 struct tracing_log_err {
6921 struct list_head list;
6922 struct err_info info;
6923 char loc[TRACING_LOG_LOC_MAX]; /* err location */
6924 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
6927 static DEFINE_MUTEX(tracing_err_log_lock);
6929 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
6931 struct tracing_log_err *err;
6933 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
6934 err = kzalloc(sizeof(*err), GFP_KERNEL);
6936 err = ERR_PTR(-ENOMEM);
6937 tr->n_err_log_entries++;
6942 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
6943 list_del(&err->list);
6949 * err_pos - find the position of a string within a command for caret placement
6950 * @cmd: The tracing command that caused the error
6951 * @str: The string to position the caret at within @cmd
6953 * Finds the position of the first occurrence of @str within @cmd. The
6954 * return value can be passed to tracing_log_err() for caret placement
6957 * Returns the index within @cmd of the first occurrence of @str or 0
6958 * if @str was not found.
6960 unsigned int err_pos(char *cmd, const char *str)
6964 if (WARN_ON(!strlen(cmd)))
6967 found = strstr(cmd, str);
6975 * tracing_log_err - write an error to the tracing error log
6976 * @tr: The associated trace array for the error (NULL for top level array)
6977 * @loc: A string describing where the error occurred
6978 * @cmd: The tracing command that caused the error
6979 * @errs: The array of loc-specific static error strings
6980 * @type: The index into errs[], which produces the specific static err string
6981 * @pos: The position the caret should be placed in the cmd
6983 * Writes an error into tracing/error_log of the form:
6985 * <loc>: error: <text>
6989 * tracing/error_log is a small log file containing the last
6990 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
6991 * unless there has been a tracing error, and the error log can be
6992 * cleared and have its memory freed by writing the empty string in
6993 * truncation mode to it, i.e. echo > tracing/error_log.
6995 * NOTE: the @errs array along with the @type param are used to
6996 * produce a static error string - this string is not copied and saved
6997 * when the error is logged - only a pointer to it is saved. See
6998 * existing callers for examples of how static strings are typically
6999 * defined for use with tracing_log_err().
7001 void tracing_log_err(struct trace_array *tr,
7002 const char *loc, const char *cmd,
7003 const char **errs, u8 type, u8 pos)
7005 struct tracing_log_err *err;
7010 mutex_lock(&tracing_err_log_lock);
7011 err = get_tracing_log_err(tr);
7012 if (PTR_ERR(err) == -ENOMEM) {
7013 mutex_unlock(&tracing_err_log_lock);
7017 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7018 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7020 err->info.errs = errs;
7021 err->info.type = type;
7022 err->info.pos = pos;
7023 err->info.ts = local_clock();
7025 list_add_tail(&err->list, &tr->err_log);
7026 mutex_unlock(&tracing_err_log_lock);
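/*
 * A minimal, hypothetical caller sketch (the names are illustrative, not
 * taken from this file): a loc-specific static string table indexed by
 * @type, with err_pos() supplying the caret position:
 *
 *	static const char *foo_errs[] = { "Invalid argument", };
 *
 *	tracing_log_err(tr, "foo: cmd parsing", cmd, foo_errs,
 *			0, err_pos(cmd, bad_token));
 */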
7029 static void clear_tracing_err_log(struct trace_array *tr)
7031 struct tracing_log_err *err, *next;
7033 mutex_lock(&tracing_err_log_lock);
7034 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7035 list_del(&err->list);
7039 tr->n_err_log_entries = 0;
7040 mutex_unlock(&tracing_err_log_lock);
7043 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7045 struct trace_array *tr = m->private;
7047 mutex_lock(&tracing_err_log_lock);
7049 return seq_list_start(&tr->err_log, *pos);
7052 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7054 struct trace_array *tr = m->private;
7056 return seq_list_next(v, &tr->err_log, pos);
7059 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7061 mutex_unlock(&tracing_err_log_lock);
7064 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7068 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7070 for (i = 0; i < pos; i++)
7075 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7077 struct tracing_log_err *err = v;
7080 const char *err_text = err->info.errs[err->info.type];
7081 u64 sec = err->info.ts;
7084 nsec = do_div(sec, NSEC_PER_SEC);
7085 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7086 err->loc, err_text);
7087 seq_printf(m, "%s", err->cmd);
7088 tracing_err_log_show_pos(m, err->info.pos);
7094 static const struct seq_operations tracing_err_log_seq_ops = {
7095 .start = tracing_err_log_seq_start,
7096 .next = tracing_err_log_seq_next,
7097 .stop = tracing_err_log_seq_stop,
7098 .show = tracing_err_log_seq_show
7101 static int tracing_err_log_open(struct inode *inode, struct file *file)
7103 struct trace_array *tr = inode->i_private;
7106 if (trace_array_get(tr) < 0)
7109 /* If this file was opened for write, then erase contents */
7110 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7111 clear_tracing_err_log(tr);
7113 if (file->f_mode & FMODE_READ) {
7114 ret = seq_open(file, &tracing_err_log_seq_ops);
7116 struct seq_file *m = file->private_data;
7119 trace_array_put(tr);
7125 static ssize_t tracing_err_log_write(struct file *file,
7126 const char __user *buffer,
7127 size_t count, loff_t *ppos)
7132 static int tracing_err_log_release(struct inode *inode, struct file *file)
7134 struct trace_array *tr = inode->i_private;
7136 trace_array_put(tr);
7138 if (file->f_mode & FMODE_READ)
7139 seq_release(inode, file);
7144 static const struct file_operations tracing_err_log_fops = {
7145 .open = tracing_err_log_open,
7146 .write = tracing_err_log_write,
7148 .llseek = seq_lseek,
7149 .release = tracing_err_log_release,
7152 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7154 struct trace_array *tr = inode->i_private;
7155 struct ftrace_buffer_info *info;
7158 if (tracing_disabled)
7161 if (trace_array_get(tr) < 0)
7164 info = kzalloc(sizeof(*info), GFP_KERNEL);
7166 trace_array_put(tr);
7170 mutex_lock(&trace_types_lock);
7173 info->iter.cpu_file = tracing_get_cpu(inode);
7174 info->iter.trace = tr->current_trace;
7175 info->iter.trace_buffer = &tr->trace_buffer;
7177 /* Force reading ring buffer for first read */
7178 info->read = (unsigned int)-1;
7180 filp->private_data = info;
7182 tr->current_trace->ref++;
7184 mutex_unlock(&trace_types_lock);
7186 ret = nonseekable_open(inode, filp);
7188 trace_array_put(tr);
7194 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7196 struct ftrace_buffer_info *info = filp->private_data;
7197 struct trace_iterator *iter = &info->iter;
7199 return trace_poll(iter, filp, poll_table);
7203 tracing_buffers_read(struct file *filp, char __user *ubuf,
7204 size_t count, loff_t *ppos)
7206 struct ftrace_buffer_info *info = filp->private_data;
7207 struct trace_iterator *iter = &info->iter;
7214 #ifdef CONFIG_TRACER_MAX_TRACE
7215 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7220 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
7222 if (IS_ERR(info->spare)) {
7223 ret = PTR_ERR(info->spare);
7226 info->spare_cpu = iter->cpu_file;
7232 /* Do we have previous read data to read? */
7233 if (info->read < PAGE_SIZE)
7237 trace_access_lock(iter->cpu_file);
7238 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
7242 trace_access_unlock(iter->cpu_file);
7245 if (trace_empty(iter)) {
7246 if ((filp->f_flags & O_NONBLOCK))
7249 ret = wait_on_pipe(iter, 0);
7260 size = PAGE_SIZE - info->read;
7264 ret = copy_to_user(ubuf, info->spare + info->read, size);
7276 static int tracing_buffers_release(struct inode *inode, struct file *file)
7278 struct ftrace_buffer_info *info = file->private_data;
7279 struct trace_iterator *iter = &info->iter;
7281 mutex_lock(&trace_types_lock);
7283 iter->tr->current_trace->ref--;
7285 __trace_array_put(iter->tr);
7288 ring_buffer_free_read_page(iter->trace_buffer->buffer,
7289 info->spare_cpu, info->spare);
7292 mutex_unlock(&trace_types_lock);
7298 struct ring_buffer *buffer;
7301 refcount_t refcount;
7304 static void buffer_ref_release(struct buffer_ref *ref)
7306 if (!refcount_dec_and_test(&ref->refcount))
7308 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7312 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7313 struct pipe_buffer *buf)
7315 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7317 buffer_ref_release(ref);
7321 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7322 struct pipe_buffer *buf)
7324 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
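/* Overflow hardening: refuse to take new references once the count is implausibly high. */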
7326 if (refcount_read(&ref->refcount) > INT_MAX/2)
7329 refcount_inc(&ref->refcount);
7333 /* Pipe buffer operations for a buffer. */
7334 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7335 .confirm = generic_pipe_buf_confirm,
7336 .release = buffer_pipe_buf_release,
7337 .steal = generic_pipe_buf_nosteal,
7338 .get = buffer_pipe_buf_get,
7342 * Callback from splice_to_pipe(), if we need to release some pages
7343 * at the end of the spd in case we errored out in filling the pipe.
7345 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7347 struct buffer_ref *ref =
7348 (struct buffer_ref *)spd->partial[i].private;
7350 buffer_ref_release(ref);
7351 spd->partial[i].private = 0;
7355 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7356 struct pipe_inode_info *pipe, size_t len,
7359 struct ftrace_buffer_info *info = file->private_data;
7360 struct trace_iterator *iter = &info->iter;
7361 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7362 struct page *pages_def[PIPE_DEF_BUFFERS];
7363 struct splice_pipe_desc spd = {
7365 .partial = partial_def,
7366 .nr_pages_max = PIPE_DEF_BUFFERS,
7367 .ops = &buffer_pipe_buf_ops,
7368 .spd_release = buffer_spd_release,
7370 struct buffer_ref *ref;
7374 #ifdef CONFIG_TRACER_MAX_TRACE
7375 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7379 if (*ppos & (PAGE_SIZE - 1))
7382 if (len & (PAGE_SIZE - 1)) {
7383 if (len < PAGE_SIZE)
7388 if (splice_grow_spd(pipe, &spd))
7392 trace_access_lock(iter->cpu_file);
7393 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7395 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7399 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7405 refcount_set(&ref->refcount, 1);
7406 ref->buffer = iter->trace_buffer->buffer;
7407 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7408 if (IS_ERR(ref->page)) {
7409 ret = PTR_ERR(ref->page);
7414 ref->cpu = iter->cpu_file;
7416 r = ring_buffer_read_page(ref->buffer, &ref->page,
7417 len, iter->cpu_file, 1);
7419 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7425 page = virt_to_page(ref->page);
7427 spd.pages[i] = page;
7428 spd.partial[i].len = PAGE_SIZE;
7429 spd.partial[i].offset = 0;
7430 spd.partial[i].private = (unsigned long)ref;
7434 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7437 trace_access_unlock(iter->cpu_file);
7440 /* did we read anything? */
7441 if (!spd.nr_pages) {
7446 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7449 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7456 ret = splice_to_pipe(pipe, &spd);
7458 splice_shrink_spd(&spd);
7463 static const struct file_operations tracing_buffers_fops = {
7464 .open = tracing_buffers_open,
7465 .read = tracing_buffers_read,
7466 .poll = tracing_buffers_poll,
7467 .release = tracing_buffers_release,
7468 .splice_read = tracing_buffers_splice_read,
7469 .llseek = no_llseek,
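/*
 * For reference, a minimal user-space sketch of driving the splice path
 * above (an illustration, not part of this file; it assumes tracefs is
 * mounted at /sys/kernel/tracing and PAGE_SIZE is 4096):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char page[4096];
 *		int pfd[2];
 *		int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *			      O_RDONLY | O_NONBLOCK);
 *
 *		if (fd < 0 || pipe(pfd) < 0)
 *			return 1;
 *		// splice_read above demands page-aligned offset and length
 *		ssize_t n = splice(fd, NULL, pfd[1], NULL, 4096, 0);
 *		if (n > 0)
 *			n = read(pfd[0], page, n);  // one raw ring-buffer page
 *		return 0;
 *	}
 */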
7473 tracing_stats_read(struct file *filp, char __user *ubuf,
7474 size_t count, loff_t *ppos)
7476 struct inode *inode = file_inode(filp);
7477 struct trace_array *tr = inode->i_private;
7478 struct trace_buffer *trace_buf = &tr->trace_buffer;
7479 int cpu = tracing_get_cpu(inode);
7480 struct trace_seq *s;
7482 unsigned long long t;
7483 unsigned long usec_rem;
7485 s = kmalloc(sizeof(*s), GFP_KERNEL);
7491 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7492 trace_seq_printf(s, "entries: %ld\n", cnt);
7494 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7495 trace_seq_printf(s, "overrun: %ld\n", cnt);
7497 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7498 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7500 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7501 trace_seq_printf(s, "bytes: %ld\n", cnt);
7503 if (trace_clocks[tr->clock_id].in_ns) {
7504 /* local or global for trace_clock */
7505 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7506 usec_rem = do_div(t, USEC_PER_SEC);
7507 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7510 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7511 usec_rem = do_div(t, USEC_PER_SEC);
7512 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7514 /* counter or tsc mode for trace_clock */
7515 trace_seq_printf(s, "oldest event ts: %llu\n",
7516 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7518 trace_seq_printf(s, "now ts: %llu\n",
7519 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7522 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7523 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7525 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7526 trace_seq_printf(s, "read events: %ld\n", cnt);
7528 count = simple_read_from_buffer(ubuf, count, ppos,
7529 s->buffer, trace_seq_used(s));
7536 static const struct file_operations tracing_stats_fops = {
7537 .open = tracing_open_generic_tr,
7538 .read = tracing_stats_read,
7539 .llseek = generic_file_llseek,
7540 .release = tracing_release_generic_tr,
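/*
 * Illustrative output: the per-CPU stats file prints one counter per
 * line (the values below are made up; the field set matches
 * tracing_stats_read() above):
 *
 *	# cat /sys/kernel/tracing/per_cpu/cpu0/stats
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5472
 *	oldest event ts:  2296.622371
 *	now ts:  2311.537447
 *	dropped events: 0
 *	read events: 87
 */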
7543 #ifdef CONFIG_DYNAMIC_FTRACE
7546 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7547 size_t cnt, loff_t *ppos)
7549 unsigned long *p = filp->private_data;
7550 char buf[64]; /* Not too big for a shallow stack */
7553 r = scnprintf(buf, 63, "%ld", *p);
7556 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7559 static const struct file_operations tracing_dyn_info_fops = {
7560 .open = tracing_open_generic,
7561 .read = tracing_read_dyn_info,
7562 .llseek = generic_file_llseek,
7564 #endif /* CONFIG_DYNAMIC_FTRACE */
7566 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7568 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7569 struct trace_array *tr, struct ftrace_probe_ops *ops,
7572 tracing_snapshot_instance(tr);
7576 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7577 struct trace_array *tr, struct ftrace_probe_ops *ops,
7580 struct ftrace_func_mapper *mapper = data;
7584 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7594 tracing_snapshot_instance(tr);
7598 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7599 struct ftrace_probe_ops *ops, void *data)
7601 struct ftrace_func_mapper *mapper = data;
7604 seq_printf(m, "%ps:", (void *)ip);
7606 seq_puts(m, "snapshot");
7609 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7612 seq_printf(m, ":count=%ld\n", *count);
7614 seq_puts(m, ":unlimited\n");
7620 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7621 unsigned long ip, void *init_data, void **data)
7623 struct ftrace_func_mapper *mapper = *data;
7626 mapper = allocate_ftrace_func_mapper();
7632 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7636 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7637 unsigned long ip, void *data)
7639 struct ftrace_func_mapper *mapper = data;
7644 free_ftrace_func_mapper(mapper, NULL);
7648 ftrace_func_mapper_remove_ip(mapper, ip);
7651 static struct ftrace_probe_ops snapshot_probe_ops = {
7652 .func = ftrace_snapshot,
7653 .print = ftrace_snapshot_print,
7656 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7657 .func = ftrace_count_snapshot,
7658 .print = ftrace_snapshot_print,
7659 .init = ftrace_snapshot_init,
7660 .free = ftrace_snapshot_free,
7664 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7665 char *glob, char *cmd, char *param, int enable)
7667 struct ftrace_probe_ops *ops;
7668 void *count = (void *)-1;
7675 /* hash funcs only work with set_ftrace_filter */
7679 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7682 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7687 number = strsep(&param, ":");
7689 if (!strlen(number))
7693 * We use the callback data field (which is a pointer)
7694 * as our counter.
7696 ret = kstrtoul(number, 0, (unsigned long *)&count);
7701 ret = tracing_alloc_snapshot_instance(tr);
7705 ret = register_ftrace_function_probe(glob, tr, ops, count);
7708 return ret < 0 ? ret : 0;
7711 static struct ftrace_func_command ftrace_snapshot_cmd = {
7713 .func = ftrace_trace_snapshot_callback,
7716 static __init int register_snapshot_cmd(void)
7718 return register_ftrace_command(&ftrace_snapshot_cmd);
7721 static inline __init int register_snapshot_cmd(void) { return 0; }
7722 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
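/*
 * From user space the snapshot probe above is driven through
 * set_ftrace_filter (a hedged usage sketch; see
 * ftrace_trace_snapshot_callback() for the parsing):
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter    # snapshot on every hit
 *	echo 'schedule:snapshot:5' > set_ftrace_filter  # only the first 5 hits
 *	echo '!schedule:snapshot' > set_ftrace_filter   # remove the probe
 */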
7724 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7726 if (WARN_ON(!tr->dir))
7727 return ERR_PTR(-ENODEV);
7729 /* Top directory uses NULL as the parent */
7730 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7733 /* All sub buffers have a descriptor */
7737 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7739 struct dentry *d_tracer;
7742 return tr->percpu_dir;
7744 d_tracer = tracing_get_dentry(tr);
7745 if (IS_ERR(d_tracer))
7748 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7750 WARN_ONCE(!tr->percpu_dir,
7751 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7753 return tr->percpu_dir;
7756 static struct dentry *
7757 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7758 void *data, long cpu, const struct file_operations *fops)
7760 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7762 if (ret) /* See tracing_get_cpu() */
7763 d_inode(ret)->i_cdev = (void *)(cpu + 1);
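/*
 * Note (added): the +1 keeps CPU 0 distinguishable from a NULL i_cdev,
 * so tracing_get_cpu() can decode it as "(long)inode->i_cdev - 1" and
 * fall back to RING_BUFFER_ALL_CPUS when i_cdev is unset.
 */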
7768 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7770 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7771 struct dentry *d_cpu;
7772 char cpu_dir[30]; /* 30 characters should be more than enough */
7777 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7778 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7780 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7784 /* per cpu trace_pipe */
7785 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7786 tr, cpu, &tracing_pipe_fops);
7789 trace_create_cpu_file("trace", 0644, d_cpu,
7790 tr, cpu, &tracing_fops);
7792 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7793 tr, cpu, &tracing_buffers_fops);
7795 trace_create_cpu_file("stats", 0444, d_cpu,
7796 tr, cpu, &tracing_stats_fops);
7798 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7799 tr, cpu, &tracing_entries_fops);
7801 #ifdef CONFIG_TRACER_SNAPSHOT
7802 trace_create_cpu_file("snapshot", 0644, d_cpu,
7803 tr, cpu, &snapshot_fops);
7805 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7806 tr, cpu, &snapshot_raw_fops);
7810 #ifdef CONFIG_FTRACE_SELFTEST
7811 /* Let selftest have access to static functions in this file */
7812 #include "trace_selftest.c"
7816 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7819 struct trace_option_dentry *topt = filp->private_data;
7822 if (topt->flags->val & topt->opt->bit)
7827 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7831 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7834 struct trace_option_dentry *topt = filp->private_data;
7838 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7842 if (val != 0 && val != 1)
7845 if (!!(topt->flags->val & topt->opt->bit) != val) {
7846 mutex_lock(&trace_types_lock);
7847 ret = __set_tracer_option(topt->tr, topt->flags,
7849 mutex_unlock(&trace_types_lock);
7860 static const struct file_operations trace_options_fops = {
7861 .open = tracing_open_generic,
7862 .read = trace_options_read,
7863 .write = trace_options_write,
7864 .llseek = generic_file_llseek,
7868 * In order to pass in both the trace_array descriptor as well as the index
7869 * to the flag that the trace option file represents, the trace_array
7870 * has a character array of trace_flags_index[], which holds the index
7871 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7872 * The address of this character array is passed to the flag option file
7873 * read/write callbacks.
7875 * In order to extract both the index and the trace_array descriptor,
7876 * get_tr_index() uses the following algorithm:
7877 *
7878 *   idx = *ptr;
7879 *
7880 * Since the pointer is the address of one slot of the index array
7881 * (remember, index[1] == 1), dereferencing it yields the index itself.
7882 *
7883 * Then, to get the trace_array descriptor, we subtract that index
7884 * from the ptr, which lands us at the start of the index array:
7885 *
7886 *   ptr - idx == &index[0]
7887 *
7888 * Then a simple container_of() from that pointer gets us to the
7889 * trace_array descriptor.
7891 static void get_tr_index(void *data, struct trace_array **ptr,
7892 unsigned int *pindex)
7894 *pindex = *(unsigned char *)data;
7896 *ptr = container_of(data - *pindex, struct trace_array,
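/*
 * Worked example: the option file for bit 3 is created with data
 * pointing at tr->trace_flags_index[3], which init_trace_flags_index()
 * set to 3. So *pindex == 3, data - 3 == &tr->trace_flags_index[0],
 * and container_of() on that address recovers the enclosing trace_array.
 */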
7901 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7904 void *tr_index = filp->private_data;
7905 struct trace_array *tr;
7909 get_tr_index(tr_index, &tr, &index);
7911 if (tr->trace_flags & (1 << index))
7916 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7920 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7923 void *tr_index = filp->private_data;
7924 struct trace_array *tr;
7929 get_tr_index(tr_index, &tr, &index);
7931 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7935 if (val != 0 && val != 1)
7938 mutex_lock(&trace_types_lock);
7939 ret = set_tracer_flag(tr, 1 << index, val);
7940 mutex_unlock(&trace_types_lock);
7950 static const struct file_operations trace_options_core_fops = {
7951 .open = tracing_open_generic,
7952 .read = trace_options_core_read,
7953 .write = trace_options_core_write,
7954 .llseek = generic_file_llseek,
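/*
 * Each core flag appears as a boolean file under options/; only "0" and
 * "1" are accepted, as enforced above. Illustrative usage, assuming the
 * usual mount point:
 *
 *	echo 1 > /sys/kernel/tracing/options/sym-offset
 *	echo 0 > /sys/kernel/tracing/options/sym-offset
 */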
7957 struct dentry *trace_create_file(const char *name,
7959 struct dentry *parent,
7961 const struct file_operations *fops)
7965 ret = tracefs_create_file(name, mode, parent, data, fops);
7967 pr_warn("Could not create tracefs '%s' entry\n", name);
7973 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7975 struct dentry *d_tracer;
7980 d_tracer = tracing_get_dentry(tr);
7981 if (IS_ERR(d_tracer))
7984 tr->options = tracefs_create_dir("options", d_tracer);
7986 pr_warn("Could not create tracefs directory 'options'\n");
7994 create_trace_option_file(struct trace_array *tr,
7995 struct trace_option_dentry *topt,
7996 struct tracer_flags *flags,
7997 struct tracer_opt *opt)
7999 struct dentry *t_options;
8001 t_options = trace_options_init_dentry(tr);
8005 topt->flags = flags;
8009 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8010 &trace_options_fops);
8015 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8017 struct trace_option_dentry *topts;
8018 struct trace_options *tr_topts;
8019 struct tracer_flags *flags;
8020 struct tracer_opt *opts;
8027 flags = tracer->flags;
8029 if (!flags || !flags->opts)
8033 * If this is an instance, only create flags for tracers
8034 * the instance may have.
8036 if (!trace_ok_for_array(tracer, tr))
8039 for (i = 0; i < tr->nr_topts; i++) {
8040 /* Make sure there are no duplicate flags. */
8041 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8047 for (cnt = 0; opts[cnt].name; cnt++)
8050 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8054 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8061 tr->topts = tr_topts;
8062 tr->topts[tr->nr_topts].tracer = tracer;
8063 tr->topts[tr->nr_topts].topts = topts;
8066 for (cnt = 0; opts[cnt].name; cnt++) {
8067 create_trace_option_file(tr, &topts[cnt], flags,
8069 WARN_ONCE(topts[cnt].entry == NULL,
8070 "Failed to create trace option: %s",
8075 static struct dentry *
8076 create_trace_option_core_file(struct trace_array *tr,
8077 const char *option, long index)
8079 struct dentry *t_options;
8081 t_options = trace_options_init_dentry(tr);
8085 return trace_create_file(option, 0644, t_options,
8086 (void *)&tr->trace_flags_index[index],
8087 &trace_options_core_fops);
8090 static void create_trace_options_dir(struct trace_array *tr)
8092 struct dentry *t_options;
8093 bool top_level = tr == &global_trace;
8096 t_options = trace_options_init_dentry(tr);
8100 for (i = 0; trace_options[i]; i++) {
8102 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8103 create_trace_option_core_file(tr, trace_options[i], i);
8108 rb_simple_read(struct file *filp, char __user *ubuf,
8109 size_t cnt, loff_t *ppos)
8111 struct trace_array *tr = filp->private_data;
8115 r = tracer_tracing_is_on(tr);
8116 r = sprintf(buf, "%d\n", r);
8118 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8122 rb_simple_write(struct file *filp, const char __user *ubuf,
8123 size_t cnt, loff_t *ppos)
8125 struct trace_array *tr = filp->private_data;
8126 struct ring_buffer *buffer = tr->trace_buffer.buffer;
8130 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8135 mutex_lock(&trace_types_lock);
8136 if (!!val == tracer_tracing_is_on(tr)) {
8137 val = 0; /* do nothing */
8139 tracer_tracing_on(tr);
8140 if (tr->current_trace->start)
8141 tr->current_trace->start(tr);
8143 tracer_tracing_off(tr);
8144 if (tr->current_trace->stop)
8145 tr->current_trace->stop(tr);
8147 mutex_unlock(&trace_types_lock);
8155 static const struct file_operations rb_simple_fops = {
8156 .open = tracing_open_generic_tr,
8157 .read = rb_simple_read,
8158 .write = rb_simple_write,
8159 .release = tracing_release_generic_tr,
8160 .llseek = default_llseek,
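/*
 * Usage sketch: tracing_on pauses and resumes recording into the ring
 * buffer without unloading the current tracer; as implemented above,
 * the tracer's start()/stop() callbacks fire on each transition.
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on    # stop recording
 *	echo 1 > /sys/kernel/tracing/tracing_on    # resume recording
 */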
8164 buffer_percent_read(struct file *filp, char __user *ubuf,
8165 size_t cnt, loff_t *ppos)
8167 struct trace_array *tr = filp->private_data;
8171 r = tr->buffer_percent;
8172 r = sprintf(buf, "%d\n", r);
8174 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8178 buffer_percent_write(struct file *filp, const char __user *ubuf,
8179 size_t cnt, loff_t *ppos)
8181 struct trace_array *tr = filp->private_data;
8185 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8195 tr->buffer_percent = val;
8202 static const struct file_operations buffer_percent_fops = {
8203 .open = tracing_open_generic_tr,
8204 .read = buffer_percent_read,
8205 .write = buffer_percent_write,
8206 .release = tracing_release_generic_tr,
8207 .llseek = default_llseek,
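/*
 * buffer_percent (consumed by wait_on_pipe() in the splice path above)
 * tunes how full the ring buffer must be before blocked readers are
 * woken: 0 wakes on any data, 100 only when the buffer is full. An
 * illustrative example, assuming the usual mount point:
 *
 *	echo 100 > /sys/kernel/tracing/buffer_percent
 */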
8210 static struct dentry *trace_instance_dir;
8213 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8216 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
8218 enum ring_buffer_flags rb_flags;
8220 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8224 buf->buffer = ring_buffer_alloc(size, rb_flags);
8228 buf->data = alloc_percpu(struct trace_array_cpu);
8230 ring_buffer_free(buf->buffer);
8235 /* Allocate the first page for all buffers */
8236 set_buffer_entries(&tr->trace_buffer,
8237 ring_buffer_size(tr->trace_buffer.buffer, 0));
8242 static int allocate_trace_buffers(struct trace_array *tr, int size)
8246 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
8250 #ifdef CONFIG_TRACER_MAX_TRACE
8251 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8252 allocate_snapshot ? size : 1);
8254 ring_buffer_free(tr->trace_buffer.buffer);
8255 tr->trace_buffer.buffer = NULL;
8256 free_percpu(tr->trace_buffer.data);
8257 tr->trace_buffer.data = NULL;
8260 tr->allocated_snapshot = allocate_snapshot;
8263 * Only the top level trace array gets its snapshot allocated
8264 * from the kernel command line.
8266 allocate_snapshot = false;
8271 static void free_trace_buffer(struct trace_buffer *buf)
8274 ring_buffer_free(buf->buffer);
8276 free_percpu(buf->data);
8281 static void free_trace_buffers(struct trace_array *tr)
8286 free_trace_buffer(&tr->trace_buffer);
8288 #ifdef CONFIG_TRACER_MAX_TRACE
8289 free_trace_buffer(&tr->max_buffer);
8293 static void init_trace_flags_index(struct trace_array *tr)
8297 /* Used by the trace options files */
8298 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8299 tr->trace_flags_index[i] = i;
8302 static void __update_tracer_options(struct trace_array *tr)
8306 for (t = trace_types; t; t = t->next)
8307 add_tracer_options(tr, t);
8310 static void update_tracer_options(struct trace_array *tr)
8312 mutex_lock(&trace_types_lock);
8313 __update_tracer_options(tr);
8314 mutex_unlock(&trace_types_lock);
8317 struct trace_array *trace_array_create(const char *name)
8319 struct trace_array *tr;
8322 mutex_lock(&event_mutex);
8323 mutex_lock(&trace_types_lock);
8326 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8327 if (tr->name && strcmp(tr->name, name) == 0)
8332 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8336 tr->name = kstrdup(name, GFP_KERNEL);
8340 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8343 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8345 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8347 raw_spin_lock_init(&tr->start_lock);
8349 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8351 tr->current_trace = &nop_trace;
8353 INIT_LIST_HEAD(&tr->systems);
8354 INIT_LIST_HEAD(&tr->events);
8355 INIT_LIST_HEAD(&tr->hist_vars);
8356 INIT_LIST_HEAD(&tr->err_log);
8358 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8361 tr->dir = tracefs_create_dir(name, trace_instance_dir);
8365 ret = event_trace_add_tracer(tr->dir, tr);
8367 tracefs_remove_recursive(tr->dir);
8371 ftrace_init_trace_array(tr);
8373 init_tracer_tracefs(tr, tr->dir);
8374 init_trace_flags_index(tr);
8375 __update_tracer_options(tr);
8377 list_add(&tr->list, &ftrace_trace_arrays);
8379 mutex_unlock(&trace_types_lock);
8380 mutex_unlock(&event_mutex);
8385 free_trace_buffers(tr);
8386 free_cpumask_var(tr->tracing_cpumask);
8391 mutex_unlock(&trace_types_lock);
8392 mutex_unlock(&event_mutex);
8394 return ERR_PTR(ret);
8396 EXPORT_SYMBOL_GPL(trace_array_create);
8398 static int instance_mkdir(const char *name)
8400 return PTR_ERR_OR_ZERO(trace_array_create(name));
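/*
 * Instances are driven entirely from user space via mkdir/rmdir in
 * tracefs (the callbacks registered by create_trace_instances() below):
 *
 *	mkdir /sys/kernel/tracing/instances/foo    # trace_array_create()
 *	rmdir /sys/kernel/tracing/instances/foo    # __remove_instance()
 */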
8403 static int __remove_instance(struct trace_array *tr)
8407 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
8410 list_del(&tr->list);
8412 /* Disable all the flags that were enabled coming in */
8413 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8414 if ((1 << i) & ZEROED_TRACE_FLAGS)
8415 set_tracer_flag(tr, 1 << i, 0);
8418 tracing_set_nop(tr);
8419 clear_ftrace_function_probes(tr);
8420 event_trace_del_tracer(tr);
8421 ftrace_clear_pids(tr);
8422 ftrace_destroy_function_files(tr);
8423 tracefs_remove_recursive(tr->dir);
8424 free_trace_buffers(tr);
8426 for (i = 0; i < tr->nr_topts; i++) {
8427 kfree(tr->topts[i].topts);
8431 free_cpumask_var(tr->tracing_cpumask);
8439 int trace_array_destroy(struct trace_array *tr)
8446 mutex_lock(&event_mutex);
8447 mutex_lock(&trace_types_lock);
8449 ret = __remove_instance(tr);
8451 mutex_unlock(&trace_types_lock);
8452 mutex_unlock(&event_mutex);
8456 EXPORT_SYMBOL_GPL(trace_array_destroy);
8458 static int instance_rmdir(const char *name)
8460 struct trace_array *tr;
8463 mutex_lock(&event_mutex);
8464 mutex_lock(&trace_types_lock);
8467 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8468 if (tr->name && strcmp(tr->name, name) == 0) {
8469 ret = __remove_instance(tr);
8474 mutex_unlock(&trace_types_lock);
8475 mutex_unlock(&event_mutex);
8480 static __init void create_trace_instances(struct dentry *d_tracer)
8482 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8485 if (WARN_ON(!trace_instance_dir))
8490 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8492 struct trace_event_file *file;
8495 trace_create_file("available_tracers", 0444, d_tracer,
8496 tr, &show_traces_fops);
8498 trace_create_file("current_tracer", 0644, d_tracer,
8499 tr, &set_tracer_fops);
8501 trace_create_file("tracing_cpumask", 0644, d_tracer,
8502 tr, &tracing_cpumask_fops);
8504 trace_create_file("trace_options", 0644, d_tracer,
8505 tr, &tracing_iter_fops);
8507 trace_create_file("trace", 0644, d_tracer,
8510 trace_create_file("trace_pipe", 0444, d_tracer,
8511 tr, &tracing_pipe_fops);
8513 trace_create_file("buffer_size_kb", 0644, d_tracer,
8514 tr, &tracing_entries_fops);
8516 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8517 tr, &tracing_total_entries_fops);
8519 trace_create_file("free_buffer", 0200, d_tracer,
8520 tr, &tracing_free_buffer_fops);
8522 trace_create_file("trace_marker", 0220, d_tracer,
8523 tr, &tracing_mark_fops);
8525 file = __find_event_file(tr, "ftrace", "print");
8526 if (file && file->dir)
8527 trace_create_file("trigger", 0644, file->dir, file,
8528 &event_trigger_fops);
8529 tr->trace_marker_file = file;
8531 trace_create_file("trace_marker_raw", 0220, d_tracer,
8532 tr, &tracing_mark_raw_fops);
8534 trace_create_file("trace_clock", 0644, d_tracer, tr,
8537 trace_create_file("tracing_on", 0644, d_tracer,
8538 tr, &rb_simple_fops);
8540 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8541 &trace_time_stamp_mode_fops);
8543 tr->buffer_percent = 50;
8545 trace_create_file("buffer_percent", 0444, d_tracer,
8546 tr, &buffer_percent_fops);
8548 create_trace_options_dir(tr);
8550 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8551 trace_create_file("tracing_max_latency", 0644, d_tracer,
8552 &tr->max_latency, &tracing_max_lat_fops);
8555 if (ftrace_create_function_files(tr, d_tracer))
8556 WARN(1, "Could not allocate function filter files");
8558 #ifdef CONFIG_TRACER_SNAPSHOT
8559 trace_create_file("snapshot", 0644, d_tracer,
8560 tr, &snapshot_fops);
8563 trace_create_file("error_log", 0644, d_tracer,
8564 tr, &tracing_err_log_fops);
8566 for_each_tracing_cpu(cpu)
8567 tracing_init_tracefs_percpu(tr, cpu);
8569 ftrace_init_tracefs(tr, d_tracer);
8572 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
8574 struct vfsmount *mnt;
8575 struct file_system_type *type;
8578 * To maintain backward compatibility for tools that mount
8579 * debugfs to get to the tracing facility, tracefs is automatically
8580 * mounted to the debugfs/tracing directory.
8582 type = get_fs_type("tracefs");
8585 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8586 put_filesystem(type);
8595 * tracing_init_dentry - initialize top level trace array
8597 * This is called when creating files or directories in the tracing
8598 * directory. It is called via fs_initcall() by any of the boot up code
8599 * and expects to return the dentry of the top level tracing directory.
8601 struct dentry *tracing_init_dentry(void)
8603 struct trace_array *tr = &global_trace;
8605 /* The top level trace array uses NULL as parent */
8609 if (WARN_ON(!tracefs_initialized()) ||
8610 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8611 WARN_ON(!debugfs_initialized())))
8612 return ERR_PTR(-ENODEV);
8615 * As there may still be users that expect the tracing
8616 * files to exist in debugfs/tracing, we must automount
8617 * the tracefs file system there, so older tools still
8618 * work with the newer kernel.
8620 tr->dir = debugfs_create_automount("tracing", NULL,
8621 trace_automount, NULL);
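/*
 * Net effect: the canonical interface lives at /sys/kernel/tracing,
 * while a first access to /sys/kernel/debug/tracing transparently
 * automounts tracefs there for older tools (paths assume the default
 * sysfs/debugfs layout):
 *
 *	ls /sys/kernel/debug/tracing    # triggers the automount above
 */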
8626 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8627 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8629 static void __init trace_eval_init(void)
8633 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8634 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8637 #ifdef CONFIG_MODULES
8638 static void trace_module_add_evals(struct module *mod)
8640 if (!mod->num_trace_evals)
8644 * Modules with bad taint do not have events created; do
8645 * not bother with their eval maps either.
8647 if (trace_module_has_bad_taint(mod))
8650 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8653 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8654 static void trace_module_remove_evals(struct module *mod)
8656 union trace_eval_map_item *map;
8657 union trace_eval_map_item **last = &trace_eval_maps;
8659 if (!mod->num_trace_evals)
8662 mutex_lock(&trace_eval_mutex);
8664 map = trace_eval_maps;
8667 if (map->head.mod == mod)
8669 map = trace_eval_jmp_to_tail(map);
8670 last = &map->tail.next;
8671 map = map->tail.next;
8676 *last = trace_eval_jmp_to_tail(map)->tail.next;
8679 mutex_unlock(&trace_eval_mutex);
8682 static inline void trace_module_remove_evals(struct module *mod) { }
8683 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8685 static int trace_module_notify(struct notifier_block *self,
8686 unsigned long val, void *data)
8688 struct module *mod = data;
8691 case MODULE_STATE_COMING:
8692 trace_module_add_evals(mod);
8694 case MODULE_STATE_GOING:
8695 trace_module_remove_evals(mod);
8702 static struct notifier_block trace_module_nb = {
8703 .notifier_call = trace_module_notify,
8706 #endif /* CONFIG_MODULES */
8708 static __init int tracer_init_tracefs(void)
8710 struct dentry *d_tracer;
8712 trace_access_lock_init();
8714 d_tracer = tracing_init_dentry();
8715 if (IS_ERR(d_tracer))
8720 init_tracer_tracefs(&global_trace, d_tracer);
8721 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8723 trace_create_file("tracing_thresh", 0644, d_tracer,
8724 &global_trace, &tracing_thresh_fops);
8726 trace_create_file("README", 0444, d_tracer,
8727 NULL, &tracing_readme_fops);
8729 trace_create_file("saved_cmdlines", 0444, d_tracer,
8730 NULL, &tracing_saved_cmdlines_fops);
8732 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8733 NULL, &tracing_saved_cmdlines_size_fops);
8735 trace_create_file("saved_tgids", 0444, d_tracer,
8736 NULL, &tracing_saved_tgids_fops);
8740 trace_create_eval_file(d_tracer);
8742 #ifdef CONFIG_MODULES
8743 register_module_notifier(&trace_module_nb);
8746 #ifdef CONFIG_DYNAMIC_FTRACE
8747 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8748 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8751 create_trace_instances(d_tracer);
8753 update_tracer_options(&global_trace);
8758 static int trace_panic_handler(struct notifier_block *this,
8759 unsigned long event, void *unused)
8761 if (ftrace_dump_on_oops)
8762 ftrace_dump(ftrace_dump_on_oops);
8766 static struct notifier_block trace_panic_notifier = {
8767 .notifier_call = trace_panic_handler,
8769 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8772 static int trace_die_handler(struct notifier_block *self,
8778 if (ftrace_dump_on_oops)
8779 ftrace_dump(ftrace_dump_on_oops);
8787 static struct notifier_block trace_die_notifier = {
8788 .notifier_call = trace_die_handler,
8793 * printk is limited to a maximum of 1024 characters; we really don't
8794 * need it that big. Nothing should be printing 1000 characters anyway.
8796 #define TRACE_MAX_PRINT 1000
8799 * Define here KERN_TRACE so that we have one place to modify
8800 * it if we decide to change what log level the ftrace dump
8803 #define KERN_TRACE KERN_EMERG
8806 trace_printk_seq(struct trace_seq *s)
8808 /* Probably should print a warning here. */
8809 if (s->seq.len >= TRACE_MAX_PRINT)
8810 s->seq.len = TRACE_MAX_PRINT;
8813 * More paranoid code. Although the buffer size is set to
8814 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8815 * an extra layer of protection.
8817 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8818 s->seq.len = s->seq.size - 1;
8820 /* Should be zero-terminated, but we are paranoid. */
8821 s->buffer[s->seq.len] = 0;
8823 printk(KERN_TRACE "%s", s->buffer);
8828 void trace_init_global_iter(struct trace_iterator *iter)
8830 iter->tr = &global_trace;
8831 iter->trace = iter->tr->current_trace;
8832 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8833 iter->trace_buffer = &global_trace.trace_buffer;
8835 if (iter->trace && iter->trace->open)
8836 iter->trace->open(iter);
8838 /* Annotate start of buffers if we had overruns */
8839 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8840 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8842 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8843 if (trace_clocks[iter->tr->clock_id].in_ns)
8844 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8847 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8849 /* use static because iter can be a bit big for the stack */
8850 static struct trace_iterator iter;
8851 static atomic_t dump_running;
8852 struct trace_array *tr = &global_trace;
8853 unsigned int old_userobj;
8854 unsigned long flags;
8857 /* Only allow one dump user at a time. */
8858 if (atomic_inc_return(&dump_running) != 1) {
8859 atomic_dec(&dump_running);
8864 * Always turn off tracing when we dump.
8865 * We don't need to show trace output of what happens
8866 * between multiple crashes.
8868 * If the user does a sysrq-z, then they can re-enable
8869 * tracing with echo 1 > tracing_on.
8873 local_irq_save(flags);
8874 printk_nmi_direct_enter();
8876 /* Simulate the iterator */
8877 trace_init_global_iter(&iter);
8879 for_each_tracing_cpu(cpu) {
8880 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8883 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8885 /* don't look at user memory in panic mode */
8886 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8888 switch (oops_dump_mode) {
8890 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8893 iter.cpu_file = raw_smp_processor_id();
8898 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8899 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8902 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8904 /* Did function tracer already get disabled? */
8905 if (ftrace_is_dead()) {
8906 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8907 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8911 * We need to stop all tracing on all CPUs to read
8912 * the next buffer. This is a bit expensive, but is
8913 * not done often. We fill in all that we can read,
8914 * and then release the locks again.
8917 while (!trace_empty(&iter)) {
8920 printk(KERN_TRACE "---------------------------------\n");
8924 trace_iterator_reset(&iter);
8925 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8927 if (trace_find_next_entry_inc(&iter) != NULL) {
8930 ret = print_trace_line(&iter);
8931 if (ret != TRACE_TYPE_NO_CONSUME)
8932 trace_consume(&iter);
8934 touch_nmi_watchdog();
8936 trace_printk_seq(&iter.seq);
8940 printk(KERN_TRACE " (ftrace buffer empty)\n");
8942 printk(KERN_TRACE "---------------------------------\n");
8945 tr->trace_flags |= old_userobj;
8947 for_each_tracing_cpu(cpu) {
8948 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8950 atomic_dec(&dump_running);
8951 printk_nmi_direct_exit();
8952 local_irq_restore(flags);
8954 EXPORT_SYMBOL_GPL(ftrace_dump);
8956 int trace_run_command(const char *buf, int (*createfn)(int, char **))
8963 argv = argv_split(GFP_KERNEL, buf, &argc);
8968 ret = createfn(argc, argv);
8975 #define WRITE_BUFSIZE 4096
8977 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8978 size_t count, loff_t *ppos,
8979 int (*createfn)(int, char **))
8981 char *kbuf, *buf, *tmp;
8986 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8990 while (done < count) {
8991 size = count - done;
8993 if (size >= WRITE_BUFSIZE)
8994 size = WRITE_BUFSIZE - 1;
8996 if (copy_from_user(kbuf, buffer + done, size)) {
9003 tmp = strchr(buf, '\n');
9006 size = tmp - buf + 1;
9009 if (done + size < count) {
9012 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9013 pr_warn("Line length is too long: Should be less than %d\n",
9021 /* Remove comments */
9022 tmp = strchr(buf, '#');
9027 ret = trace_run_command(buf, createfn);
9032 } while (done < count);
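/*
 * trace_run_command()/trace_parse_run_command() implement the common
 * line-oriented command protocol: input is split on '\n', '#' starts a
 * comment, and each line is argv_split() into words before createfn()
 * sees it. A hedged example against one consumer, the kprobe_events
 * file:
 *
 *	echo 'p:myprobe do_sys_open' >> /sys/kernel/tracing/kprobe_events
 *
 * reaches createfn() with argc == 2 and
 * argv == { "p:myprobe", "do_sys_open" }.
 */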
9042 __init static int tracer_alloc_buffers(void)
9048 * Make sure we don't accidentally add more trace options
9049 * than we have bits for.
9051 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9053 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9056 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9057 goto out_free_buffer_mask;
9059 /* Only allocate trace_printk buffers if a trace_printk exists */
9060 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
9061 /* Must be called before global_trace.buffer is allocated */
9062 trace_printk_init_buffers();
9064 /* To save memory, keep the ring buffer size to its minimum */
9065 if (ring_buffer_expanded)
9066 ring_buf_size = trace_buf_size;
9070 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9071 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9073 raw_spin_lock_init(&global_trace.start_lock);
9076 * The prepare callback allocates some memory for the ring buffer. We
9077 * don't free the buffer if the CPU goes down. If we were to free
9078 * the buffer, then the user would lose any trace that was in the
9079 * buffer. The memory will be removed once the "instance" is removed.
9081 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9082 "trace/RB:preapre", trace_rb_cpu_prepare,
9085 goto out_free_cpumask;
9086 /* Used for event triggers */
9088 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9090 goto out_rm_hp_state;
9092 if (trace_create_savedcmd() < 0)
9093 goto out_free_temp_buffer;
9095 /* TODO: make the number of buffers hot pluggable with CPUS */
9096 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9097 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
9099 goto out_free_savedcmd;
9102 if (global_trace.buffer_disabled)
9105 if (trace_boot_clock) {
9106 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9108 pr_warn("Trace clock %s not defined, going back to default\n",
9113 * register_tracer() might reference current_trace, so it
9114 * needs to be set before we register anything. This is
9115 * just a bootstrap of current_trace anyway.
9117 global_trace.current_trace = &nop_trace;
9119 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9121 ftrace_init_global_array_ops(&global_trace);
9123 init_trace_flags_index(&global_trace);
9125 register_tracer(&nop_trace);
9127 /* Function tracing may start here (via kernel command line) */
9128 init_function_trace();
9130 /* All seems OK, enable tracing */
9131 tracing_disabled = 0;
9133 atomic_notifier_chain_register(&panic_notifier_list,
9134 &trace_panic_notifier);
9136 register_die_notifier(&trace_die_notifier);
9138 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9140 INIT_LIST_HEAD(&global_trace.systems);
9141 INIT_LIST_HEAD(&global_trace.events);
9142 INIT_LIST_HEAD(&global_trace.hist_vars);
9143 INIT_LIST_HEAD(&global_trace.err_log);
9144 list_add(&global_trace.list, &ftrace_trace_arrays);
9146 apply_trace_boot_options();
9148 register_snapshot_cmd();
9153 free_saved_cmdlines_buffer(savedcmd);
9154 out_free_temp_buffer:
9155 ring_buffer_free(temp_buffer);
9157 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9159 free_cpumask_var(global_trace.tracing_cpumask);
9160 out_free_buffer_mask:
9161 free_cpumask_var(tracing_buffer_mask);
9166 void __init early_trace_init(void)
9168 if (tracepoint_printk) {
9169 tracepoint_print_iter =
9170 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9171 if (WARN_ON(!tracepoint_print_iter))
9172 tracepoint_printk = 0;
9174 static_key_enable(&tracepoint_printk_key.key);
9176 tracer_alloc_buffers();
9179 void __init trace_init(void)
9184 __init static int clear_boot_tracer(void)
9187 * The default bootup tracer name is stored in an init section.
9188 * This function is called from a late initcall. If we did not
9189 * find the boot tracer, then clear it out, to prevent
9190 * later registration from accessing the buffer that is
9191 * about to be freed.
9193 if (!default_bootup_tracer)
9196 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9197 default_bootup_tracer);
9198 default_bootup_tracer = NULL;
9203 fs_initcall(tracer_init_tracefs);
9204 late_initcall_sync(clear_boot_tracer);
9206 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9207 __init static int tracing_set_default_clock(void)
9209 /* sched_clock_stable() is determined in late_initcall */
9210 if (!trace_boot_clock && !sched_clock_stable()) {
9212 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9213 "If you want to keep using the local clock, then add:\n"
9214 " \"trace_clock=local\"\n"
9215 "on the kernel command line\n");
9216 tracing_set_clock(&global_trace, "global");
9221 late_initcall_sync(tracing_set_default_clock);
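/*
 * Related usage (illustrative): the boot-time default chosen above can
 * always be overridden at run time through the trace_clock file, or on
 * the command line as the pr_info() text suggests:
 *
 *	echo global > /sys/kernel/tracing/trace_clock
 *	# or boot with: trace_clock=local
 */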