1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
50 #include "trace_output.h"
53 * On boot up, the ring buffer is set to the minimum size, so that
54 * we do not waste memory on systems that are not using tracing.
56 bool ring_buffer_expanded;
59 * We need to change this state when a selftest is running.
60 * A selftest will look into the ring buffer to count the
61 * entries inserted during the selftest, although some concurrent
62 * insertions into the ring buffer, such as trace_printk(), could occur
63 * at the same time, giving false positive or negative results.
65 static bool __read_mostly tracing_selftest_running;
68 * If a tracer is running, we do not want to run SELFTEST.
70 bool __read_mostly tracing_selftest_disabled;
72 /* Pipe tracepoints to printk */
73 struct trace_iterator *tracepoint_print_iter;
74 int tracepoint_printk;
75 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
77 /* For tracers that don't implement custom flags */
78 static struct tracer_opt dummy_tracer_opt[] = {
83 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
89 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event
93 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
96 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets
101 static int tracing_disabled = 1;
103 cpumask_var_t __read_mostly tracing_buffer_mask;
106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109 * is set, then ftrace_dump is called. This will output the contents
110 * of the ftrace buffers to the console. This is very useful for
111 * capturing traces that lead to crashes and outputting them to a
114 * It is off by default, but you can enable it by either specifying
115 * "ftrace_dump_on_oops" in the kernel command line, or setting
116 * /proc/sys/kernel/ftrace_dump_on_oops
117 * Set 1 if you want to dump buffers of all CPUs
118 * Set 2 if you want to dump the buffer of the CPU that triggered oops
121 enum ftrace_dump_mode ftrace_dump_on_oops;
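/*
 * Example (sketch): the dump can be requested either on the kernel command
 * line or at run time, based on the settings described above:
 *
 *    ftrace_dump_on_oops                     # boot param, dump all CPU buffers
 *    ftrace_dump_on_oops=orig_cpu            # dump only the CPU that triggered the oops
 *    echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */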
123 /* When set, tracing will stop when a WARN*() is hit */
124 int __disable_trace_on_warning;
126 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
127 /* Map of enums to their values, for "eval_map" file */
128 struct trace_eval_map_head {
130 unsigned long length;
133 union trace_eval_map_item;
135 struct trace_eval_map_tail {
137 * "end" is first and points to NULL as it must be different
138 * from "mod" or "eval_string"
140 union trace_eval_map_item *next;
141 const char *end; /* points to NULL */
144 static DEFINE_MUTEX(trace_eval_mutex);
147 * The trace_eval_maps are saved in an array with two extra elements,
148 * one at the beginning, and one at the end. The beginning item contains
149 * the count of the saved maps (head.length), and the module they
150 * belong to if not built in (head.mod). The ending item contains a
151 * pointer to the next array of saved eval_map items.
153 union trace_eval_map_item {
154 struct trace_eval_map map;
155 struct trace_eval_map_head head;
156 struct trace_eval_map_tail tail;
159 static union trace_eval_map_item *trace_eval_maps;
160 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
162 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
163 static void ftrace_trace_userstack(struct ring_buffer *buffer,
164 unsigned long flags, int pc);
166 #define MAX_TRACER_SIZE 100
167 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
168 static char *default_bootup_tracer;
170 static bool allocate_snapshot;
172 static int __init set_cmdline_ftrace(char *str)
174 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
175 default_bootup_tracer = bootup_tracer_buf;
176 /* We are using ftrace early, expand it */
177 ring_buffer_expanded = true;
180 __setup("ftrace=", set_cmdline_ftrace);
182 static int __init set_ftrace_dump_on_oops(char *str)
184 if (*str++ != '=' || !*str) {
185 ftrace_dump_on_oops = DUMP_ALL;
189 if (!strcmp("orig_cpu", str)) {
190 ftrace_dump_on_oops = DUMP_ORIG;
196 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
198 static int __init stop_trace_on_warning(char *str)
200 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
201 __disable_trace_on_warning = 1;
204 __setup("traceoff_on_warning", stop_trace_on_warning);
206 static int __init boot_alloc_snapshot(char *str)
208 allocate_snapshot = true;
209 /* We also need the main ring buffer expanded */
210 ring_buffer_expanded = true;
213 __setup("alloc_snapshot", boot_alloc_snapshot);
216 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
218 static int __init set_trace_boot_options(char *str)
220 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
223 __setup("trace_options=", set_trace_boot_options);
225 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
226 static char *trace_boot_clock __initdata;
228 static int __init set_trace_boot_clock(char *str)
230 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
231 trace_boot_clock = trace_boot_clock_buf;
234 __setup("trace_clock=", set_trace_boot_clock);
236 static int __init set_tracepoint_printk(char *str)
238 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
239 tracepoint_printk = 1;
242 __setup("tp_printk", set_tracepoint_printk);
244 unsigned long long ns2usecs(u64 nsec)
251 /* trace_flags holds trace_options default values */
252 #define TRACE_DEFAULT_FLAGS \
253 (FUNCTION_DEFAULT_FLAGS | \
254 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
255 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
256 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
257 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
259 /* trace_options that are only supported by global_trace */
260 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
261 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
263 /* trace_flags that are default zero for instances */
264 #define ZEROED_TRACE_FLAGS \
265 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
268 * The global_trace is the descriptor that holds the top-level tracing
269 * buffers for the live tracing.
271 static struct trace_array global_trace = {
272 .trace_flags = TRACE_DEFAULT_FLAGS,
275 LIST_HEAD(ftrace_trace_arrays);
277 int trace_array_get(struct trace_array *this_tr)
279 struct trace_array *tr;
282 mutex_lock(&trace_types_lock);
283 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
290 mutex_unlock(&trace_types_lock);
295 static void __trace_array_put(struct trace_array *this_tr)
297 WARN_ON(!this_tr->ref);
301 void trace_array_put(struct trace_array *this_tr)
303 mutex_lock(&trace_types_lock);
304 __trace_array_put(this_tr);
305 mutex_unlock(&trace_types_lock);
308 int tracing_check_open_get_tr(struct trace_array *tr)
312 ret = security_locked_down(LOCKDOWN_TRACEFS);
316 if (tracing_disabled)
319 if (tr && trace_array_get(tr) < 0)
325 int call_filter_check_discard(struct trace_event_call *call, void *rec,
326 struct ring_buffer *buffer,
327 struct ring_buffer_event *event)
329 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
330 !filter_match_preds(call->filter, rec)) {
331 __trace_event_discard_commit(buffer, event);
338 void trace_free_pid_list(struct trace_pid_list *pid_list)
340 vfree(pid_list->pids);
345 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
346 * @filtered_pids: The list of pids to check
347 * @search_pid: The PID to find in @filtered_pids
349 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
352 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
355 * If pid_max changed after filtered_pids was created, we
356 * by default ignore all pids greater than the previous pid_max.
358 if (search_pid >= filtered_pids->pid_max)
361 return test_bit(search_pid, filtered_pids->pids);
365 * trace_ignore_this_task - should a task be ignored for tracing
366 * @filtered_pids: The list of pids to check
367 * @task: The task that should be ignored if not filtered
369 * Checks if @task should be traced or not from @filtered_pids.
370 * Returns true if @task should *NOT* be traced.
371 * Returns false if @task should be traced.
374 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
377 * Return false, because if filtered_pids does not exist,
378 * all pids are good to trace.
383 return !trace_find_filtered_pid(filtered_pids, task->pid);
387 * trace_filter_add_remove_task - Add or remove a task from a pid_list
388 * @pid_list: The list to modify
389 * @self: The current task for fork or NULL for exit
390 * @task: The task to add or remove
392 * When adding a task: if @self is defined, the task is only added if @self
393 * is also included in @pid_list. This happens on fork and tasks should
394 * only be added when the parent is listed. If @self is NULL, then the
395 * @task pid will be removed from the list, which would happen on exit
398 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
399 struct task_struct *self,
400 struct task_struct *task)
405 /* For forks, we only add if the forking task is listed */
407 if (!trace_find_filtered_pid(pid_list, self->pid))
411 /* Sorry, but we don't support pid_max changing after setting */
412 if (task->pid >= pid_list->pid_max)
415 /* "self" is set for forks, and NULL for exits */
417 set_bit(task->pid, pid_list->pids);
419 clear_bit(task->pid, pid_list->pids);
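/*
 * Example (illustrative sketch, not taken from this file): fork/exit hooks
 * would keep a pid_list in sync roughly like this:
 *
 *    // on fork: the child is added only if the parent is already listed
 *    trace_filter_add_remove_task(pid_list, parent, child);
 *
 *    // on exit: passing NULL for @self removes the task from the list
 *    trace_filter_add_remove_task(pid_list, NULL, task);
 */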
423 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
424 * @pid_list: The pid list to show
425 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
426 * @pos: The position of the file
428 * This is used by the seq_file "next" operation to iterate the pids
429 * listed in a trace_pid_list structure.
431 * Returns the pid+1 as we want to display pid of zero, but NULL would
432 * stop the iteration.
434 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
436 unsigned long pid = (unsigned long)v;
440 /* pid already is +1 of the actual previous bit */
441 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
443 /* Return pid + 1 to allow zero to be represented */
444 if (pid < pid_list->pid_max)
445 return (void *)(pid + 1);
451 * trace_pid_start - Used for seq_file to start reading pid lists
452 * @pid_list: The pid list to show
453 * @pos: The position of the file
455 * This is used by seq_file "start" operation to start the iteration
458 * Returns the pid+1 as we want to display pid of zero, but NULL would
459 * stop the iteration.
461 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
466 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
467 if (pid >= pid_list->pid_max)
470 /* Return pid + 1 so that zero can be the exit value */
471 for (pid++; pid && l < *pos;
472 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
478 * trace_pid_show - show the current pid in seq_file processing
479 * @m: The seq_file structure to write into
480 * @v: A void pointer of the pid (+1) value to display
482 * Can be directly used by seq_file operations to display the current
485 int trace_pid_show(struct seq_file *m, void *v)
487 unsigned long pid = (unsigned long)v - 1;
489 seq_printf(m, "%lu\n", pid);
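/*
 * Example (hypothetical sketch): the three helpers above are meant to back
 * the seq_file operations of a pid filter file. Assuming a pid_list and a
 * p_stop() callback exist, the wiring looks roughly like:
 *
 *    static void *p_start(struct seq_file *m, loff_t *pos)
 *    {
 *            return trace_pid_start(pid_list, pos);
 *    }
 *
 *    static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *    {
 *            return trace_pid_next(pid_list, v, pos);
 *    }
 *
 *    static const struct seq_operations pid_seq_ops = {
 *            .start  = p_start,
 *            .next   = p_next,
 *            .stop   = p_stop,
 *            .show   = trace_pid_show,
 *    };
 */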
493 /* 128 should be much more than enough */
494 #define PID_BUF_SIZE 127
496 int trace_pid_write(struct trace_pid_list *filtered_pids,
497 struct trace_pid_list **new_pid_list,
498 const char __user *ubuf, size_t cnt)
500 struct trace_pid_list *pid_list;
501 struct trace_parser parser;
509 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
513 * Always recreate a new array. The write is an all-or-nothing
514 * operation: a new array is always created when the user adds
515 * new pids. If the operation fails, then the current list is
518 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
520 trace_parser_put(&parser);
524 pid_list->pid_max = READ_ONCE(pid_max);
526 /* Only truncating will shrink pid_max */
527 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
528 pid_list->pid_max = filtered_pids->pid_max;
530 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
531 if (!pid_list->pids) {
532 trace_parser_put(&parser);
538 /* copy the current bits to the new max */
539 for_each_set_bit(pid, filtered_pids->pids,
540 filtered_pids->pid_max) {
541 set_bit(pid, pid_list->pids);
550 ret = trace_get_user(&parser, ubuf, cnt, &pos);
551 if (ret < 0 || !trace_parser_loaded(&parser))
559 if (kstrtoul(parser.buffer, 0, &val))
561 if (val >= pid_list->pid_max)
566 set_bit(pid, pid_list->pids);
569 trace_parser_clear(&parser);
572 trace_parser_put(&parser);
575 trace_free_pid_list(pid_list);
580 /* Cleared the list of pids */
581 trace_free_pid_list(pid_list);
586 *new_pid_list = pid_list;
591 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
595 /* Early boot up does not have a buffer yet */
597 return trace_clock_local();
599 ts = ring_buffer_time_stamp(buf->buffer, cpu);
600 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
605 u64 ftrace_now(int cpu)
607 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
611 * tracing_is_enabled - Show if global_trace has been disabled
613 * Shows if the global trace has been enabled or not. It uses the
614 * mirror flag "buffer_disabled" to be used in fast paths such as for
615 * the irqsoff tracer. But it may be inaccurate due to races. If you
616 * need to know the accurate state, use tracing_is_on() which is a little
617 * slower, but accurate.
619 int tracing_is_enabled(void)
622 * For quick access (irqsoff uses this in fast path), just
623 * return the mirror variable of the state of the ring buffer.
624 * It's a little racy, but we don't really care.
627 return !global_trace.buffer_disabled;
631 * trace_buf_size is the size in bytes that is allocated
632 * for a buffer. Note, the number of bytes is always rounded
635 * This number is purposely set to a low number of 16384.
636 * If a dump on oops happens, not having to wait for all that
637 * output will be much appreciated. Anyway, this is both boot-time
638 * and run-time configurable.
640 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
642 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
644 /* trace_types holds a linked list of available tracers. */
645 static struct tracer *trace_types __read_mostly;
648 * trace_types_lock is used to protect the trace_types list.
650 DEFINE_MUTEX(trace_types_lock);
653 * serialize access to the ring buffer
655 * The ring buffer serializes readers, but that is only low-level protection.
656 * The validity of the events (which are returned by ring_buffer_peek() etc.)
657 * is not protected by the ring buffer.
659 * The content of events may become garbage if we allow another process to
660 * consume these events concurrently:
661 * A) the page of the consumed events may become a normal page
662 * (not a reader page) in the ring buffer, and this page will be rewritten
663 * by the events producer.
664 * B) The page of the consumed events may become a page for splice_read,
665 * and this page will be returned to the system.
667 * These primitives allow multiple processes to access different cpu ring buffers
670 * These primitives don't distinguish read-only and read-consume access.
671 * Multiple read-only accesses are also serialized.
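/*
 * Example (sketch): a reader consuming a single cpu's buffer takes the
 * per-cpu side, while an operation that touches every buffer passes
 * RING_BUFFER_ALL_CPUS to take the writer side of all_cpu_access_lock:
 *
 *    trace_access_lock(cpu);
 *    ... read or consume events from that cpu's ring buffer ...
 *    trace_access_unlock(cpu);
 */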
675 static DECLARE_RWSEM(all_cpu_access_lock);
676 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
678 static inline void trace_access_lock(int cpu)
680 if (cpu == RING_BUFFER_ALL_CPUS) {
681 /* gain it for accessing the whole ring buffer. */
682 down_write(&all_cpu_access_lock);
684 /* gain it for accessing a cpu ring buffer. */
686 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
687 down_read(&all_cpu_access_lock);
689 /* Secondly block other access to this @cpu ring buffer. */
690 mutex_lock(&per_cpu(cpu_access_lock, cpu));
694 static inline void trace_access_unlock(int cpu)
696 if (cpu == RING_BUFFER_ALL_CPUS) {
697 up_write(&all_cpu_access_lock);
699 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
700 up_read(&all_cpu_access_lock);
704 static inline void trace_access_lock_init(void)
708 for_each_possible_cpu(cpu)
709 mutex_init(&per_cpu(cpu_access_lock, cpu));
714 static DEFINE_MUTEX(access_lock);
716 static inline void trace_access_lock(int cpu)
719 mutex_lock(&access_lock);
722 static inline void trace_access_unlock(int cpu)
725 mutex_unlock(&access_lock);
728 static inline void trace_access_lock_init(void)
734 #ifdef CONFIG_STACKTRACE
735 static void __ftrace_trace_stack(struct ring_buffer *buffer,
737 int skip, int pc, struct pt_regs *regs);
738 static inline void ftrace_trace_stack(struct trace_array *tr,
739 struct ring_buffer *buffer,
741 int skip, int pc, struct pt_regs *regs);
744 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
746 int skip, int pc, struct pt_regs *regs)
749 static inline void ftrace_trace_stack(struct trace_array *tr,
750 struct ring_buffer *buffer,
752 int skip, int pc, struct pt_regs *regs)
758 static __always_inline void
759 trace_event_setup(struct ring_buffer_event *event,
760 int type, unsigned long flags, int pc)
762 struct trace_entry *ent = ring_buffer_event_data(event);
764 tracing_generic_entry_update(ent, type, flags, pc);
767 static __always_inline struct ring_buffer_event *
768 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
771 unsigned long flags, int pc)
773 struct ring_buffer_event *event;
775 event = ring_buffer_lock_reserve(buffer, len);
777 trace_event_setup(event, type, flags, pc);
782 void tracer_tracing_on(struct trace_array *tr)
784 if (tr->trace_buffer.buffer)
785 ring_buffer_record_on(tr->trace_buffer.buffer);
787 * This flag is looked at when buffers haven't been allocated
788 * yet, or by some tracers (like irqsoff), that just want to
789 * know if the ring buffer has been disabled, but it can handle
790 * races where it gets disabled but we still do a record.
791 * As the check is in the fast path of the tracers, it is more
792 * important to be fast than accurate.
794 tr->buffer_disabled = 0;
795 /* Make the flag seen by readers */
800 * tracing_on - enable tracing buffers
802 * This function enables tracing buffers that may have been
803 * disabled with tracing_off.
805 void tracing_on(void)
807 tracer_tracing_on(&global_trace);
809 EXPORT_SYMBOL_GPL(tracing_on);
812 static __always_inline void
813 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
815 __this_cpu_write(trace_taskinfo_save, true);
817 /* If this is the temp buffer, we need to commit fully */
818 if (this_cpu_read(trace_buffered_event) == event) {
819 /* Length is in event->array[0] */
820 ring_buffer_write(buffer, event->array[0], &event->array[1]);
821 /* Release the temp buffer */
822 this_cpu_dec(trace_buffered_event_cnt);
824 ring_buffer_unlock_commit(buffer, event);
828 * __trace_puts - write a constant string into the trace buffer.
829 * @ip: The address of the caller
830 * @str: The constant string to write
831 * @size: The size of the string.
833 int __trace_puts(unsigned long ip, const char *str, int size)
835 struct ring_buffer_event *event;
836 struct ring_buffer *buffer;
837 struct print_entry *entry;
838 unsigned long irq_flags;
842 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
845 pc = preempt_count();
847 if (unlikely(tracing_selftest_running || tracing_disabled))
850 alloc = sizeof(*entry) + size + 2; /* possible \n added */
852 local_save_flags(irq_flags);
853 buffer = global_trace.trace_buffer.buffer;
854 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
859 entry = ring_buffer_event_data(event);
862 memcpy(&entry->buf, str, size);
864 /* Add a newline if necessary */
865 if (entry->buf[size - 1] != '\n') {
866 entry->buf[size] = '\n';
867 entry->buf[size + 1] = '\0';
869 entry->buf[size] = '\0';
871 __buffer_unlock_commit(buffer, event);
872 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
876 EXPORT_SYMBOL_GPL(__trace_puts);
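/*
 * Example (sketch): callers normally go through the trace_puts() macro,
 * which supplies the caller's address; a direct call would look like:
 *
 *    __trace_puts(_THIS_IP_, str, strlen(str));
 */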
879 * __trace_bputs - write the pointer to a constant string into trace buffer
880 * @ip: The address of the caller
881 * @str: The constant string to write to the buffer to
883 int __trace_bputs(unsigned long ip, const char *str)
885 struct ring_buffer_event *event;
886 struct ring_buffer *buffer;
887 struct bputs_entry *entry;
888 unsigned long irq_flags;
889 int size = sizeof(struct bputs_entry);
892 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
895 pc = preempt_count();
897 if (unlikely(tracing_selftest_running || tracing_disabled))
900 local_save_flags(irq_flags);
901 buffer = global_trace.trace_buffer.buffer;
902 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
907 entry = ring_buffer_event_data(event);
911 __buffer_unlock_commit(buffer, event);
912 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
916 EXPORT_SYMBOL_GPL(__trace_bputs);
918 #ifdef CONFIG_TRACER_SNAPSHOT
919 void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
921 struct tracer *tracer = tr->current_trace;
925 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
926 internal_trace_puts("*** snapshot is being ignored ***\n");
930 if (!tr->allocated_snapshot) {
931 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
932 internal_trace_puts("*** stopping trace here! ***\n");
937 /* Note, snapshot can not be used when the tracer uses it */
938 if (tracer->use_max_tr) {
939 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
940 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
944 local_irq_save(flags);
945 update_max_tr(tr, current, smp_processor_id(), cond_data);
946 local_irq_restore(flags);
949 void tracing_snapshot_instance(struct trace_array *tr)
951 tracing_snapshot_instance_cond(tr, NULL);
955 * tracing_snapshot - take a snapshot of the current buffer.
957 * This causes a swap between the snapshot buffer and the current live
958 * tracing buffer. You can use this to take snapshots of the live
959 * trace when some condition is triggered, but continue to trace.
961 * Note, make sure to allocate the snapshot with either
962 * a tracing_snapshot_alloc(), or by doing it manually
963 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
965 * If the snapshot buffer is not allocated, it will stop tracing.
966 * Basically making a permanent snapshot.
968 void tracing_snapshot(void)
970 struct trace_array *tr = &global_trace;
972 tracing_snapshot_instance(tr);
974 EXPORT_SYMBOL_GPL(tracing_snapshot);
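/*
 * Example (sketch): once the snapshot buffer has been allocated (via
 * tracing_snapshot_alloc() or "echo 1 > /sys/kernel/debug/tracing/snapshot"),
 * a caller can freeze the interesting trace while tracing continues:
 *
 *    if (looks_like_the_bug())        // hypothetical condition
 *            tracing_snapshot();
 */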
977 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
978 * @tr: The tracing instance to snapshot
979 * @cond_data: The data to be tested conditionally, and possibly saved
981 * This is the same as tracing_snapshot() except that the snapshot is
982 * conditional - the snapshot will only happen if the
983 * cond_snapshot.update() implementation receiving the cond_data
984 * returns true, which means that the trace array's cond_snapshot
985 * update() operation used the cond_data to determine whether the
986 * snapshot should be taken, and if it was, presumably saved it along
989 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
991 tracing_snapshot_instance_cond(tr, cond_data);
993 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
996 * tracing_snapshot_cond_data - get the user data associated with a snapshot
997 * @tr: The tracing instance
999 * When the user enables a conditional snapshot using
1000 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1001 * with the snapshot. This accessor is used to retrieve it.
1003 * Should not be called from cond_snapshot.update(), since it takes
1004 * the tr->max_lock, which the code calling
1005 * cond_snapshot.update() has already taken.
1007 * Returns the cond_data associated with the trace array's snapshot.
1009 void *tracing_cond_snapshot_data(struct trace_array *tr)
1011 void *cond_data = NULL;
1013 arch_spin_lock(&tr->max_lock);
1015 if (tr->cond_snapshot)
1016 cond_data = tr->cond_snapshot->cond_data;
1018 arch_spin_unlock(&tr->max_lock);
1022 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1024 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1025 struct trace_buffer *size_buf, int cpu_id);
1026 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1028 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1032 if (!tr->allocated_snapshot) {
1034 /* allocate spare buffer */
1035 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1036 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
1040 tr->allocated_snapshot = true;
1046 static void free_snapshot(struct trace_array *tr)
1049 * We don't free the ring buffer; instead, we resize it because
1050 * the max_tr ring buffer has some state (e.g. ring->clock) and
1051 * we want to preserve it.
1053 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1054 set_buffer_entries(&tr->max_buffer, 1);
1055 tracing_reset_online_cpus(&tr->max_buffer);
1056 tr->allocated_snapshot = false;
1060 * tracing_alloc_snapshot - allocate snapshot buffer.
1062 * This only allocates the snapshot buffer if it isn't already
1063 * allocated - it doesn't also take a snapshot.
1065 * This is meant to be used in cases where the snapshot buffer needs
1066 * to be set up for events that can't sleep but need to be able to
1067 * trigger a snapshot.
1069 int tracing_alloc_snapshot(void)
1071 struct trace_array *tr = &global_trace;
1074 ret = tracing_alloc_snapshot_instance(tr);
1079 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1082 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1084 * This is similar to tracing_snapshot(), but it will allocate the
1085 * snapshot buffer if it isn't already allocated. Use this only
1086 * where it is safe to sleep, as the allocation may sleep.
1088 * This causes a swap between the snapshot buffer and the current live
1089 * tracing buffer. You can use this to take snapshots of the live
1090 * trace when some condition is triggered, but continue to trace.
1092 void tracing_snapshot_alloc(void)
1096 ret = tracing_alloc_snapshot();
1102 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1105 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1106 * @tr: The tracing instance
1107 * @cond_data: User data to associate with the snapshot
1108 * @update: Implementation of the cond_snapshot update function
1110 * Check whether the conditional snapshot for the given instance has
1111 * already been enabled, or if the current tracer is already using a
1112 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1113 * save the cond_data and update function inside.
1115 * Returns 0 if successful, error otherwise.
1117 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1118 cond_update_fn_t update)
1120 struct cond_snapshot *cond_snapshot;
1123 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1127 cond_snapshot->cond_data = cond_data;
1128 cond_snapshot->update = update;
1130 mutex_lock(&trace_types_lock);
1132 ret = tracing_alloc_snapshot_instance(tr);
1136 if (tr->current_trace->use_max_tr) {
1142 * The cond_snapshot can only change to NULL without the
1143 * trace_types_lock. We don't care if we race with it going
1144 * to NULL, but we want to make sure that it's not set to
1145 * something other than NULL when we get here, which we can
1146 * do safely with only holding the trace_types_lock and not
1147 * having to take the max_lock.
1149 if (tr->cond_snapshot) {
1154 arch_spin_lock(&tr->max_lock);
1155 tr->cond_snapshot = cond_snapshot;
1156 arch_spin_unlock(&tr->max_lock);
1158 mutex_unlock(&trace_types_lock);
1163 mutex_unlock(&trace_types_lock);
1164 kfree(cond_snapshot);
1167 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
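/*
 * Example (illustrative sketch): a user of conditional snapshots supplies an
 * update callback that decides, from the cond_data, whether to swap buffers.
 * The my_cond type and threshold check below are assumptions:
 *
 *    static bool my_update(struct trace_array *tr, void *cond_data)
 *    {
 *            struct my_cond *c = cond_data;
 *
 *            return c->value > c->threshold;   // true means: take the snapshot
 *    }
 *
 *    tracing_snapshot_cond_enable(tr, &my_cond_data, my_update);
 *    ...
 *    tracing_snapshot_cond(tr, &my_cond_data);
 *    ...
 *    tracing_snapshot_cond_disable(tr);
 */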
1170 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1171 * @tr: The tracing instance
1173 * Check whether the conditional snapshot for the given instance is
1174 * enabled; if so, free the cond_snapshot associated with it,
1175 * otherwise return -EINVAL.
1177 * Returns 0 if successful, error otherwise.
1179 int tracing_snapshot_cond_disable(struct trace_array *tr)
1183 arch_spin_lock(&tr->max_lock);
1185 if (!tr->cond_snapshot)
1188 kfree(tr->cond_snapshot);
1189 tr->cond_snapshot = NULL;
1192 arch_spin_unlock(&tr->max_lock);
1196 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1198 void tracing_snapshot(void)
1200 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1202 EXPORT_SYMBOL_GPL(tracing_snapshot);
1203 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1205 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1207 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1208 int tracing_alloc_snapshot(void)
1210 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1213 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1214 void tracing_snapshot_alloc(void)
1219 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1220 void *tracing_cond_snapshot_data(struct trace_array *tr)
1224 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1225 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1229 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1230 int tracing_snapshot_cond_disable(struct trace_array *tr)
1234 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1235 #endif /* CONFIG_TRACER_SNAPSHOT */
1237 void tracer_tracing_off(struct trace_array *tr)
1239 if (tr->trace_buffer.buffer)
1240 ring_buffer_record_off(tr->trace_buffer.buffer);
1242 * This flag is looked at when buffers haven't been allocated
1243 * yet, or by some tracers (like irqsoff), that just want to
1244 * know if the ring buffer has been disabled, but it can handle
1245 * races where it gets disabled but we still do a record.
1246 * As the check is in the fast path of the tracers, it is more
1247 * important to be fast than accurate.
1249 tr->buffer_disabled = 1;
1250 /* Make the flag seen by readers */
1255 * tracing_off - turn off tracing buffers
1257 * This function stops the tracing buffers from recording data.
1258 * It does not disable any overhead the tracers themselves may
1259 * be causing. This function simply causes all recording to
1260 * the ring buffers to fail.
1262 void tracing_off(void)
1264 tracer_tracing_off(&global_trace);
1266 EXPORT_SYMBOL_GPL(tracing_off);
1268 void disable_trace_on_warning(void)
1270 if (__disable_trace_on_warning)
1275 * tracer_tracing_is_on - show the real state of the ring buffer
1276 * @tr: the trace array to check
1278 * Shows the real state of the ring buffer: whether recording is enabled or not.
1280 bool tracer_tracing_is_on(struct trace_array *tr)
1282 if (tr->trace_buffer.buffer)
1283 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1284 return !tr->buffer_disabled;
1288 * tracing_is_on - show state of ring buffers enabled
1290 int tracing_is_on(void)
1292 return tracer_tracing_is_on(&global_trace);
1294 EXPORT_SYMBOL_GPL(tracing_is_on);
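/*
 * Example (sketch): a common debugging pattern is to stop the ring buffers
 * the moment a suspicious state is detected, so the trace leading up to it
 * is preserved for later inspection:
 *
 *    if (data_looks_corrupted())      // hypothetical check
 *            tracing_off();
 */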
1296 static int __init set_buf_size(char *str)
1298 unsigned long buf_size;
1302 buf_size = memparse(str, &str);
1303 /* nr_entries can not be zero */
1306 trace_buf_size = buf_size;
1309 __setup("trace_buf_size=", set_buf_size);
1311 static int __init set_tracing_thresh(char *str)
1313 unsigned long threshold;
1318 ret = kstrtoul(str, 0, &threshold);
1321 tracing_thresh = threshold * 1000;
1324 __setup("tracing_thresh=", set_tracing_thresh);
1326 unsigned long nsecs_to_usecs(unsigned long nsecs)
1328 return nsecs / 1000;
1332 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1333 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1334 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1335 * of strings in the order that the evals (enum) were defined.
1340 /* These must match the bit positions in trace_iterator_flags */
1341 static const char *trace_options[] = {
1349 int in_ns; /* is this clock in nanoseconds? */
1350 } trace_clocks[] = {
1351 { trace_clock_local, "local", 1 },
1352 { trace_clock_global, "global", 1 },
1353 { trace_clock_counter, "counter", 0 },
1354 { trace_clock_jiffies, "uptime", 0 },
1355 { trace_clock, "perf", 1 },
1356 { ktime_get_mono_fast_ns, "mono", 1 },
1357 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1358 { ktime_get_boot_fast_ns, "boot", 1 },
1362 bool trace_clock_in_ns(struct trace_array *tr)
1364 if (trace_clocks[tr->clock_id].in_ns)
1371 * trace_parser_get_init - gets the buffer for trace parser
1373 int trace_parser_get_init(struct trace_parser *parser, int size)
1375 memset(parser, 0, sizeof(*parser));
1377 parser->buffer = kmalloc(size, GFP_KERNEL);
1378 if (!parser->buffer)
1381 parser->size = size;
1386 * trace_parser_put - frees the buffer for trace parser
1388 void trace_parser_put(struct trace_parser *parser)
1390 kfree(parser->buffer);
1391 parser->buffer = NULL;
1395 * trace_get_user - reads the user input string separated by space
1396 * (matched by isspace(ch))
1398 * For each string found the 'struct trace_parser' is updated,
1399 * and the function returns.
1401 * Returns number of bytes read.
1403 * See kernel/trace/trace.h for 'struct trace_parser' details.
1405 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1406 size_t cnt, loff_t *ppos)
1413 trace_parser_clear(parser);
1415 ret = get_user(ch, ubuf++);
1423 * The parser is not finished with the last write,
1424 * continue reading the user input without skipping spaces.
1426 if (!parser->cont) {
1427 /* skip white space */
1428 while (cnt && isspace(ch)) {
1429 ret = get_user(ch, ubuf++);
1438 /* only spaces were written */
1439 if (isspace(ch) || !ch) {
1446 /* read the non-space input */
1447 while (cnt && !isspace(ch) && ch) {
1448 if (parser->idx < parser->size - 1)
1449 parser->buffer[parser->idx++] = ch;
1454 ret = get_user(ch, ubuf++);
1461 /* We either got finished input or we have to wait for another call. */
1462 if (isspace(ch) || !ch) {
1463 parser->buffer[parser->idx] = 0;
1464 parser->cont = false;
1465 } else if (parser->idx < parser->size - 1) {
1466 parser->cont = true;
1467 parser->buffer[parser->idx++] = ch;
1468 /* Make sure the parsed string always terminates with '\0'. */
1469 parser->buffer[parser->idx] = 0;
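/*
 * Example (sketch): a typical write handler loops over trace_get_user()
 * until the user buffer is consumed, handling one whitespace-separated
 * token per iteration (trace_pid_write() above follows this pattern):
 *
 *    trace_parser_get_init(&parser, TOKEN_MAX);   // TOKEN_MAX is assumed
 *    while (cnt > 0) {
 *            pos = 0;
 *            ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *            if (ret < 0 || !trace_parser_loaded(&parser))
 *                    break;
 *            // ... use parser.buffer here ...
 *            ubuf += ret;
 *            cnt -= ret;
 *            trace_parser_clear(&parser);
 *    }
 *    trace_parser_put(&parser);
 */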
1482 /* TODO add a seq_buf_to_buffer() */
1483 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1487 if (trace_seq_used(s) <= s->seq.readpos)
1490 len = trace_seq_used(s) - s->seq.readpos;
1493 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1495 s->seq.readpos += cnt;
1499 unsigned long __read_mostly tracing_thresh;
1501 #ifdef CONFIG_TRACER_MAX_TRACE
1503 * Copy the new maximum trace into the separate maximum-trace
1504 * structure. (this way the maximum trace is permanently saved,
1505 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1508 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1510 struct trace_buffer *trace_buf = &tr->trace_buffer;
1511 struct trace_buffer *max_buf = &tr->max_buffer;
1512 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1513 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1516 max_buf->time_start = data->preempt_timestamp;
1518 max_data->saved_latency = tr->max_latency;
1519 max_data->critical_start = data->critical_start;
1520 max_data->critical_end = data->critical_end;
1522 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1523 max_data->pid = tsk->pid;
1525 * If tsk == current, then use current_uid(), as that does not use
1526 * RCU. The irq tracer can be called out of RCU scope.
1529 max_data->uid = current_uid();
1531 max_data->uid = task_uid(tsk);
1533 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1534 max_data->policy = tsk->policy;
1535 max_data->rt_priority = tsk->rt_priority;
1537 /* record this task's comm */
1538 tracing_record_cmdline(tsk);
1542 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1544 * @tsk: the task with the latency
1545 * @cpu: The cpu that initiated the trace.
1546 * @cond_data: User data associated with a conditional snapshot
1548 * Flip the buffers between the @tr and the max_tr and record information
1549 * about which task was the cause of this latency.
1552 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1558 WARN_ON_ONCE(!irqs_disabled());
1560 if (!tr->allocated_snapshot) {
1561 /* Only the nop tracer should hit this when disabling */
1562 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1566 arch_spin_lock(&tr->max_lock);
1568 /* Inherit the recordable setting from trace_buffer */
1569 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1570 ring_buffer_record_on(tr->max_buffer.buffer);
1572 ring_buffer_record_off(tr->max_buffer.buffer);
1574 #ifdef CONFIG_TRACER_SNAPSHOT
1575 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1578 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1580 __update_max_tr(tr, tsk, cpu);
1583 arch_spin_unlock(&tr->max_lock);
1587 * update_max_tr_single - only copy one trace over, and reset the rest
1589 * @tsk: task with the latency
1590 * @cpu: the cpu of the buffer to copy.
1592 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1595 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1602 WARN_ON_ONCE(!irqs_disabled());
1603 if (!tr->allocated_snapshot) {
1604 /* Only the nop tracer should hit this when disabling */
1605 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1609 arch_spin_lock(&tr->max_lock);
1611 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1613 if (ret == -EBUSY) {
1615 * We failed to swap the buffer due to a commit taking
1616 * place on this CPU. We fail to record, but we reset
1617 * the max trace buffer (no one writes directly to it)
1618 * and flag that it failed.
1620 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1621 "Failed to swap buffers due to commit in progress\n");
1624 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1626 __update_max_tr(tr, tsk, cpu);
1627 arch_spin_unlock(&tr->max_lock);
1629 #endif /* CONFIG_TRACER_MAX_TRACE */
1631 static int wait_on_pipe(struct trace_iterator *iter, int full)
1633 /* Iterators are static, they should be filled or empty */
1634 if (trace_buffer_iter(iter, iter->cpu_file))
1637 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1641 #ifdef CONFIG_FTRACE_STARTUP_TEST
1642 static bool selftests_can_run;
1644 struct trace_selftests {
1645 struct list_head list;
1646 struct tracer *type;
1649 static LIST_HEAD(postponed_selftests);
1651 static int save_selftest(struct tracer *type)
1653 struct trace_selftests *selftest;
1655 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1659 selftest->type = type;
1660 list_add(&selftest->list, &postponed_selftests);
1664 static int run_tracer_selftest(struct tracer *type)
1666 struct trace_array *tr = &global_trace;
1667 struct tracer *saved_tracer = tr->current_trace;
1670 if (!type->selftest || tracing_selftest_disabled)
1674 * If a tracer registers early in boot up (before scheduling is
1675 * initialized and such), then do not run its selftests yet.
1676 * Instead, run it a little later in the boot process.
1678 if (!selftests_can_run)
1679 return save_selftest(type);
1682 * Run a selftest on this tracer.
1683 * Here we reset the trace buffer, and set the current
1684 * tracer to be this tracer. The tracer can then run some
1685 * internal tracing to verify that everything is in order.
1686 * If we fail, we do not register this tracer.
1688 tracing_reset_online_cpus(&tr->trace_buffer);
1690 tr->current_trace = type;
1692 #ifdef CONFIG_TRACER_MAX_TRACE
1693 if (type->use_max_tr) {
1694 /* If we expanded the buffers, make sure the max is expanded too */
1695 if (ring_buffer_expanded)
1696 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1697 RING_BUFFER_ALL_CPUS);
1698 tr->allocated_snapshot = true;
1702 /* the test is responsible for initializing and enabling */
1703 pr_info("Testing tracer %s: ", type->name);
1704 ret = type->selftest(type, tr);
1705 /* the test is responsible for resetting too */
1706 tr->current_trace = saved_tracer;
1708 printk(KERN_CONT "FAILED!\n");
1709 /* Add the warning after printing 'FAILED' */
1713 /* Only reset on passing, to avoid touching corrupted buffers */
1714 tracing_reset_online_cpus(&tr->trace_buffer);
1716 #ifdef CONFIG_TRACER_MAX_TRACE
1717 if (type->use_max_tr) {
1718 tr->allocated_snapshot = false;
1720 /* Shrink the max buffer again */
1721 if (ring_buffer_expanded)
1722 ring_buffer_resize(tr->max_buffer.buffer, 1,
1723 RING_BUFFER_ALL_CPUS);
1727 printk(KERN_CONT "PASSED\n");
1731 static __init int init_trace_selftests(void)
1733 struct trace_selftests *p, *n;
1734 struct tracer *t, **last;
1737 selftests_can_run = true;
1739 mutex_lock(&trace_types_lock);
1741 if (list_empty(&postponed_selftests))
1744 pr_info("Running postponed tracer tests:\n");
1746 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1747 /* This loop can take minutes when sanitizers are enabled, so
1748 * let's make sure we allow RCU processing.
1751 ret = run_tracer_selftest(p->type);
1752 /* If the test fails, then warn and remove from available_tracers */
1754 WARN(1, "tracer: %s failed selftest, disabling\n",
1756 last = &trace_types;
1757 for (t = trace_types; t; t = t->next) {
1770 mutex_unlock(&trace_types_lock);
1774 core_initcall(init_trace_selftests);
1776 static inline int run_tracer_selftest(struct tracer *type)
1780 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1782 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1784 static void __init apply_trace_boot_options(void);
1787 * register_tracer - register a tracer with the ftrace system.
1788 * @type: the plugin for the tracer
1790 * Register a new plugin tracer.
1792 int __init register_tracer(struct tracer *type)
1798 pr_info("Tracer must have a name\n");
1802 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1803 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1807 mutex_lock(&trace_types_lock);
1809 tracing_selftest_running = true;
1811 for (t = trace_types; t; t = t->next) {
1812 if (strcmp(type->name, t->name) == 0) {
1814 pr_info("Tracer %s already registered\n",
1821 if (!type->set_flag)
1822 type->set_flag = &dummy_set_flag;
1824 /* allocate a dummy tracer_flags */
1825 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1830 type->flags->val = 0;
1831 type->flags->opts = dummy_tracer_opt;
1833 if (!type->flags->opts)
1834 type->flags->opts = dummy_tracer_opt;
1836 /* store the tracer for __set_tracer_option */
1837 type->flags->trace = type;
1839 ret = run_tracer_selftest(type);
1843 type->next = trace_types;
1845 add_tracer_options(&global_trace, type);
1848 tracing_selftest_running = false;
1849 mutex_unlock(&trace_types_lock);
1851 if (ret || !default_bootup_tracer)
1854 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1857 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1858 /* Do we want this tracer to start on bootup? */
1859 tracing_set_tracer(&global_trace, type->name);
1860 default_bootup_tracer = NULL;
1862 apply_trace_boot_options();
1864 /* disable other selftests, since this will break it. */
1865 tracing_selftest_disabled = true;
1866 #ifdef CONFIG_FTRACE_STARTUP_TEST
1867 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1875 static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
1877 struct ring_buffer *buffer = buf->buffer;
1882 ring_buffer_record_disable(buffer);
1884 /* Make sure all commits have finished */
1886 ring_buffer_reset_cpu(buffer, cpu);
1888 ring_buffer_record_enable(buffer);
1891 void tracing_reset_online_cpus(struct trace_buffer *buf)
1893 struct ring_buffer *buffer = buf->buffer;
1899 ring_buffer_record_disable(buffer);
1901 /* Make sure all commits have finished */
1904 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1906 for_each_online_cpu(cpu)
1907 ring_buffer_reset_cpu(buffer, cpu);
1909 ring_buffer_record_enable(buffer);
1912 /* Must have trace_types_lock held */
1913 void tracing_reset_all_online_cpus(void)
1915 struct trace_array *tr;
1917 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1918 if (!tr->clear_trace)
1920 tr->clear_trace = false;
1921 tracing_reset_online_cpus(&tr->trace_buffer);
1922 #ifdef CONFIG_TRACER_MAX_TRACE
1923 tracing_reset_online_cpus(&tr->max_buffer);
1928 static int *tgid_map;
1930 #define SAVED_CMDLINES_DEFAULT 128
1931 #define NO_CMDLINE_MAP UINT_MAX
1932 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1933 struct saved_cmdlines_buffer {
1934 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1935 unsigned *map_cmdline_to_pid;
1936 unsigned cmdline_num;
1938 char *saved_cmdlines;
1940 static struct saved_cmdlines_buffer *savedcmd;
1942 /* temporarily disable recording */
1943 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1945 static inline char *get_saved_cmdlines(int idx)
1947 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1950 static inline void set_cmdline(int idx, const char *cmdline)
1952 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1955 static int allocate_cmdlines_buffer(unsigned int val,
1956 struct saved_cmdlines_buffer *s)
1958 s->map_cmdline_to_pid = kmalloc_array(val,
1959 sizeof(*s->map_cmdline_to_pid),
1961 if (!s->map_cmdline_to_pid)
1964 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1965 if (!s->saved_cmdlines) {
1966 kfree(s->map_cmdline_to_pid);
1971 s->cmdline_num = val;
1972 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1973 sizeof(s->map_pid_to_cmdline));
1974 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1975 val * sizeof(*s->map_cmdline_to_pid));
1980 static int trace_create_savedcmd(void)
1984 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1988 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1998 int is_tracing_stopped(void)
2000 return global_trace.stop_count;
2004 * tracing_start - quick start of the tracer
2006 * If tracing is enabled but was stopped by tracing_stop,
2007 * this will start the tracer back up.
2009 void tracing_start(void)
2011 struct ring_buffer *buffer;
2012 unsigned long flags;
2014 if (tracing_disabled)
2017 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2018 if (--global_trace.stop_count) {
2019 if (global_trace.stop_count < 0) {
2020 /* Someone screwed up their debugging */
2022 global_trace.stop_count = 0;
2027 /* Prevent the buffers from switching */
2028 arch_spin_lock(&global_trace.max_lock);
2030 buffer = global_trace.trace_buffer.buffer;
2032 ring_buffer_record_enable(buffer);
2034 #ifdef CONFIG_TRACER_MAX_TRACE
2035 buffer = global_trace.max_buffer.buffer;
2037 ring_buffer_record_enable(buffer);
2040 arch_spin_unlock(&global_trace.max_lock);
2043 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2046 static void tracing_start_tr(struct trace_array *tr)
2048 struct ring_buffer *buffer;
2049 unsigned long flags;
2051 if (tracing_disabled)
2054 /* If global, we need to also start the max tracer */
2055 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2056 return tracing_start();
2058 raw_spin_lock_irqsave(&tr->start_lock, flags);
2060 if (--tr->stop_count) {
2061 if (tr->stop_count < 0) {
2062 /* Someone screwed up their debugging */
2069 buffer = tr->trace_buffer.buffer;
2071 ring_buffer_record_enable(buffer);
2074 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2078 * tracing_stop - quick stop of the tracer
2080 * Lightweight way to stop tracing. Use in conjunction with
2083 void tracing_stop(void)
2085 struct ring_buffer *buffer;
2086 unsigned long flags;
2088 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2089 if (global_trace.stop_count++)
2092 /* Prevent the buffers from switching */
2093 arch_spin_lock(&global_trace.max_lock);
2095 buffer = global_trace.trace_buffer.buffer;
2097 ring_buffer_record_disable(buffer);
2099 #ifdef CONFIG_TRACER_MAX_TRACE
2100 buffer = global_trace.max_buffer.buffer;
2102 ring_buffer_record_disable(buffer);
2105 arch_spin_unlock(&global_trace.max_lock);
2108 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2111 static void tracing_stop_tr(struct trace_array *tr)
2113 struct ring_buffer *buffer;
2114 unsigned long flags;
2116 /* If global, we need to also stop the max tracer */
2117 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2118 return tracing_stop();
2120 raw_spin_lock_irqsave(&tr->start_lock, flags);
2121 if (tr->stop_count++)
2124 buffer = tr->trace_buffer.buffer;
2126 ring_buffer_record_disable(buffer);
2129 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2132 static int trace_save_cmdline(struct task_struct *tsk)
2136 /* treat recording of idle task as a success */
2140 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2144 * It's not the end of the world if we don't get
2145 * the lock, but we also don't want to spin
2146 * nor do we want to disable interrupts,
2147 * so if we miss here, then better luck next time.
2149 if (!arch_spin_trylock(&trace_cmdline_lock))
2152 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2153 if (idx == NO_CMDLINE_MAP) {
2154 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2157 * Check whether the cmdline buffer at idx has a pid
2158 * mapped. We are going to overwrite that entry so we
2159 * need to clear the map_pid_to_cmdline. Otherwise we
2160 * would read the new comm for the old pid.
2162 pid = savedcmd->map_cmdline_to_pid[idx];
2163 if (pid != NO_CMDLINE_MAP)
2164 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2166 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2167 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2169 savedcmd->cmdline_idx = idx;
2172 set_cmdline(idx, tsk->comm);
2174 arch_spin_unlock(&trace_cmdline_lock);
2179 static void __trace_find_cmdline(int pid, char comm[])
2184 strcpy(comm, "<idle>");
2188 if (WARN_ON_ONCE(pid < 0)) {
2189 strcpy(comm, "<XXX>");
2193 if (pid > PID_MAX_DEFAULT) {
2194 strcpy(comm, "<...>");
2198 map = savedcmd->map_pid_to_cmdline[pid];
2199 if (map != NO_CMDLINE_MAP)
2200 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2202 strcpy(comm, "<...>");
2205 void trace_find_cmdline(int pid, char comm[])
2208 arch_spin_lock(&trace_cmdline_lock);
2210 __trace_find_cmdline(pid, comm);
2212 arch_spin_unlock(&trace_cmdline_lock);
2216 int trace_find_tgid(int pid)
2218 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2221 return tgid_map[pid];
2224 static int trace_save_tgid(struct task_struct *tsk)
2226 /* treat recording of idle task as a success */
2230 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2233 tgid_map[tsk->pid] = tsk->tgid;
2237 static bool tracing_record_taskinfo_skip(int flags)
2239 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2241 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2243 if (!__this_cpu_read(trace_taskinfo_save))
2249 * tracing_record_taskinfo - record the task info of a task
2251 * @task: task to record
2252 * @flags: TRACE_RECORD_CMDLINE for recording comm
2253 * TRACE_RECORD_TGID for recording tgid
2255 void tracing_record_taskinfo(struct task_struct *task, int flags)
2259 if (tracing_record_taskinfo_skip(flags))
2263 * Record as much task information as possible. If some fail, continue
2264 * to try to record the others.
2266 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2267 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2269 /* If recording any information failed, retry again soon. */
2273 __this_cpu_write(trace_taskinfo_save, false);
2277 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2279 * @prev: previous task during sched_switch
2280 * @next: next task during sched_switch
2281 * @flags: TRACE_RECORD_CMDLINE for recording comm
2282 * TRACE_RECORD_TGID for recording tgid
2284 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2285 struct task_struct *next, int flags)
2289 if (tracing_record_taskinfo_skip(flags))
2293 * Record as much task information as possible. If some fail, continue
2294 * to try to record the others.
2296 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2297 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2298 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2299 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2301 /* If recording any information failed, retry again soon. */
2305 __this_cpu_write(trace_taskinfo_save, false);
2308 /* Helpers to record a specific task's information */
2309 void tracing_record_cmdline(struct task_struct *task)
2311 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2314 void tracing_record_tgid(struct task_struct *task)
2316 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2320 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2321 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2322 * simplifies those functions and keeps them in sync.
2324 enum print_line_t trace_handle_return(struct trace_seq *s)
2326 return trace_seq_has_overflowed(s) ?
2327 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2329 EXPORT_SYMBOL_GPL(trace_handle_return);
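/*
 * Example (sketch): an event output callback writes into the iterator's
 * seq buffer and lets this helper turn any overflow into the proper
 * return value (my_event_output is a hypothetical callback):
 *
 *    static enum print_line_t my_event_output(struct trace_iterator *iter,
 *                                             int flags, struct trace_event *event)
 *    {
 *            trace_seq_printf(&iter->seq, "my event fired\n");
 *
 *            return trace_handle_return(&iter->seq);
 *    }
 */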
2332 tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2333 unsigned long flags, int pc)
2335 struct task_struct *tsk = current;
2337 entry->preempt_count = pc & 0xff;
2338 entry->pid = (tsk) ? tsk->pid : 0;
2341 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2342 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2344 TRACE_FLAG_IRQS_NOSUPPORT |
2346 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2347 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2348 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2349 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2350 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2352 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2354 struct ring_buffer_event *
2355 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2358 unsigned long flags, int pc)
2360 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2363 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2364 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2365 static int trace_buffered_event_ref;
2368 * trace_buffered_event_enable - enable buffering events
2370 * When events are being filtered, it is quicker to use a temporary
2371 * buffer to write the event data into if there's a likely chance
2372 * that it will not be committed. The discard of the ring buffer
2373 * is not as fast as committing, and is much slower than copying
2376 * When an event is to be filtered, allocate per cpu buffers to
2377 * write the event data into, and if the event is filtered and discarded
2378 * it is simply dropped; otherwise, the entire data is to be committed
2381 void trace_buffered_event_enable(void)
2383 struct ring_buffer_event *event;
2387 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2389 if (trace_buffered_event_ref++)
2392 for_each_tracing_cpu(cpu) {
2393 page = alloc_pages_node(cpu_to_node(cpu),
2394 GFP_KERNEL | __GFP_NORETRY, 0);
2398 event = page_address(page);
2399 memset(event, 0, sizeof(*event));
2401 per_cpu(trace_buffered_event, cpu) = event;
2404 if (cpu == smp_processor_id() &&
2405 this_cpu_read(trace_buffered_event) !=
2406 per_cpu(trace_buffered_event, cpu))
2413 trace_buffered_event_disable();
2416 static void enable_trace_buffered_event(void *data)
2418 /* Probably not needed, but do it anyway */
2420 this_cpu_dec(trace_buffered_event_cnt);
2423 static void disable_trace_buffered_event(void *data)
2425 this_cpu_inc(trace_buffered_event_cnt);
2429 * trace_buffered_event_disable - disable buffering events
2431 * When a filter is removed, it is faster to not use the buffered
2432 * events, and to commit directly into the ring buffer. Free up
2433 * the temp buffers when there are no more users. This requires
2434 * special synchronization with current events.
2436 void trace_buffered_event_disable(void)
2440 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2442 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2445 if (--trace_buffered_event_ref)
2449 /* For each CPU, set the buffer as used. */
2450 smp_call_function_many(tracing_buffer_mask,
2451 disable_trace_buffered_event, NULL, 1);
2454 /* Wait for all current users to finish */
2457 for_each_tracing_cpu(cpu) {
2458 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2459 per_cpu(trace_buffered_event, cpu) = NULL;
2462 * Make sure trace_buffered_event is NULL before clearing
2463 * trace_buffered_event_cnt.
2468 /* Do the work on each cpu */
2469 smp_call_function_many(tracing_buffer_mask,
2470 enable_trace_buffered_event, NULL, 1);
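/*
 * Both of the above are reference counted: enables and disables must be
 * balanced, and both require event_mutex to be held by the caller (the
 * event filter code pairs an enable when a filter is attached with a
 * disable when that filter is removed).
 */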
2474 static struct ring_buffer *temp_buffer;
2476 struct ring_buffer_event *
2477 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2478 struct trace_event_file *trace_file,
2479 int type, unsigned long len,
2480 unsigned long flags, int pc)
2482 struct ring_buffer_event *entry;
2485 *current_rb = trace_file->tr->trace_buffer.buffer;
2487 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2488 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2489 (entry = this_cpu_read(trace_buffered_event))) {
2490 /* Try to use the per cpu buffer first */
2491 val = this_cpu_inc_return(trace_buffered_event_cnt);
2493 trace_event_setup(entry, type, flags, pc);
2494 entry->array[0] = len;
2497 this_cpu_dec(trace_buffered_event_cnt);
2500 entry = __trace_buffer_lock_reserve(*current_rb,
2501 type, len, flags, pc);
2503 * If tracing is off, but we have triggers enabled
2504 * we still need to look at the event data. Use the temp_buffer
2505 * to store the trace event for the trigger to use. It's recursion
2506 * safe and will not be recorded anywhere.
2508 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2509 *current_rb = temp_buffer;
2510 entry = __trace_buffer_lock_reserve(*current_rb,
2511 type, len, flags, pc);
2515 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
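/*
 * The reserve above pairs with trace_event_buffer_commit() below: callers
 * reserve space (per-cpu buffered event, ring buffer, or temp_buffer for
 * trigger-only use), fill in the returned entry, then commit it.
 */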
2517 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2518 static DEFINE_MUTEX(tracepoint_printk_mutex);
2520 static void output_printk(struct trace_event_buffer *fbuffer)
2522 struct trace_event_call *event_call;
2523 struct trace_event *event;
2524 unsigned long flags;
2525 struct trace_iterator *iter = tracepoint_print_iter;
2527 /* We should never get here if iter is NULL */
2528 if (WARN_ON_ONCE(!iter))
2531 event_call = fbuffer->trace_file->event_call;
2532 if (!event_call || !event_call->event.funcs ||
2533 !event_call->event.funcs->trace)
2536 event = &fbuffer->trace_file->event_call->event;
2538 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2539 trace_seq_init(&iter->seq);
2540 iter->ent = fbuffer->entry;
2541 event_call->event.funcs->trace(iter, 0, event);
2542 trace_seq_putc(&iter->seq, 0);
2543 printk("%s", iter->seq.buffer);
2545 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2548 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2549 void __user *buffer, size_t *lenp,
2552 int save_tracepoint_printk;
2555 mutex_lock(&tracepoint_printk_mutex);
2556 save_tracepoint_printk = tracepoint_printk;
2558 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2561 * This will force exiting early, as tracepoint_printk
2562 * is always zero when tracepoint_print_iter is not allocated
2564 if (!tracepoint_print_iter)
2565 tracepoint_printk = 0;
2567 if (save_tracepoint_printk == tracepoint_printk)
2570 if (tracepoint_printk)
2571 static_key_enable(&tracepoint_printk_key.key);
2573 static_key_disable(&tracepoint_printk_key.key);
2576 mutex_unlock(&tracepoint_printk_mutex);
2581 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2583 if (static_key_false(&tracepoint_printk_key.key))
2584 output_printk(fbuffer);
2586 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2587 fbuffer->event, fbuffer->entry,
2588 fbuffer->flags, fbuffer->pc);
2590 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
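/*
 * Note: with the tracepoint_printk sysctl enabled, the commit above also
 * echoes each event to the console through output_printk() before the
 * usual trigger handling and ring buffer commit run.
 */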
2595 * trace_buffer_unlock_commit_regs()
2596 * trace_event_buffer_commit()
2597 * trace_event_raw_event_xxx()
2599 # define STACK_SKIP 3
2601 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2602 struct ring_buffer *buffer,
2603 struct ring_buffer_event *event,
2604 unsigned long flags, int pc,
2605 struct pt_regs *regs)
2607 __buffer_unlock_commit(buffer, event);
2610 * If regs is not set, then skip the necessary functions.
2611 * Note, we can still get here via blktrace, wakeup tracer
2612 * and mmiotrace, but that's ok if they lose a function or
2613 * two. They are not that meaningful.
2615 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2616 ftrace_trace_userstack(buffer, flags, pc);
2620 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2623 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2624 struct ring_buffer_event *event)
2626 __buffer_unlock_commit(buffer, event);
2630 trace_process_export(struct trace_export *export,
2631 struct ring_buffer_event *event)
2633 struct trace_entry *entry;
2634 unsigned int size = 0;
2636 entry = ring_buffer_event_data(event);
2637 size = ring_buffer_event_length(event);
2638 export->write(export, entry, size);
2641 static DEFINE_MUTEX(ftrace_export_lock);
2643 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2645 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2647 static inline void ftrace_exports_enable(void)
2649 static_branch_enable(&ftrace_exports_enabled);
2652 static inline void ftrace_exports_disable(void)
2654 static_branch_disable(&ftrace_exports_enabled);
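/*
 * Hand a newly recorded function event to every trace_export currently on
 * the RCU-protected ftrace_exports_list.  Invoked from trace_function()
 * only while the ftrace_exports_enabled static branch is set.
 */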
2657 static void ftrace_exports(struct ring_buffer_event *event)
2659 struct trace_export *export;
2661 preempt_disable_notrace();
2663 export = rcu_dereference_raw_check(ftrace_exports_list);
2665 trace_process_export(export, event);
2666 export = rcu_dereference_raw_check(export->next);
2669 preempt_enable_notrace();
2673 add_trace_export(struct trace_export **list, struct trace_export *export)
2675 rcu_assign_pointer(export->next, *list);
2677 * We are entering export into the list but another
2678 * CPU might be walking that list. We need to make sure
2679 * the export->next pointer is valid before another CPU sees
2680 * the export pointer included into the list.
2682 rcu_assign_pointer(*list, export);
2686 rm_trace_export(struct trace_export **list, struct trace_export *export)
2688 struct trace_export **p;
2690 for (p = list; *p != NULL; p = &(*p)->next)
2697 rcu_assign_pointer(*p, (*p)->next);
2703 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2706 ftrace_exports_enable();
2708 add_trace_export(list, export);
2712 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2716 ret = rm_trace_export(list, export);
2718 ftrace_exports_disable();
2723 int register_ftrace_export(struct trace_export *export)
2725 if (WARN_ON_ONCE(!export->write))
2728 mutex_lock(&ftrace_export_lock);
2730 add_ftrace_export(&ftrace_exports_list, export);
2732 mutex_unlock(&ftrace_export_lock);
2736 EXPORT_SYMBOL_GPL(register_ftrace_export);
2738 int unregister_ftrace_export(struct trace_export *export)
2742 mutex_lock(&ftrace_export_lock);
2744 ret = rm_ftrace_export(&ftrace_exports_list, export);
2746 mutex_unlock(&ftrace_export_lock);
2750 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
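/*
 * Illustrative sketch (not part of this file): an external consumer of
 * function trace data supplies a ->write() callback and registers it.
 * The names below are hypothetical.
 *
 *	static void example_export_write(struct trace_export *export,
 *					 const void *entry, unsigned int size)
 *	{
 *		// forward the raw trace_entry bytes to the external sink
 *	}
 *
 *	static struct trace_export example_export = {
 *		.write	= example_export_write,
 *	};
 *
 *	register_ftrace_export(&example_export);
 *	...
 *	unregister_ftrace_export(&example_export);
 */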
2753 trace_function(struct trace_array *tr,
2754 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2757 struct trace_event_call *call = &event_function;
2758 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2759 struct ring_buffer_event *event;
2760 struct ftrace_entry *entry;
2762 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2766 entry = ring_buffer_event_data(event);
2768 entry->parent_ip = parent_ip;
2770 if (!call_filter_check_discard(call, entry, buffer, event)) {
2771 if (static_branch_unlikely(&ftrace_exports_enabled))
2772 ftrace_exports(event);
2773 __buffer_unlock_commit(buffer, event);
2777 #ifdef CONFIG_STACKTRACE
2779 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2780 #define FTRACE_KSTACK_NESTING 4
2782 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2784 struct ftrace_stack {
2785 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2789 struct ftrace_stacks {
2790 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2793 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
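/* Per-cpu nesting depth; indexes ftrace_stacks.stacks for the current context. */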
2794 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2796 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2797 unsigned long flags,
2798 int skip, int pc, struct pt_regs *regs)
2800 struct trace_event_call *call = &event_kernel_stack;
2801 struct ring_buffer_event *event;
2802 unsigned int size, nr_entries;
2803 struct ftrace_stack *fstack;
2804 struct stack_entry *entry;
2808 * Add one, for this function and the call to stack_trace_save()
2809 * If regs is set, then these functions will not be in the way.
2811 #ifndef CONFIG_UNWINDER_ORC
2817 * Since events can happen in NMIs there's no safe way to
2818 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2819 * or NMI comes in, it will just have to use the default
2820 * FTRACE_STACK_SIZE.
2822 preempt_disable_notrace();
2824 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2826 /* This should never happen. If it does, yell once and skip */
2827 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2831 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2832 * interrupt will either see the value pre increment or post
2833 * increment. If the interrupt happens pre increment it will have
2834 * restored the counter when it returns. We just need a barrier to
2835 * keep gcc from moving things around.
2839 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2840 size = ARRAY_SIZE(fstack->calls);
2843 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2846 nr_entries = stack_trace_save(fstack->calls, size, skip);
2849 size = nr_entries * sizeof(unsigned long);
2850 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2851 sizeof(*entry) + size, flags, pc);
2854 entry = ring_buffer_event_data(event);
2856 memcpy(&entry->caller, fstack->calls, size);
2857 entry->size = nr_entries;
2859 if (!call_filter_check_discard(call, entry, buffer, event))
2860 __buffer_unlock_commit(buffer, event);
2863 /* Again, don't let gcc optimize things here */
2865 __this_cpu_dec(ftrace_stack_reserve);
2866 preempt_enable_notrace();
2870 static inline void ftrace_trace_stack(struct trace_array *tr,
2871 struct ring_buffer *buffer,
2872 unsigned long flags,
2873 int skip, int pc, struct pt_regs *regs)
2875 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2878 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2881 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2884 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2886 if (rcu_is_watching()) {
2887 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2892 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2893 * but if the above rcu_is_watching() failed, then the NMI
2894 * triggered someplace critical, and rcu_irq_enter() should
2895 * not be called from NMI.
2897 if (unlikely(in_nmi()))
2900 rcu_irq_enter_irqson();
2901 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2902 rcu_irq_exit_irqson();
2906 * trace_dump_stack - record a stack back trace in the trace buffer
2907 * @skip: Number of functions to skip (helper handlers)
2909 void trace_dump_stack(int skip)
2911 unsigned long flags;
2913 if (tracing_disabled || tracing_selftest_running)
2916 local_save_flags(flags);
2918 #ifndef CONFIG_UNWINDER_ORC
2919 /* Skip 1 to skip this function. */
2922 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2923 flags, skip, preempt_count(), NULL);
2925 EXPORT_SYMBOL_GPL(trace_dump_stack);
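/*
 * Illustrative use (not part of this file): calling trace_dump_stack(0)
 * from a suspect code path records the current kernel stack into the
 * trace buffer instead of spamming the console.
 */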
2927 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
2928 static DEFINE_PER_CPU(int, user_stack_count);
2931 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2933 struct trace_event_call *call = &event_user_stack;
2934 struct ring_buffer_event *event;
2935 struct userstack_entry *entry;
2937 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2941 * NMIs can not handle page faults, even with fix ups.
2942 * Saving the user stack can (and often does) fault.
2944 if (unlikely(in_nmi()))
2948 * prevent recursion, since the user stack tracing may
2949 * trigger other kernel events.
2952 if (__this_cpu_read(user_stack_count))
2955 __this_cpu_inc(user_stack_count);
2957 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2958 sizeof(*entry), flags, pc);
2960 goto out_drop_count;
2961 entry = ring_buffer_event_data(event);
2963 entry->tgid = current->tgid;
2964 memset(&entry->caller, 0, sizeof(entry->caller));
2966 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
2967 if (!call_filter_check_discard(call, entry, buffer, event))
2968 __buffer_unlock_commit(buffer, event);
2971 __this_cpu_dec(user_stack_count);
2975 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
2976 static void ftrace_trace_userstack(struct ring_buffer *buffer,
2977 unsigned long flags, int pc)
2980 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
2982 #endif /* CONFIG_STACKTRACE */
2984 /* created for use with alloc_percpu */
2985 struct trace_buffer_struct {
2987 char buffer[4][TRACE_BUF_SIZE];
2990 static struct trace_buffer_struct *trace_percpu_buffer;
2993 * This allows for lockless recording. If we're nested too deeply, then
2994 * this returns NULL.
2996 static char *get_trace_buf(void)
2998 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3000 if (!buffer || buffer->nesting >= 4)
3005 /* Interrupts must see nesting incremented before we use the buffer */
3007 return &buffer->buffer[buffer->nesting][0];
3010 static void put_trace_buf(void)
3012 /* Don't let the decrement of nesting leak before this */
3014 this_cpu_dec(trace_percpu_buffer->nesting);
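/*
 * Every successful get_trace_buf() must be balanced by a put_trace_buf()
 * on the same CPU; otherwise the nesting counter never drops back and
 * later trace_printk()s on that CPU are silently dropped.
 */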
3017 static int alloc_percpu_trace_buffer(void)
3019 struct trace_buffer_struct *buffers;
3021 buffers = alloc_percpu(struct trace_buffer_struct);
3022 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3025 trace_percpu_buffer = buffers;
3029 static int buffers_allocated;
3031 void trace_printk_init_buffers(void)
3033 if (buffers_allocated)
3036 if (alloc_percpu_trace_buffer())
3039 /* trace_printk() is for debug use only. Don't use it in production. */
3042 pr_warn("**********************************************************\n");
3043 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3045 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3047 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3048 pr_warn("** unsafe for production use. **\n");
3050 pr_warn("** If you see this message and you are not debugging **\n");
3051 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3053 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3054 pr_warn("**********************************************************\n");
3056 /* Expand the buffers to set size */
3057 tracing_update_buffers();
3059 buffers_allocated = 1;
3062 * trace_printk_init_buffers() can be called by modules.
3063 * If that happens, then we need to start cmdline recording
3064 * directly here. If the global_trace.buffer is already
3065 * allocated here, then this was called by module code.
3067 if (global_trace.trace_buffer.buffer)
3068 tracing_start_cmdline_record();
3070 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3072 void trace_printk_start_comm(void)
3074 /* Start tracing comms if trace printk is set */
3075 if (!buffers_allocated)
3077 tracing_start_cmdline_record();
3080 static void trace_printk_start_stop_comm(int enabled)
3082 if (!buffers_allocated)
3086 tracing_start_cmdline_record();
3088 tracing_stop_cmdline_record();
3092 * trace_vbprintk - write binary msg to tracing buffer
3093 * @ip: The address of the caller
3094 * @fmt: The string format to write to the buffer
3095 * @args: Arguments for @fmt
3097 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3099 struct trace_event_call *call = &event_bprint;
3100 struct ring_buffer_event *event;
3101 struct ring_buffer *buffer;
3102 struct trace_array *tr = &global_trace;
3103 struct bprint_entry *entry;
3104 unsigned long flags;
3106 int len = 0, size, pc;
3108 if (unlikely(tracing_selftest_running || tracing_disabled))
3111 /* Don't pollute graph traces with trace_vprintk internals */
3112 pause_graph_tracing();
3114 pc = preempt_count();
3115 preempt_disable_notrace();
3117 tbuffer = get_trace_buf();
3123 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3125 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3128 local_save_flags(flags);
3129 size = sizeof(*entry) + sizeof(u32) * len;
3130 buffer = tr->trace_buffer.buffer;
3131 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3135 entry = ring_buffer_event_data(event);
3139 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3140 if (!call_filter_check_discard(call, entry, buffer, event)) {
3141 __buffer_unlock_commit(buffer, event);
3142 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3149 preempt_enable_notrace();
3150 unpause_graph_tracing();
3154 EXPORT_SYMBOL_GPL(trace_vbprintk);
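/*
 * Illustrative sketch (not part of this file): trace_vbprintk() is normally
 * reached through the trace_printk() macro, which stores the constant format
 * at build time and hands the binary arguments here, e.g.
 *
 *	trace_printk("request %d took %llu ns\n", req_id, delta_ns);
 *
 * where req_id and delta_ns are hypothetical caller variables.
 */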
3158 __trace_array_vprintk(struct ring_buffer *buffer,
3159 unsigned long ip, const char *fmt, va_list args)
3161 struct trace_event_call *call = &event_print;
3162 struct ring_buffer_event *event;
3163 int len = 0, size, pc;
3164 struct print_entry *entry;
3165 unsigned long flags;
3168 if (tracing_disabled || tracing_selftest_running)
3171 /* Don't pollute graph traces with trace_vprintk internals */
3172 pause_graph_tracing();
3174 pc = preempt_count();
3175 preempt_disable_notrace();
3178 tbuffer = get_trace_buf();
3184 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3186 local_save_flags(flags);
3187 size = sizeof(*entry) + len + 1;
3188 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3192 entry = ring_buffer_event_data(event);
3195 memcpy(&entry->buf, tbuffer, len + 1);
3196 if (!call_filter_check_discard(call, entry, buffer, event)) {
3197 __buffer_unlock_commit(buffer, event);
3198 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3205 preempt_enable_notrace();
3206 unpause_graph_tracing();
3212 int trace_array_vprintk(struct trace_array *tr,
3213 unsigned long ip, const char *fmt, va_list args)
3215 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3219 int trace_array_printk(struct trace_array *tr,
3220 unsigned long ip, const char *fmt, ...)
3225 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3229 ret = trace_array_vprintk(tr, ip, fmt, ap);
3233 EXPORT_SYMBOL_GPL(trace_array_printk);
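/*
 * Illustrative use (not part of this file; tr and state are hypothetical):
 * an instance owner can write into its own buffer rather than the global
 * one with:
 *
 *	trace_array_printk(tr, _THIS_IP_, "state=%d\n", state);
 */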
3236 int trace_array_printk_buf(struct ring_buffer *buffer,
3237 unsigned long ip, const char *fmt, ...)
3242 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3246 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3252 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3254 return trace_array_vprintk(&global_trace, ip, fmt, args);
3256 EXPORT_SYMBOL_GPL(trace_vprintk);
3258 static void trace_iterator_increment(struct trace_iterator *iter)
3260 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3264 ring_buffer_read(buf_iter, NULL);
3267 static struct trace_entry *
3268 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3269 unsigned long *lost_events)
3271 struct ring_buffer_event *event;
3272 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3275 event = ring_buffer_iter_peek(buf_iter, ts);
3277 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3281 iter->ent_size = ring_buffer_event_length(event);
3282 return ring_buffer_event_data(event);
3288 static struct trace_entry *
3289 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3290 unsigned long *missing_events, u64 *ent_ts)
3292 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3293 struct trace_entry *ent, *next = NULL;
3294 unsigned long lost_events = 0, next_lost = 0;
3295 int cpu_file = iter->cpu_file;
3296 u64 next_ts = 0, ts;
3302 * If we are in a per_cpu trace file, don't bother iterating over
3303 * all CPUs; just peek at that one file directly.
3305 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3306 if (ring_buffer_empty_cpu(buffer, cpu_file))
3308 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3310 *ent_cpu = cpu_file;
3315 for_each_tracing_cpu(cpu) {
3317 if (ring_buffer_empty_cpu(buffer, cpu))
3320 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3323 * Pick the entry with the smallest timestamp:
3325 if (ent && (!next || ts < next_ts)) {
3329 next_lost = lost_events;
3330 next_size = iter->ent_size;
3334 iter->ent_size = next_size;
3337 *ent_cpu = next_cpu;
3343 *missing_events = next_lost;
3348 /* Find the next real entry, without updating the iterator itself */
3349 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3350 int *ent_cpu, u64 *ent_ts)
3352 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3355 /* Find the next real entry, and increment the iterator to the next entry */
3356 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3358 iter->ent = __find_next_entry(iter, &iter->cpu,
3359 &iter->lost_events, &iter->ts);
3362 trace_iterator_increment(iter);
3364 return iter->ent ? iter : NULL;
3367 static void trace_consume(struct trace_iterator *iter)
3369 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3370 &iter->lost_events);
3373 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3375 struct trace_iterator *iter = m->private;
3379 WARN_ON_ONCE(iter->leftover);
3383 /* can't go backwards */
3388 ent = trace_find_next_entry_inc(iter);
3392 while (ent && iter->idx < i)
3393 ent = trace_find_next_entry_inc(iter);
3400 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3402 struct ring_buffer_event *event;
3403 struct ring_buffer_iter *buf_iter;
3404 unsigned long entries = 0;
3407 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3409 buf_iter = trace_buffer_iter(iter, cpu);
3413 ring_buffer_iter_reset(buf_iter);
3416 * We could have the case with the max latency tracers
3417 * that a reset never took place on a cpu. This is evident
3418 * by the timestamp being before the start of the buffer.
3420 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3421 if (ts >= iter->trace_buffer->time_start)
3424 ring_buffer_read(buf_iter, NULL);
3427 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3431 * The current tracer is copied to avoid a global locking
3434 static void *s_start(struct seq_file *m, loff_t *pos)
3436 struct trace_iterator *iter = m->private;
3437 struct trace_array *tr = iter->tr;
3438 int cpu_file = iter->cpu_file;
3444 * copy the tracer to avoid using a global lock all around.
3445 * iter->trace is a copy of current_trace, the pointer to the
3446 * name may be used instead of a strcmp(), as iter->trace->name
3447 * will point to the same string as current_trace->name.
3449 mutex_lock(&trace_types_lock);
3450 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3451 *iter->trace = *tr->current_trace;
3452 mutex_unlock(&trace_types_lock);
3454 #ifdef CONFIG_TRACER_MAX_TRACE
3455 if (iter->snapshot && iter->trace->use_max_tr)
3456 return ERR_PTR(-EBUSY);
3459 if (!iter->snapshot)
3460 atomic_inc(&trace_record_taskinfo_disabled);
3462 if (*pos != iter->pos) {
3467 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3468 for_each_tracing_cpu(cpu)
3469 tracing_iter_reset(iter, cpu);
3471 tracing_iter_reset(iter, cpu_file);
3474 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3479 * If we overflowed the seq_file before, then we want
3480 * to just reuse the trace_seq buffer again.
3486 p = s_next(m, p, &l);
3490 trace_event_read_lock();
3491 trace_access_lock(cpu_file);
3495 static void s_stop(struct seq_file *m, void *p)
3497 struct trace_iterator *iter = m->private;
3499 #ifdef CONFIG_TRACER_MAX_TRACE
3500 if (iter->snapshot && iter->trace->use_max_tr)
3504 if (!iter->snapshot)
3505 atomic_dec(&trace_record_taskinfo_disabled);
3507 trace_access_unlock(iter->cpu_file);
3508 trace_event_read_unlock();
3512 get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3513 unsigned long *entries, int cpu)
3515 unsigned long count;
3517 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3519 * If this buffer has skipped entries, then we hold all
3520 * entries for the trace and we need to ignore the
3521 * ones before the time stamp.
3523 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3524 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3525 /* total is the same as the entries */
3529 ring_buffer_overrun_cpu(buf->buffer, cpu);
3534 get_total_entries(struct trace_buffer *buf,
3535 unsigned long *total, unsigned long *entries)
3543 for_each_tracing_cpu(cpu) {
3544 get_total_entries_cpu(buf, &t, &e, cpu);
3550 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3552 unsigned long total, entries;
3557 get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3562 unsigned long trace_total_entries(struct trace_array *tr)
3564 unsigned long total, entries;
3569 get_total_entries(&tr->trace_buffer, &total, &entries);
3574 static void print_lat_help_header(struct seq_file *m)
3576 seq_puts(m, "# _------=> CPU# \n"
3577 "# / _-----=> irqs-off \n"
3578 "# | / _----=> need-resched \n"
3579 "# || / _---=> hardirq/softirq \n"
3580 "# ||| / _--=> preempt-depth \n"
3582 "# cmd pid ||||| time | caller \n"
3583 "# \\ / ||||| \\ | / \n");
3586 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3588 unsigned long total;
3589 unsigned long entries;
3591 get_total_entries(buf, &total, &entries);
3592 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3593 entries, total, num_online_cpus());
3597 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3600 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3602 print_event_info(buf, m);
3604 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3605 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3608 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3611 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3612 const char *space = " ";
3613 int prec = tgid ? 10 : 2;
3615 print_event_info(buf, m);
3617 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3618 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3619 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3620 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3621 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3622 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3623 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3627 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3629 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3630 struct trace_buffer *buf = iter->trace_buffer;
3631 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3632 struct tracer *type = iter->trace;
3633 unsigned long entries;
3634 unsigned long total;
3635 const char *name = "preemption";
3639 get_total_entries(buf, &total, &entries);
3641 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3643 seq_puts(m, "# -----------------------------------"
3644 "---------------------------------\n");
3645 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3646 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3647 nsecs_to_usecs(data->saved_latency),
3651 #if defined(CONFIG_PREEMPT_NONE)
3653 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3655 #elif defined(CONFIG_PREEMPT)
3660 /* These are reserved for later use */
3663 seq_printf(m, " #P:%d)\n", num_online_cpus());
3667 seq_puts(m, "# -----------------\n");
3668 seq_printf(m, "# | task: %.16s-%d "
3669 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3670 data->comm, data->pid,
3671 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3672 data->policy, data->rt_priority);
3673 seq_puts(m, "# -----------------\n");
3675 if (data->critical_start) {
3676 seq_puts(m, "# => started at: ");
3677 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3678 trace_print_seq(m, &iter->seq);
3679 seq_puts(m, "\n# => ended at: ");
3680 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3681 trace_print_seq(m, &iter->seq);
3682 seq_puts(m, "\n#\n");
3688 static void test_cpu_buff_start(struct trace_iterator *iter)
3690 struct trace_seq *s = &iter->seq;
3691 struct trace_array *tr = iter->tr;
3693 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3696 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3699 if (cpumask_available(iter->started) &&
3700 cpumask_test_cpu(iter->cpu, iter->started))
3703 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3706 if (cpumask_available(iter->started))
3707 cpumask_set_cpu(iter->cpu, iter->started);
3709 /* Don't print started cpu buffer for the first entry of the trace */
3711 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3715 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3717 struct trace_array *tr = iter->tr;
3718 struct trace_seq *s = &iter->seq;
3719 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3720 struct trace_entry *entry;
3721 struct trace_event *event;
3725 test_cpu_buff_start(iter);
3727 event = ftrace_find_event(entry->type);
3729 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3730 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3731 trace_print_lat_context(iter);
3733 trace_print_context(iter);
3736 if (trace_seq_has_overflowed(s))
3737 return TRACE_TYPE_PARTIAL_LINE;
3740 return event->funcs->trace(iter, sym_flags, event);
3742 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3744 return trace_handle_return(s);
3747 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3749 struct trace_array *tr = iter->tr;
3750 struct trace_seq *s = &iter->seq;
3751 struct trace_entry *entry;
3752 struct trace_event *event;
3756 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3757 trace_seq_printf(s, "%d %d %llu ",
3758 entry->pid, iter->cpu, iter->ts);
3760 if (trace_seq_has_overflowed(s))
3761 return TRACE_TYPE_PARTIAL_LINE;
3763 event = ftrace_find_event(entry->type);
3765 return event->funcs->raw(iter, 0, event);
3767 trace_seq_printf(s, "%d ?\n", entry->type);
3769 return trace_handle_return(s);
3772 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3774 struct trace_array *tr = iter->tr;
3775 struct trace_seq *s = &iter->seq;
3776 unsigned char newline = '\n';
3777 struct trace_entry *entry;
3778 struct trace_event *event;
3782 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3783 SEQ_PUT_HEX_FIELD(s, entry->pid);
3784 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3785 SEQ_PUT_HEX_FIELD(s, iter->ts);
3786 if (trace_seq_has_overflowed(s))
3787 return TRACE_TYPE_PARTIAL_LINE;
3790 event = ftrace_find_event(entry->type);
3792 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3793 if (ret != TRACE_TYPE_HANDLED)
3797 SEQ_PUT_FIELD(s, newline);
3799 return trace_handle_return(s);
3802 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3804 struct trace_array *tr = iter->tr;
3805 struct trace_seq *s = &iter->seq;
3806 struct trace_entry *entry;
3807 struct trace_event *event;
3811 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3812 SEQ_PUT_FIELD(s, entry->pid);
3813 SEQ_PUT_FIELD(s, iter->cpu);
3814 SEQ_PUT_FIELD(s, iter->ts);
3815 if (trace_seq_has_overflowed(s))
3816 return TRACE_TYPE_PARTIAL_LINE;
3819 event = ftrace_find_event(entry->type);
3820 return event ? event->funcs->binary(iter, 0, event) :
3824 int trace_empty(struct trace_iterator *iter)
3826 struct ring_buffer_iter *buf_iter;
3829 /* If we are looking at one CPU buffer, only check that one */
3830 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3831 cpu = iter->cpu_file;
3832 buf_iter = trace_buffer_iter(iter, cpu);
3834 if (!ring_buffer_iter_empty(buf_iter))
3837 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3843 for_each_tracing_cpu(cpu) {
3844 buf_iter = trace_buffer_iter(iter, cpu);
3846 if (!ring_buffer_iter_empty(buf_iter))
3849 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3857 /* Called with trace_event_read_lock() held. */
3858 enum print_line_t print_trace_line(struct trace_iterator *iter)
3860 struct trace_array *tr = iter->tr;
3861 unsigned long trace_flags = tr->trace_flags;
3862 enum print_line_t ret;
3864 if (iter->lost_events) {
3865 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3866 iter->cpu, iter->lost_events);
3867 if (trace_seq_has_overflowed(&iter->seq))
3868 return TRACE_TYPE_PARTIAL_LINE;
3871 if (iter->trace && iter->trace->print_line) {
3872 ret = iter->trace->print_line(iter);
3873 if (ret != TRACE_TYPE_UNHANDLED)
3877 if (iter->ent->type == TRACE_BPUTS &&
3878 trace_flags & TRACE_ITER_PRINTK &&
3879 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3880 return trace_print_bputs_msg_only(iter);
3882 if (iter->ent->type == TRACE_BPRINT &&
3883 trace_flags & TRACE_ITER_PRINTK &&
3884 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3885 return trace_print_bprintk_msg_only(iter);
3887 if (iter->ent->type == TRACE_PRINT &&
3888 trace_flags & TRACE_ITER_PRINTK &&
3889 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3890 return trace_print_printk_msg_only(iter);
3892 if (trace_flags & TRACE_ITER_BIN)
3893 return print_bin_fmt(iter);
3895 if (trace_flags & TRACE_ITER_HEX)
3896 return print_hex_fmt(iter);
3898 if (trace_flags & TRACE_ITER_RAW)
3899 return print_raw_fmt(iter);
3901 return print_trace_fmt(iter);
3904 void trace_latency_header(struct seq_file *m)
3906 struct trace_iterator *iter = m->private;
3907 struct trace_array *tr = iter->tr;
3909 /* print nothing if the buffers are empty */
3910 if (trace_empty(iter))
3913 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3914 print_trace_header(m, iter);
3916 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3917 print_lat_help_header(m);
3920 void trace_default_header(struct seq_file *m)
3922 struct trace_iterator *iter = m->private;
3923 struct trace_array *tr = iter->tr;
3924 unsigned long trace_flags = tr->trace_flags;
3926 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3929 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3930 /* print nothing if the buffers are empty */
3931 if (trace_empty(iter))
3933 print_trace_header(m, iter);
3934 if (!(trace_flags & TRACE_ITER_VERBOSE))
3935 print_lat_help_header(m);
3937 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3938 if (trace_flags & TRACE_ITER_IRQ_INFO)
3939 print_func_help_header_irq(iter->trace_buffer,
3942 print_func_help_header(iter->trace_buffer, m,
3948 static void test_ftrace_alive(struct seq_file *m)
3950 if (!ftrace_is_dead())
3952 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3953 "# MAY BE MISSING FUNCTION EVENTS\n");
3956 #ifdef CONFIG_TRACER_MAX_TRACE
3957 static void show_snapshot_main_help(struct seq_file *m)
3959 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3960 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3961 "# Takes a snapshot of the main buffer.\n"
3962 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3963 "# (Doesn't have to be '2'; works with any number that\n"
3964 "# is not a '0' or '1')\n");
3967 static void show_snapshot_percpu_help(struct seq_file *m)
3969 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3970 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3971 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3972 "# Takes a snapshot of the main buffer for this cpu.\n");
3974 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3975 "# Must use main snapshot file to allocate.\n");
3977 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3978 "# (Doesn't have to be '2'; works with any number that\n"
3979 "# is not a '0' or '1')\n");
3982 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3984 if (iter->tr->allocated_snapshot)
3985 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3987 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3989 seq_puts(m, "# Snapshot commands:\n");
3990 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3991 show_snapshot_main_help(m);
3993 show_snapshot_percpu_help(m);
3996 /* Should never be called */
3997 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4000 static int s_show(struct seq_file *m, void *v)
4002 struct trace_iterator *iter = v;
4005 if (iter->ent == NULL) {
4007 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4009 test_ftrace_alive(m);
4011 if (iter->snapshot && trace_empty(iter))
4012 print_snapshot_help(m, iter);
4013 else if (iter->trace && iter->trace->print_header)
4014 iter->trace->print_header(m);
4016 trace_default_header(m);
4018 } else if (iter->leftover) {
4020 * If we filled the seq_file buffer earlier, we
4021 * want to just show it now.
4023 ret = trace_print_seq(m, &iter->seq);
4025 /* ret should this time be zero, but you never know */
4026 iter->leftover = ret;
4029 print_trace_line(iter);
4030 ret = trace_print_seq(m, &iter->seq);
4032 * If we overflow the seq_file buffer, then it will
4033 * ask us for this data again at start up.
4035 * ret is 0 if seq_file write succeeded.
4038 iter->leftover = ret;
4045 * Should be used after trace_array_get(), trace_types_lock
4046 * ensures that i_cdev was already initialized.
4048 static inline int tracing_get_cpu(struct inode *inode)
4050 if (inode->i_cdev) /* See trace_create_cpu_file() */
4051 return (long)inode->i_cdev - 1;
4052 return RING_BUFFER_ALL_CPUS;
4055 static const struct seq_operations tracer_seq_ops = {
4062 static struct trace_iterator *
4063 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4065 struct trace_array *tr = inode->i_private;
4066 struct trace_iterator *iter;
4069 if (tracing_disabled)
4070 return ERR_PTR(-ENODEV);
4072 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4074 return ERR_PTR(-ENOMEM);
4076 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4078 if (!iter->buffer_iter)
4082 * We make a copy of the current tracer to avoid concurrent
4083 * changes on it while we are reading.
4085 mutex_lock(&trace_types_lock);
4086 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4090 *iter->trace = *tr->current_trace;
4092 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4097 #ifdef CONFIG_TRACER_MAX_TRACE
4098 /* Currently only the top directory has a snapshot */
4099 if (tr->current_trace->print_max || snapshot)
4100 iter->trace_buffer = &tr->max_buffer;
4103 iter->trace_buffer = &tr->trace_buffer;
4104 iter->snapshot = snapshot;
4106 iter->cpu_file = tracing_get_cpu(inode);
4107 mutex_init(&iter->mutex);
4109 /* Notify the tracer early; before we stop tracing. */
4110 if (iter->trace && iter->trace->open)
4111 iter->trace->open(iter);
4113 /* Annotate start of buffers if we had overruns */
4114 if (ring_buffer_overruns(iter->trace_buffer->buffer))
4115 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4117 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4118 if (trace_clocks[tr->clock_id].in_ns)
4119 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4121 /* stop the trace while dumping if we are not opening "snapshot" */
4122 if (!iter->snapshot)
4123 tracing_stop_tr(tr);
4125 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4126 for_each_tracing_cpu(cpu) {
4127 iter->buffer_iter[cpu] =
4128 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4131 ring_buffer_read_prepare_sync();
4132 for_each_tracing_cpu(cpu) {
4133 ring_buffer_read_start(iter->buffer_iter[cpu]);
4134 tracing_iter_reset(iter, cpu);
4137 cpu = iter->cpu_file;
4138 iter->buffer_iter[cpu] =
4139 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4141 ring_buffer_read_prepare_sync();
4142 ring_buffer_read_start(iter->buffer_iter[cpu]);
4143 tracing_iter_reset(iter, cpu);
4146 mutex_unlock(&trace_types_lock);
4151 mutex_unlock(&trace_types_lock);
4153 kfree(iter->buffer_iter);
4155 seq_release_private(inode, file);
4156 return ERR_PTR(-ENOMEM);
4159 int tracing_open_generic(struct inode *inode, struct file *filp)
4163 ret = tracing_check_open_get_tr(NULL);
4167 filp->private_data = inode->i_private;
4171 bool tracing_is_disabled(void)
4173 return (tracing_disabled) ? true : false;
4177 * Open and update trace_array ref count.
4178 * Must have the current trace_array passed to it.
4180 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4182 struct trace_array *tr = inode->i_private;
4185 ret = tracing_check_open_get_tr(tr);
4189 filp->private_data = inode->i_private;
4194 static int tracing_release(struct inode *inode, struct file *file)
4196 struct trace_array *tr = inode->i_private;
4197 struct seq_file *m = file->private_data;
4198 struct trace_iterator *iter;
4201 if (!(file->f_mode & FMODE_READ)) {
4202 trace_array_put(tr);
4206 /* Writes do not use seq_file */
4208 mutex_lock(&trace_types_lock);
4210 for_each_tracing_cpu(cpu) {
4211 if (iter->buffer_iter[cpu])
4212 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4215 if (iter->trace && iter->trace->close)
4216 iter->trace->close(iter);
4218 if (!iter->snapshot)
4219 /* reenable tracing if it was previously enabled */
4220 tracing_start_tr(tr);
4222 __trace_array_put(tr);
4224 mutex_unlock(&trace_types_lock);
4226 mutex_destroy(&iter->mutex);
4227 free_cpumask_var(iter->started);
4229 kfree(iter->buffer_iter);
4230 seq_release_private(inode, file);
4235 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4237 struct trace_array *tr = inode->i_private;
4239 trace_array_put(tr);
4243 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4245 struct trace_array *tr = inode->i_private;
4247 trace_array_put(tr);
4249 return single_release(inode, file);
4252 static int tracing_open(struct inode *inode, struct file *file)
4254 struct trace_array *tr = inode->i_private;
4255 struct trace_iterator *iter;
4258 ret = tracing_check_open_get_tr(tr);
4262 /* If this file was open for write, then erase contents */
4263 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4264 int cpu = tracing_get_cpu(inode);
4265 struct trace_buffer *trace_buf = &tr->trace_buffer;
4267 #ifdef CONFIG_TRACER_MAX_TRACE
4268 if (tr->current_trace->print_max)
4269 trace_buf = &tr->max_buffer;
4272 if (cpu == RING_BUFFER_ALL_CPUS)
4273 tracing_reset_online_cpus(trace_buf);
4275 tracing_reset_cpu(trace_buf, cpu);
4278 if (file->f_mode & FMODE_READ) {
4279 iter = __tracing_open(inode, file, false);
4281 ret = PTR_ERR(iter);
4282 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4283 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4287 trace_array_put(tr);
4293 * Some tracers are not suitable for instance buffers.
4294 * A tracer is always available for the global array (toplevel)
4295 * or if it explicitly states that it is.
4298 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4300 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4303 /* Find the next tracer that this trace array may use */
4304 static struct tracer *
4305 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4307 while (t && !trace_ok_for_array(t, tr))
4314 t_next(struct seq_file *m, void *v, loff_t *pos)
4316 struct trace_array *tr = m->private;
4317 struct tracer *t = v;
4322 t = get_tracer_for_array(tr, t->next);
4327 static void *t_start(struct seq_file *m, loff_t *pos)
4329 struct trace_array *tr = m->private;
4333 mutex_lock(&trace_types_lock);
4335 t = get_tracer_for_array(tr, trace_types);
4336 for (; t && l < *pos; t = t_next(m, t, &l))
4342 static void t_stop(struct seq_file *m, void *p)
4344 mutex_unlock(&trace_types_lock);
4347 static int t_show(struct seq_file *m, void *v)
4349 struct tracer *t = v;
4354 seq_puts(m, t->name);
4363 static const struct seq_operations show_traces_seq_ops = {
4370 static int show_traces_open(struct inode *inode, struct file *file)
4372 struct trace_array *tr = inode->i_private;
4376 ret = tracing_check_open_get_tr(tr);
4380 ret = seq_open(file, &show_traces_seq_ops);
4382 trace_array_put(tr);
4386 m = file->private_data;
4392 static int show_traces_release(struct inode *inode, struct file *file)
4394 struct trace_array *tr = inode->i_private;
4396 trace_array_put(tr);
4397 return seq_release(inode, file);
4401 tracing_write_stub(struct file *filp, const char __user *ubuf,
4402 size_t count, loff_t *ppos)
4407 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4411 if (file->f_mode & FMODE_READ)
4412 ret = seq_lseek(file, offset, whence);
4414 file->f_pos = ret = 0;
4419 static const struct file_operations tracing_fops = {
4420 .open = tracing_open,
4422 .write = tracing_write_stub,
4423 .llseek = tracing_lseek,
4424 .release = tracing_release,
4427 static const struct file_operations show_traces_fops = {
4428 .open = show_traces_open,
4430 .llseek = seq_lseek,
4431 .release = show_traces_release,
4435 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4436 size_t count, loff_t *ppos)
4438 struct trace_array *tr = file_inode(filp)->i_private;
4442 len = snprintf(NULL, 0, "%*pb\n",
4443 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4444 mask_str = kmalloc(len, GFP_KERNEL);
4448 len = snprintf(mask_str, len, "%*pb\n",
4449 cpumask_pr_args(tr->tracing_cpumask));
4454 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4463 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4464 size_t count, loff_t *ppos)
4466 struct trace_array *tr = file_inode(filp)->i_private;
4467 cpumask_var_t tracing_cpumask_new;
4470 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4473 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4477 local_irq_disable();
4478 arch_spin_lock(&tr->max_lock);
4479 for_each_tracing_cpu(cpu) {
4481 * Increase/decrease the disabled counter if we are
4482 * about to flip a bit in the cpumask:
4484 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4485 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4486 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4487 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4489 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4490 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4491 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4492 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4495 arch_spin_unlock(&tr->max_lock);
4498 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4499 free_cpumask_var(tracing_cpumask_new);
4504 free_cpumask_var(tracing_cpumask_new);
4509 static const struct file_operations tracing_cpumask_fops = {
4510 .open = tracing_open_generic_tr,
4511 .read = tracing_cpumask_read,
4512 .write = tracing_cpumask_write,
4513 .release = tracing_release_generic_tr,
4514 .llseek = generic_file_llseek,
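/*
 * Illustrative use from user space (not part of this file): the cpumask is
 * read and written as a hex mask, e.g.
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask	(trace CPUs 0 and 1)
 */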
4517 static int tracing_trace_options_show(struct seq_file *m, void *v)
4519 struct tracer_opt *trace_opts;
4520 struct trace_array *tr = m->private;
4524 mutex_lock(&trace_types_lock);
4525 tracer_flags = tr->current_trace->flags->val;
4526 trace_opts = tr->current_trace->flags->opts;
4528 for (i = 0; trace_options[i]; i++) {
4529 if (tr->trace_flags & (1 << i))
4530 seq_printf(m, "%s\n", trace_options[i]);
4532 seq_printf(m, "no%s\n", trace_options[i]);
4535 for (i = 0; trace_opts[i].name; i++) {
4536 if (tracer_flags & trace_opts[i].bit)
4537 seq_printf(m, "%s\n", trace_opts[i].name);
4539 seq_printf(m, "no%s\n", trace_opts[i].name);
4541 mutex_unlock(&trace_types_lock);
4546 static int __set_tracer_option(struct trace_array *tr,
4547 struct tracer_flags *tracer_flags,
4548 struct tracer_opt *opts, int neg)
4550 struct tracer *trace = tracer_flags->trace;
4553 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4558 tracer_flags->val &= ~opts->bit;
4560 tracer_flags->val |= opts->bit;
4564 /* Try to assign a tracer specific option */
4565 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4567 struct tracer *trace = tr->current_trace;
4568 struct tracer_flags *tracer_flags = trace->flags;
4569 struct tracer_opt *opts = NULL;
4572 for (i = 0; tracer_flags->opts[i].name; i++) {
4573 opts = &tracer_flags->opts[i];
4575 if (strcmp(cmp, opts->name) == 0)
4576 return __set_tracer_option(tr, trace->flags, opts, neg);
4582 /* Some tracers require overwrite to stay enabled */
4583 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4585 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4591 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4593 /* do nothing if flag is already set */
4594 if (!!(tr->trace_flags & mask) == !!enabled)
4597 /* Give the tracer a chance to approve the change */
4598 if (tr->current_trace->flag_changed)
4599 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4603 tr->trace_flags |= mask;
4605 tr->trace_flags &= ~mask;
4607 if (mask == TRACE_ITER_RECORD_CMD)
4608 trace_event_enable_cmd_record(enabled);
4610 if (mask == TRACE_ITER_RECORD_TGID) {
4612 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4616 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4620 trace_event_enable_tgid_record(enabled);
4623 if (mask == TRACE_ITER_EVENT_FORK)
4624 trace_event_follow_fork(tr, enabled);
4626 if (mask == TRACE_ITER_FUNC_FORK)
4627 ftrace_pid_follow_fork(tr, enabled);
4629 if (mask == TRACE_ITER_OVERWRITE) {
4630 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4631 #ifdef CONFIG_TRACER_MAX_TRACE
4632 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4636 if (mask == TRACE_ITER_PRINTK) {
4637 trace_printk_start_stop_comm(enabled);
4638 trace_printk_control(enabled);
4644 static int trace_set_options(struct trace_array *tr, char *option)
4649 size_t orig_len = strlen(option);
4652 cmp = strstrip(option);
4654 len = str_has_prefix(cmp, "no");
4660 mutex_lock(&trace_types_lock);
4662 ret = match_string(trace_options, -1, cmp);
4663 /* If no option could be set, test the specific tracer options */
4665 ret = set_tracer_option(tr, cmp, neg);
4667 ret = set_tracer_flag(tr, 1 << ret, !neg);
4669 mutex_unlock(&trace_types_lock);
4672 * If the first trailing whitespace is replaced with '\0' by strstrip,
4673 * turn it back into a space.
4675 if (orig_len > strlen(option))
4676 option[strlen(option)] = ' ';
4681 static void __init apply_trace_boot_options(void)
4683 char *buf = trace_boot_options_buf;
4687 option = strsep(&buf, ",");
4693 trace_set_options(&global_trace, option);
4695 /* Put back the comma to allow this to be called again */
4702 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4703 size_t cnt, loff_t *ppos)
4705 struct seq_file *m = filp->private_data;
4706 struct trace_array *tr = m->private;
4710 if (cnt >= sizeof(buf))
4713 if (copy_from_user(buf, ubuf, cnt))
4718 ret = trace_set_options(tr, buf);
4727 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4729 struct trace_array *tr = inode->i_private;
4732 ret = tracing_check_open_get_tr(tr);
4736 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4738 trace_array_put(tr);
4743 static const struct file_operations tracing_iter_fops = {
4744 .open = tracing_trace_options_open,
4746 .llseek = seq_lseek,
4747 .release = tracing_single_release_tr,
4748 .write = tracing_trace_options_write,
4751 static const char readme_msg[] =
4752 "tracing mini-HOWTO:\n\n"
4753 "# echo 0 > tracing_on : quick way to disable tracing\n"
4754 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4755 " Important files:\n"
4756 " trace\t\t\t- The static contents of the buffer\n"
4757 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4758 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4759 " current_tracer\t- function and latency tracers\n"
4760 " available_tracers\t- list of configured tracers for current_tracer\n"
4761 " error_log\t- error log for failed commands (that support it)\n"
4762 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4763 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4764 " trace_clock\t\t- change the clock used to order events\n"
4765 " local: Per cpu clock but may not be synced across CPUs\n"
4766 " global: Synced across CPUs but slows tracing down.\n"
4767 " counter: Not a clock, but just an increment\n"
4768 " uptime: Jiffy counter from time of boot\n"
4769 " perf: Same clock that perf events use\n"
4770 #ifdef CONFIG_X86_64
4771 " x86-tsc: TSC cycle counter\n"
4773 "\n timestamp_mode\t- view the mode used to timestamp events\n"
4774 " delta: Delta difference against a buffer-wide timestamp\n"
4775 " absolute: Absolute (standalone) timestamp\n"
4776 "\n trace_marker\t\t- Writes to this file are written into the kernel buffer\n"
4777 "\n trace_marker_raw\t\t- Writes to this file are written as binary data into the kernel buffer\n"
4778 " tracing_cpumask\t- Limit which CPUs to trace\n"
4779 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4780 "\t\t\t Remove sub-buffer with rmdir\n"
4781 " trace_options\t\t- Set format or modify how tracing happens\n"
4782 "\t\t\t Disable an option by prefixing 'no' to the\n"
4783 "\t\t\t option name\n"
4784 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4785 #ifdef CONFIG_DYNAMIC_FTRACE
4786 "\n available_filter_functions - list of functions that can be filtered on\n"
4787 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4788 "\t\t\t functions\n"
4789 "\t accepts: func_full_name or glob-matching-pattern\n"
4790 "\t modules: Can select a group via module\n"
4791 "\t Format: :mod:<module-name>\n"
4792 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4793 "\t triggers: a command to perform when function is hit\n"
4794 "\t Format: <function>:<trigger>[:count]\n"
4795 "\t trigger: traceon, traceoff\n"
4796 "\t\t enable_event:<system>:<event>\n"
4797 "\t\t disable_event:<system>:<event>\n"
4798 #ifdef CONFIG_STACKTRACE
4801 #ifdef CONFIG_TRACER_SNAPSHOT
4806 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4807 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4808 "\t The first one will disable tracing every time do_fault is hit\n"
4809 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4810 "\t The first time do_trap is hit and it disables tracing, the\n"
4811 "\t counter will decrement to 2. If tracing is already disabled,\n"
4812 "\t the counter will not decrement. It only decrements when the\n"
4813 "\t trigger did work\n"
4814 "\t To remove trigger without count:\n"
4815 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4816 "\t To remove trigger with a count:\n"
4817 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4818 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4819 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4820 "\t modules: Can select a group via module command :mod:\n"
4821 "\t Does not accept triggers\n"
4822 #endif /* CONFIG_DYNAMIC_FTRACE */
4823 #ifdef CONFIG_FUNCTION_TRACER
4824 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4827 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4828 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4829 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4830 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4832 #ifdef CONFIG_TRACER_SNAPSHOT
4833 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4834 "\t\t\t snapshot buffer. Read the contents for more\n"
4835 "\t\t\t information\n"
4837 #ifdef CONFIG_STACK_TRACER
4838 " stack_trace\t\t- Shows the max stack trace when active\n"
4839 " stack_max_size\t- Shows current max stack size that was traced\n"
4840 "\t\t\t Write into this file to reset the max size (trigger a\n"
4841 "\t\t\t new trace)\n"
4842 #ifdef CONFIG_DYNAMIC_FTRACE
4843 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4846 #endif /* CONFIG_STACK_TRACER */
4847 #ifdef CONFIG_DYNAMIC_EVENTS
4848 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
4849 "\t\t\t Write into this file to define/undefine new trace events.\n"
4851 #ifdef CONFIG_KPROBE_EVENTS
4852 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
4853 "\t\t\t Write into this file to define/undefine new trace events.\n"
4855 #ifdef CONFIG_UPROBE_EVENTS
4856 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
4857 "\t\t\t Write into this file to define/undefine new trace events.\n"
4859 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4860 "\t accepts: event-definitions (one definition per line)\n"
4861 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4862 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4863 #ifdef CONFIG_HIST_TRIGGERS
4864 "\t s:[synthetic/]<event> <field> [<field>]\n"
4866 "\t -:[<group>/]<event>\n"
4867 #ifdef CONFIG_KPROBE_EVENTS
4868 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4869	"\t   place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4871 #ifdef CONFIG_UPROBE_EVENTS
4872	"\t   place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4874 "\t args: <name>=fetcharg[:type]\n"
4875 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4876 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4877 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
4879 "\t $stack<index>, $stack, $retval, $comm,\n"
4881 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
4882 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4883 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
4884 "\t <type>\\[<array-size>\\]\n"
4885 #ifdef CONFIG_HIST_TRIGGERS
4886 "\t field: <stype> <name>;\n"
4887 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4888 "\t [unsigned] char/int/long\n"
4891 " events/\t\t- Directory containing all trace event subsystems:\n"
4892 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4893 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4894 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4896 " filter\t\t- If set, only events passing filter are traced\n"
4897 " events/<system>/<event>/\t- Directory containing control files for\n"
4899 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4900 " filter\t\t- If set, only events passing filter are traced\n"
4901 " trigger\t\t- If set, a command to perform when event is hit\n"
4902 "\t Format: <trigger>[:count][if <filter>]\n"
4903 "\t trigger: traceon, traceoff\n"
4904 "\t enable_event:<system>:<event>\n"
4905 "\t disable_event:<system>:<event>\n"
4906 #ifdef CONFIG_HIST_TRIGGERS
4907 "\t enable_hist:<system>:<event>\n"
4908 "\t disable_hist:<system>:<event>\n"
4910 #ifdef CONFIG_STACKTRACE
4913 #ifdef CONFIG_TRACER_SNAPSHOT
4916 #ifdef CONFIG_HIST_TRIGGERS
4917 "\t\t hist (see below)\n"
4919 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4920 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4921 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4922 "\t events/block/block_unplug/trigger\n"
4923 "\t The first disables tracing every time block_unplug is hit.\n"
4924 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4925 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4926	"\t   is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
4927 "\t Like function triggers, the counter is only decremented if it\n"
4928 "\t enabled or disabled tracing.\n"
4929 "\t To remove a trigger without a count:\n"
4930	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
4931	"\t   To remove a trigger with a count:\n"
4932	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
4933 "\t Filters can be ignored when removing a trigger.\n"
4934 #ifdef CONFIG_HIST_TRIGGERS
4935 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4936 "\t Format: hist:keys=<field1[,field2,...]>\n"
4937 "\t [:values=<field1[,field2,...]>]\n"
4938 "\t [:sort=<field1[,field2,...]>]\n"
4939 "\t [:size=#entries]\n"
4940 "\t [:pause][:continue][:clear]\n"
4941 "\t [:name=histname1]\n"
4942 "\t [:<handler>.<action>]\n"
4943 "\t [if <filter>]\n\n"
4944 "\t When a matching event is hit, an entry is added to a hash\n"
4945 "\t table using the key(s) and value(s) named, and the value of a\n"
4946 "\t sum called 'hitcount' is incremented. Keys and values\n"
4947 "\t correspond to fields in the event's format description. Keys\n"
4948 "\t can be any field, or the special string 'stacktrace'.\n"
4949 "\t Compound keys consisting of up to two fields can be specified\n"
4950 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4951 "\t fields. Sort keys consisting of up to two fields can be\n"
4952 "\t specified using the 'sort' keyword. The sort direction can\n"
4953 "\t be modified by appending '.descending' or '.ascending' to a\n"
4954 "\t sort field. The 'size' parameter can be used to specify more\n"
4955 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4956 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4957 "\t its histogram data will be shared with other triggers of the\n"
4958 "\t same name, and trigger hits will update this common data.\n\n"
4959 "\t Reading the 'hist' file for the event will dump the hash\n"
4960 "\t table in its entirety to stdout. If there are multiple hist\n"
4961 "\t triggers attached to an event, there will be a table for each\n"
4962 "\t trigger in the output. The table displayed for a named\n"
4963 "\t trigger will be the same as any other instance having the\n"
4964 "\t same name. The default format used to display a given field\n"
4965 "\t can be modified by appending any of the following modifiers\n"
4966 "\t to the field name, as applicable:\n\n"
4967 "\t .hex display a number as a hex value\n"
4968 "\t .sym display an address as a symbol\n"
4969 "\t .sym-offset display an address as a symbol and offset\n"
4970 "\t .execname display a common_pid as a program name\n"
4971 "\t .syscall display a syscall id as a syscall name\n"
4972 "\t .log2 display log2 value rather than raw number\n"
4973 "\t .usecs display a common_timestamp in microseconds\n\n"
4974 "\t The 'pause' parameter can be used to pause an existing hist\n"
4975 "\t trigger or to start a hist trigger but not log any events\n"
4976 "\t until told to do so. 'continue' can be used to start or\n"
4977 "\t restart a paused hist trigger.\n\n"
4978 "\t The 'clear' parameter will clear the contents of a running\n"
4979 "\t hist trigger and leave its current paused/active state\n"
4981 "\t The enable_hist and disable_hist triggers can be used to\n"
4982 "\t have one event conditionally start and stop another event's\n"
4983 "\t already-attached hist trigger. The syntax is analogous to\n"
4984 "\t the enable_event and disable_event triggers.\n\n"
4985 "\t Hist trigger handlers and actions are executed whenever a\n"
4986	"\t    histogram entry is added or updated. They take the form:\n\n"
4987 "\t <handler>.<action>\n\n"
4988 "\t The available handlers are:\n\n"
4989 "\t onmatch(matching.event) - invoke on addition or update\n"
4990 "\t onmax(var) - invoke if var exceeds current max\n"
4991 "\t onchange(var) - invoke action if var changes\n\n"
4992 "\t The available actions are:\n\n"
4993 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
4994 "\t save(field,...) - save current event fields\n"
4995 #ifdef CONFIG_TRACER_SNAPSHOT
4996 "\t snapshot() - snapshot the trace buffer\n"
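/*
 * A usage sketch of the dynamic-event syntax documented above (the group,
 * event and fetch-arg names here are hypothetical examples, not defined
 * anywhere in this file):
 *
 *	# create a kprobe event on do_sys_open, recording its first argument
 *	echo 'p:mygroup/myopen do_sys_open dfd=$arg1' > kprobe_events
 *	# enable it through the usual events/ hierarchy
 *	echo 1 > events/mygroup/myopen/enable
 *	# remove it again with the '-' form
 *	echo '-:mygroup/myopen' >> kprobe_events
 *
 * $arg1 assumes CONFIG_HAVE_FUNCTION_ARG_ACCESS_API, as noted in the
 * fetcharg list above.
 */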
5002 tracing_readme_read(struct file *filp, char __user *ubuf,
5003 size_t cnt, loff_t *ppos)
5005 return simple_read_from_buffer(ubuf, cnt, ppos,
5006 readme_msg, strlen(readme_msg));
5009 static const struct file_operations tracing_readme_fops = {
5010 .open = tracing_open_generic,
5011 .read = tracing_readme_read,
5012 .llseek = generic_file_llseek,
5015 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5019 if (*pos || m->count)
5024 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5025 if (trace_find_tgid(*ptr))
5032 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5042 v = saved_tgids_next(m, v, &l);
5050 static void saved_tgids_stop(struct seq_file *m, void *v)
5054 static int saved_tgids_show(struct seq_file *m, void *v)
5056 int pid = (int *)v - tgid_map;
5058 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5062 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5063 .start = saved_tgids_start,
5064 .stop = saved_tgids_stop,
5065 .next = saved_tgids_next,
5066 .show = saved_tgids_show,
5069 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5073 ret = tracing_check_open_get_tr(NULL);
5077 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5081 static const struct file_operations tracing_saved_tgids_fops = {
5082 .open = tracing_saved_tgids_open,
5084 .llseek = seq_lseek,
5085 .release = seq_release,
5088 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5090 unsigned int *ptr = v;
5092 if (*pos || m->count)
5097 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5099 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5108 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5114 arch_spin_lock(&trace_cmdline_lock);
5116 v = &savedcmd->map_cmdline_to_pid[0];
5118 v = saved_cmdlines_next(m, v, &l);
5126 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5128 arch_spin_unlock(&trace_cmdline_lock);
5132 static int saved_cmdlines_show(struct seq_file *m, void *v)
5134 char buf[TASK_COMM_LEN];
5135 unsigned int *pid = v;
5137 __trace_find_cmdline(*pid, buf);
5138 seq_printf(m, "%d %s\n", *pid, buf);
5142 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5143 .start = saved_cmdlines_start,
5144 .next = saved_cmdlines_next,
5145 .stop = saved_cmdlines_stop,
5146 .show = saved_cmdlines_show,
5149 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5153 ret = tracing_check_open_get_tr(NULL);
5157 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5160 static const struct file_operations tracing_saved_cmdlines_fops = {
5161 .open = tracing_saved_cmdlines_open,
5163 .llseek = seq_lseek,
5164 .release = seq_release,
5168 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5169 size_t cnt, loff_t *ppos)
5174 arch_spin_lock(&trace_cmdline_lock);
5175 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5176 arch_spin_unlock(&trace_cmdline_lock);
5178 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5181 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5183 kfree(s->saved_cmdlines);
5184 kfree(s->map_cmdline_to_pid);
5188 static int tracing_resize_saved_cmdlines(unsigned int val)
5190 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5192 s = kmalloc(sizeof(*s), GFP_KERNEL);
5196 if (allocate_cmdlines_buffer(val, s) < 0) {
5201 arch_spin_lock(&trace_cmdline_lock);
5202 savedcmd_temp = savedcmd;
5204 arch_spin_unlock(&trace_cmdline_lock);
5205 free_saved_cmdlines_buffer(savedcmd_temp);
5211 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5212 size_t cnt, loff_t *ppos)
5217 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5221	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5222 if (!val || val > PID_MAX_DEFAULT)
5225 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5234 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5235 .open = tracing_open_generic,
5236 .read = tracing_saved_cmdlines_size_read,
5237 .write = tracing_saved_cmdlines_size_write,
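/*
 * Usage sketch for the file backed by the operations above (the value is
 * illustrative):
 *
 *	cat saved_cmdlines_size		# current number of cached entries
 *	echo 4096 > saved_cmdlines_size	# grow the comm<->pid cache
 *
 * Writes are bounded to 1..PID_MAX_DEFAULT by the write handler above.
 */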
5240 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5241 static union trace_eval_map_item *
5242 update_eval_map(union trace_eval_map_item *ptr)
5244 if (!ptr->map.eval_string) {
5245 if (ptr->tail.next) {
5246 ptr = ptr->tail.next;
5247 /* Set ptr to the next real item (skip head) */
5255 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5257 union trace_eval_map_item *ptr = v;
5260 * Paranoid! If ptr points to end, we don't want to increment past it.
5261 * This really should never happen.
5263 ptr = update_eval_map(ptr);
5264 if (WARN_ON_ONCE(!ptr))
5271 ptr = update_eval_map(ptr);
5276 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5278 union trace_eval_map_item *v;
5281 mutex_lock(&trace_eval_mutex);
5283 v = trace_eval_maps;
5287 while (v && l < *pos) {
5288 v = eval_map_next(m, v, &l);
5294 static void eval_map_stop(struct seq_file *m, void *v)
5296 mutex_unlock(&trace_eval_mutex);
5299 static int eval_map_show(struct seq_file *m, void *v)
5301 union trace_eval_map_item *ptr = v;
5303 seq_printf(m, "%s %ld (%s)\n",
5304 ptr->map.eval_string, ptr->map.eval_value,
5310 static const struct seq_operations tracing_eval_map_seq_ops = {
5311 .start = eval_map_start,
5312 .next = eval_map_next,
5313 .stop = eval_map_stop,
5314 .show = eval_map_show,
5317 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5321 ret = tracing_check_open_get_tr(NULL);
5325 return seq_open(filp, &tracing_eval_map_seq_ops);
5328 static const struct file_operations tracing_eval_map_fops = {
5329 .open = tracing_eval_map_open,
5331 .llseek = seq_lseek,
5332 .release = seq_release,
5335 static inline union trace_eval_map_item *
5336 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5338 /* Return tail of array given the head */
5339 return ptr + ptr->head.length + 1;
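/*
 * Layout of one trace_eval_maps chunk, as assumed here and by
 * trace_insert_eval_map_file() below (indices are illustrative):
 *
 *	map_array[0]		head  (head.mod, head.length == len)
 *	map_array[1 .. len]	map   (one copied trace_eval_map per entry)
 *	map_array[len + 1]	tail  (tail.next -> next chunk, or zeroed)
 */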
5343 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5346 struct trace_eval_map **stop;
5347 struct trace_eval_map **map;
5348 union trace_eval_map_item *map_array;
5349 union trace_eval_map_item *ptr;
5354 * The trace_eval_maps contains the map plus a head and tail item,
5355 * where the head holds the module and length of array, and the
5356 * tail holds a pointer to the next list.
5358 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5360 pr_warn("Unable to allocate trace eval mapping\n");
5364 mutex_lock(&trace_eval_mutex);
5366 if (!trace_eval_maps)
5367 trace_eval_maps = map_array;
5369 ptr = trace_eval_maps;
5371 ptr = trace_eval_jmp_to_tail(ptr);
5372 if (!ptr->tail.next)
5374 ptr = ptr->tail.next;
5377 ptr->tail.next = map_array;
5379 map_array->head.mod = mod;
5380 map_array->head.length = len;
5383 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5384 map_array->map = **map;
5387 memset(map_array, 0, sizeof(*map_array));
5389 mutex_unlock(&trace_eval_mutex);
5392 static void trace_create_eval_file(struct dentry *d_tracer)
5394 trace_create_file("eval_map", 0444, d_tracer,
5395 NULL, &tracing_eval_map_fops);
5398 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5399 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5400 static inline void trace_insert_eval_map_file(struct module *mod,
5401 struct trace_eval_map **start, int len) { }
5402 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5404 static void trace_insert_eval_map(struct module *mod,
5405 struct trace_eval_map **start, int len)
5407 struct trace_eval_map **map;
5414 trace_event_eval_update(map, len);
5416 trace_insert_eval_map_file(mod, start, len);
5420 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5421 size_t cnt, loff_t *ppos)
5423 struct trace_array *tr = filp->private_data;
5424 char buf[MAX_TRACER_SIZE+2];
5427 mutex_lock(&trace_types_lock);
5428 r = sprintf(buf, "%s\n", tr->current_trace->name);
5429 mutex_unlock(&trace_types_lock);
5431 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5434 int tracer_init(struct tracer *t, struct trace_array *tr)
5436 tracing_reset_online_cpus(&tr->trace_buffer);
5440 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5444 for_each_tracing_cpu(cpu)
5445 per_cpu_ptr(buf->data, cpu)->entries = val;
5448 #ifdef CONFIG_TRACER_MAX_TRACE
5449	/* resize @trace_buf's per-cpu entries to match @size_buf's entries */
5450 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5451 struct trace_buffer *size_buf, int cpu_id)
5455 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5456 for_each_tracing_cpu(cpu) {
5457 ret = ring_buffer_resize(trace_buf->buffer,
5458 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5461 per_cpu_ptr(trace_buf->data, cpu)->entries =
5462 per_cpu_ptr(size_buf->data, cpu)->entries;
5465 ret = ring_buffer_resize(trace_buf->buffer,
5466 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5468 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5469 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5474 #endif /* CONFIG_TRACER_MAX_TRACE */
5476 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5477 unsigned long size, int cpu)
5482 * If kernel or user changes the size of the ring buffer
5483 * we use the size that was given, and we can forget about
5484 * expanding it later.
5486 ring_buffer_expanded = true;
5488 /* May be called before buffers are initialized */
5489 if (!tr->trace_buffer.buffer)
5492 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5496 #ifdef CONFIG_TRACER_MAX_TRACE
5497 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5498 !tr->current_trace->use_max_tr)
5501 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5503 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5504 &tr->trace_buffer, cpu);
5507 * AARGH! We are left with different
5508 * size max buffer!!!!
5509 * The max buffer is our "snapshot" buffer.
5510 * When a tracer needs a snapshot (one of the
5511 * latency tracers), it swaps the max buffer
5512	 * with the saved snapshot. We succeeded in updating
5513	 * the size of the main buffer, but failed to update
5514	 * the size of the max buffer. But when we tried
5515 * to reset the main buffer to the original size, we
5516 * failed there too. This is very unlikely to
5517 * happen, but if it does, warn and kill all
5521 tracing_disabled = 1;
5526 if (cpu == RING_BUFFER_ALL_CPUS)
5527 set_buffer_entries(&tr->max_buffer, size);
5529 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5532 #endif /* CONFIG_TRACER_MAX_TRACE */
5534 if (cpu == RING_BUFFER_ALL_CPUS)
5535 set_buffer_entries(&tr->trace_buffer, size);
5537 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5542 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5543 unsigned long size, int cpu_id)
5547 mutex_lock(&trace_types_lock);
5549 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5550 /* make sure, this cpu is enabled in the mask */
5551 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5557 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5562 mutex_unlock(&trace_types_lock);
5569 * tracing_update_buffers - used by tracing facility to expand ring buffers
5571	 * To save memory when tracing is never used on a system that has it
5572	 * configured in, the ring buffers are kept at a minimum size. Once
5573	 * a user starts to use the tracing facility, the buffers need to grow
5574	 * to their default size.
5576 * This function is to be called when a tracer is about to be used.
5578 int tracing_update_buffers(void)
5582 mutex_lock(&trace_types_lock);
5583 if (!ring_buffer_expanded)
5584 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5585 RING_BUFFER_ALL_CPUS);
5586 mutex_unlock(&trace_types_lock);
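/*
 * A minimal caller sketch (not a prescription): code that is about to
 * start writing events typically expands the buffers first and bails out
 * on failure, e.g.
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */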
5591 struct trace_option_dentry;
5594 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5597 * Used to clear out the tracer before deletion of an instance.
5598 * Must have trace_types_lock held.
5600 static void tracing_set_nop(struct trace_array *tr)
5602 if (tr->current_trace == &nop_trace)
5605 tr->current_trace->enabled--;
5607 if (tr->current_trace->reset)
5608 tr->current_trace->reset(tr);
5610 tr->current_trace = &nop_trace;
5613 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5615 /* Only enable if the directory has been created already. */
5619 create_trace_option_files(tr, t);
5622 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5625 #ifdef CONFIG_TRACER_MAX_TRACE
5630 mutex_lock(&trace_types_lock);
5632 if (!ring_buffer_expanded) {
5633 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5634 RING_BUFFER_ALL_CPUS);
5640 for (t = trace_types; t; t = t->next) {
5641 if (strcmp(t->name, buf) == 0)
5648 if (t == tr->current_trace)
5651 #ifdef CONFIG_TRACER_SNAPSHOT
5652 if (t->use_max_tr) {
5653 arch_spin_lock(&tr->max_lock);
5654 if (tr->cond_snapshot)
5656 arch_spin_unlock(&tr->max_lock);
5661	/* Some tracers won't work on the kernel command line */
5662 if (system_state < SYSTEM_RUNNING && t->noboot) {
5663 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5668 /* Some tracers are only allowed for the top level buffer */
5669 if (!trace_ok_for_array(t, tr)) {
5674 /* If trace pipe files are being read, we can't change the tracer */
5675 if (tr->current_trace->ref) {
5680 trace_branch_disable();
5682 tr->current_trace->enabled--;
5684 if (tr->current_trace->reset)
5685 tr->current_trace->reset(tr);
5687 /* Current trace needs to be nop_trace before synchronize_rcu */
5688 tr->current_trace = &nop_trace;
5690 #ifdef CONFIG_TRACER_MAX_TRACE
5691 had_max_tr = tr->allocated_snapshot;
5693 if (had_max_tr && !t->use_max_tr) {
5695 * We need to make sure that the update_max_tr sees that
5696 * current_trace changed to nop_trace to keep it from
5697 * swapping the buffers after we resize it.
5698	 * The update_max_tr() is called with interrupts disabled,
5699	 * so a synchronize_rcu() is sufficient.
5706 #ifdef CONFIG_TRACER_MAX_TRACE
5707 if (t->use_max_tr && !had_max_tr) {
5708 ret = tracing_alloc_snapshot_instance(tr);
5715 ret = tracer_init(t, tr);
5720 tr->current_trace = t;
5721 tr->current_trace->enabled++;
5722 trace_branch_enable(tr);
5724 mutex_unlock(&trace_types_lock);
5730 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5731 size_t cnt, loff_t *ppos)
5733 struct trace_array *tr = filp->private_data;
5734 char buf[MAX_TRACER_SIZE+1];
5741 if (cnt > MAX_TRACER_SIZE)
5742 cnt = MAX_TRACER_SIZE;
5744 if (copy_from_user(buf, ubuf, cnt))
5749 /* strip ending whitespace. */
5750 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5753 err = tracing_set_tracer(tr, buf);
5763 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5764 size_t cnt, loff_t *ppos)
5769 r = snprintf(buf, sizeof(buf), "%ld\n",
5770 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5771 if (r > sizeof(buf))
5773 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5777 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5778 size_t cnt, loff_t *ppos)
5783 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5793 tracing_thresh_read(struct file *filp, char __user *ubuf,
5794 size_t cnt, loff_t *ppos)
5796 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5800 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5801 size_t cnt, loff_t *ppos)
5803 struct trace_array *tr = filp->private_data;
5806 mutex_lock(&trace_types_lock);
5807 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5811 if (tr->current_trace->update_thresh) {
5812 ret = tr->current_trace->update_thresh(tr);
5819 mutex_unlock(&trace_types_lock);
5824 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5827 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5828 size_t cnt, loff_t *ppos)
5830 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5834 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5835 size_t cnt, loff_t *ppos)
5837 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5842 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5844 struct trace_array *tr = inode->i_private;
5845 struct trace_iterator *iter;
5848 ret = tracing_check_open_get_tr(tr);
5852 mutex_lock(&trace_types_lock);
5854 /* create a buffer to store the information to pass to userspace */
5855 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5858 __trace_array_put(tr);
5862 trace_seq_init(&iter->seq);
5863 iter->trace = tr->current_trace;
5865 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5870 /* trace pipe does not show start of buffer */
5871 cpumask_setall(iter->started);
5873 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5874 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5876 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5877 if (trace_clocks[tr->clock_id].in_ns)
5878 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5881 iter->trace_buffer = &tr->trace_buffer;
5882 iter->cpu_file = tracing_get_cpu(inode);
5883 mutex_init(&iter->mutex);
5884 filp->private_data = iter;
5886 if (iter->trace->pipe_open)
5887 iter->trace->pipe_open(iter);
5889 nonseekable_open(inode, filp);
5891 tr->current_trace->ref++;
5893 mutex_unlock(&trace_types_lock);
5898 __trace_array_put(tr);
5899 mutex_unlock(&trace_types_lock);
5903 static int tracing_release_pipe(struct inode *inode, struct file *file)
5905 struct trace_iterator *iter = file->private_data;
5906 struct trace_array *tr = inode->i_private;
5908 mutex_lock(&trace_types_lock);
5910 tr->current_trace->ref--;
5912 if (iter->trace->pipe_close)
5913 iter->trace->pipe_close(iter);
5915 mutex_unlock(&trace_types_lock);
5917 free_cpumask_var(iter->started);
5918 mutex_destroy(&iter->mutex);
5921 trace_array_put(tr);
5927 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5929 struct trace_array *tr = iter->tr;
5931 /* Iterators are static, they should be filled or empty */
5932 if (trace_buffer_iter(iter, iter->cpu_file))
5933 return EPOLLIN | EPOLLRDNORM;
5935 if (tr->trace_flags & TRACE_ITER_BLOCK)
5937 * Always select as readable when in blocking mode
5939 return EPOLLIN | EPOLLRDNORM;
5941 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5946 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5948 struct trace_iterator *iter = filp->private_data;
5950 return trace_poll(iter, filp, poll_table);
5953 /* Must be called with iter->mutex held. */
5954 static int tracing_wait_pipe(struct file *filp)
5956 struct trace_iterator *iter = filp->private_data;
5959 while (trace_empty(iter)) {
5961 if ((filp->f_flags & O_NONBLOCK)) {
5966 * We block until we read something and tracing is disabled.
5967 * We still block if tracing is disabled, but we have never
5968 * read anything. This allows a user to cat this file, and
5969 * then enable tracing. But after we have read something,
5970 * we give an EOF when tracing is again disabled.
5972 * iter->pos will be 0 if we haven't read anything.
5974 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5977 mutex_unlock(&iter->mutex);
5979 ret = wait_on_pipe(iter, 0);
5981 mutex_lock(&iter->mutex);
5994 tracing_read_pipe(struct file *filp, char __user *ubuf,
5995 size_t cnt, loff_t *ppos)
5997 struct trace_iterator *iter = filp->private_data;
6001 * Avoid more than one consumer on a single file descriptor
6002	 * This is just a matter of trace coherency; the ring buffer itself
6005 mutex_lock(&iter->mutex);
6007 /* return any leftover data */
6008 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6012 trace_seq_init(&iter->seq);
6014 if (iter->trace->read) {
6015 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6021 sret = tracing_wait_pipe(filp);
6025 /* stop when tracing is finished */
6026 if (trace_empty(iter)) {
6031 if (cnt >= PAGE_SIZE)
6032 cnt = PAGE_SIZE - 1;
6034 /* reset all but tr, trace, and overruns */
6035 memset(&iter->seq, 0,
6036 sizeof(struct trace_iterator) -
6037 offsetof(struct trace_iterator, seq));
6038 cpumask_clear(iter->started);
6039 trace_seq_init(&iter->seq);
6042 trace_event_read_lock();
6043 trace_access_lock(iter->cpu_file);
6044 while (trace_find_next_entry_inc(iter) != NULL) {
6045 enum print_line_t ret;
6046 int save_len = iter->seq.seq.len;
6048 ret = print_trace_line(iter);
6049 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6050 /* don't print partial lines */
6051 iter->seq.seq.len = save_len;
6054 if (ret != TRACE_TYPE_NO_CONSUME)
6055 trace_consume(iter);
6057 if (trace_seq_used(&iter->seq) >= cnt)
6061 * Setting the full flag means we reached the trace_seq buffer
6062 * size and we should leave by partial output condition above.
6063 * One of the trace_seq_* functions is not used properly.
6065 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6068 trace_access_unlock(iter->cpu_file);
6069 trace_event_read_unlock();
6071 /* Now copy what we have to the user */
6072 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6073 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6074 trace_seq_init(&iter->seq);
6077 * If there was nothing to send to user, in spite of consuming trace
6078 * entries, go back to wait for more entries.
6084 mutex_unlock(&iter->mutex);
6089 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6092 __free_page(spd->pages[idx]);
6095 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
6096 .confirm = generic_pipe_buf_confirm,
6097 .release = generic_pipe_buf_release,
6098 .steal = generic_pipe_buf_steal,
6099 .get = generic_pipe_buf_get,
6103 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6109 /* Seq buffer is page-sized, exactly what we need. */
6111 save_len = iter->seq.seq.len;
6112 ret = print_trace_line(iter);
6114 if (trace_seq_has_overflowed(&iter->seq)) {
6115 iter->seq.seq.len = save_len;
6120 * This should not be hit, because it should only
6121 * be set if the iter->seq overflowed. But check it
6122 * anyway to be safe.
6124 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6125 iter->seq.seq.len = save_len;
6129 count = trace_seq_used(&iter->seq) - save_len;
6132 iter->seq.seq.len = save_len;
6136 if (ret != TRACE_TYPE_NO_CONSUME)
6137 trace_consume(iter);
6139 if (!trace_find_next_entry_inc(iter)) {
6149 static ssize_t tracing_splice_read_pipe(struct file *filp,
6151 struct pipe_inode_info *pipe,
6155 struct page *pages_def[PIPE_DEF_BUFFERS];
6156 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6157 struct trace_iterator *iter = filp->private_data;
6158 struct splice_pipe_desc spd = {
6160 .partial = partial_def,
6161 .nr_pages = 0, /* This gets updated below. */
6162 .nr_pages_max = PIPE_DEF_BUFFERS,
6163 .ops = &tracing_pipe_buf_ops,
6164 .spd_release = tracing_spd_release_pipe,
6170 if (splice_grow_spd(pipe, &spd))
6173 mutex_lock(&iter->mutex);
6175 if (iter->trace->splice_read) {
6176 ret = iter->trace->splice_read(iter, filp,
6177 ppos, pipe, len, flags);
6182 ret = tracing_wait_pipe(filp);
6186 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6191 trace_event_read_lock();
6192 trace_access_lock(iter->cpu_file);
6194 /* Fill as many pages as possible. */
6195 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6196 spd.pages[i] = alloc_page(GFP_KERNEL);
6200 rem = tracing_fill_pipe_page(rem, iter);
6202 /* Copy the data into the page, so we can start over. */
6203 ret = trace_seq_to_buffer(&iter->seq,
6204 page_address(spd.pages[i]),
6205 trace_seq_used(&iter->seq));
6207 __free_page(spd.pages[i]);
6210 spd.partial[i].offset = 0;
6211 spd.partial[i].len = trace_seq_used(&iter->seq);
6213 trace_seq_init(&iter->seq);
6216 trace_access_unlock(iter->cpu_file);
6217 trace_event_read_unlock();
6218 mutex_unlock(&iter->mutex);
6223 ret = splice_to_pipe(pipe, &spd);
6227 splice_shrink_spd(&spd);
6231 mutex_unlock(&iter->mutex);
6236 tracing_entries_read(struct file *filp, char __user *ubuf,
6237 size_t cnt, loff_t *ppos)
6239 struct inode *inode = file_inode(filp);
6240 struct trace_array *tr = inode->i_private;
6241 int cpu = tracing_get_cpu(inode);
6246 mutex_lock(&trace_types_lock);
6248 if (cpu == RING_BUFFER_ALL_CPUS) {
6249 int cpu, buf_size_same;
6254 /* check if all cpu sizes are same */
6255 for_each_tracing_cpu(cpu) {
6256 /* fill in the size from first enabled cpu */
6258 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6259 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
6265 if (buf_size_same) {
6266 if (!ring_buffer_expanded)
6267 r = sprintf(buf, "%lu (expanded: %lu)\n",
6269 trace_buf_size >> 10);
6271 r = sprintf(buf, "%lu\n", size >> 10);
6273 r = sprintf(buf, "X\n");
6275 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6277 mutex_unlock(&trace_types_lock);
6279 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6284 tracing_entries_write(struct file *filp, const char __user *ubuf,
6285 size_t cnt, loff_t *ppos)
6287 struct inode *inode = file_inode(filp);
6288 struct trace_array *tr = inode->i_private;
6292 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6296 /* must have at least 1 entry */
6300 /* value is in KB */
6302 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6312 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6313 size_t cnt, loff_t *ppos)
6315 struct trace_array *tr = filp->private_data;
6318 unsigned long size = 0, expanded_size = 0;
6320 mutex_lock(&trace_types_lock);
6321 for_each_tracing_cpu(cpu) {
6322 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6323 if (!ring_buffer_expanded)
6324 expanded_size += trace_buf_size >> 10;
6326 if (ring_buffer_expanded)
6327 r = sprintf(buf, "%lu\n", size);
6329 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6330 mutex_unlock(&trace_types_lock);
6332 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6336 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6337 size_t cnt, loff_t *ppos)
6340	 * There is no need to read what the user has written; this function
6341	 * exists only so that using "echo" on this file does not produce an error
6350 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6352 struct trace_array *tr = inode->i_private;
6354 /* disable tracing ? */
6355 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6356 tracer_tracing_off(tr);
6357 /* resize the ring buffer to 0 */
6358 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6360 trace_array_put(tr);
6366 tracing_mark_write(struct file *filp, const char __user *ubuf,
6367 size_t cnt, loff_t *fpos)
6369 struct trace_array *tr = filp->private_data;
6370 struct ring_buffer_event *event;
6371 enum event_trigger_type tt = ETT_NONE;
6372 struct ring_buffer *buffer;
6373 struct print_entry *entry;
6374 unsigned long irq_flags;
6379 /* Used in tracing_mark_raw_write() as well */
6380 #define FAULTED_STR "<faulted>"
6381 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6383 if (tracing_disabled)
6386 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6389 if (cnt > TRACE_BUF_SIZE)
6390 cnt = TRACE_BUF_SIZE;
6392 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6394 local_save_flags(irq_flags);
6395 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6397 /* If less than "<faulted>", then make sure we can still add that */
6398 if (cnt < FAULTED_SIZE)
6399 size += FAULTED_SIZE - cnt;
6401 buffer = tr->trace_buffer.buffer;
6402 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6403 irq_flags, preempt_count());
6404 if (unlikely(!event))
6405 /* Ring buffer disabled, return as if not open for write */
6408 entry = ring_buffer_event_data(event);
6409 entry->ip = _THIS_IP_;
6411 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6413 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6420 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6421 /* do not add \n before testing triggers, but add \0 */
6422 entry->buf[cnt] = '\0';
6423 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6426 if (entry->buf[cnt - 1] != '\n') {
6427 entry->buf[cnt] = '\n';
6428 entry->buf[cnt + 1] = '\0';
6430 entry->buf[cnt] = '\0';
6432 __buffer_unlock_commit(buffer, event);
6435 event_triggers_post_call(tr->trace_marker_file, tt);
6443 /* Limit it for now to 3K (including tag) */
6444 #define RAW_DATA_MAX_SIZE (1024*3)
6447 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6448 size_t cnt, loff_t *fpos)
6450 struct trace_array *tr = filp->private_data;
6451 struct ring_buffer_event *event;
6452 struct ring_buffer *buffer;
6453 struct raw_data_entry *entry;
6454 unsigned long irq_flags;
6459 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6461 if (tracing_disabled)
6464 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6467 /* The marker must at least have a tag id */
6468 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6471 if (cnt > TRACE_BUF_SIZE)
6472 cnt = TRACE_BUF_SIZE;
6474 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6476 local_save_flags(irq_flags);
6477 size = sizeof(*entry) + cnt;
6478 if (cnt < FAULT_SIZE_ID)
6479 size += FAULT_SIZE_ID - cnt;
6481 buffer = tr->trace_buffer.buffer;
6482 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6483 irq_flags, preempt_count());
6485 /* Ring buffer disabled, return as if not open for write */
6488 entry = ring_buffer_event_data(event);
6490 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6493 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6498 __buffer_unlock_commit(buffer, event);
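/*
 * Illustrative user-space sketch of the trace_marker_raw record consumed
 * above: the first sizeof(unsigned int) bytes are the tag id, the rest is
 * opaque payload (the tag value and payload here are made up):
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *	int fd = open("trace_marker_raw", O_WRONLY);
 *	write(fd, &rec, sizeof(rec));
 */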
6506 static int tracing_clock_show(struct seq_file *m, void *v)
6508 struct trace_array *tr = m->private;
6511 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6513 "%s%s%s%s", i ? " " : "",
6514 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6515 i == tr->clock_id ? "]" : "");
6521 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6525 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6526 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6529 if (i == ARRAY_SIZE(trace_clocks))
6532 mutex_lock(&trace_types_lock);
6536 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6539 * New clock may not be consistent with the previous clock.
6540 * Reset the buffer so that it doesn't have incomparable timestamps.
6542 tracing_reset_online_cpus(&tr->trace_buffer);
6544 #ifdef CONFIG_TRACER_MAX_TRACE
6545 if (tr->max_buffer.buffer)
6546 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6547 tracing_reset_online_cpus(&tr->max_buffer);
6550 mutex_unlock(&trace_types_lock);
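/*
 * Usage sketch (illustrative): the clocks listed in trace_clocks can be
 * selected through the trace_clock file, e.g.
 *
 *	cat trace_clock			# "[local] global counter ..."
 *	echo global > trace_clock
 *
 * As done above, switching clocks resets the buffers so that old and new
 * timestamps are never mixed.
 */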
6555 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6556 size_t cnt, loff_t *fpos)
6558 struct seq_file *m = filp->private_data;
6559 struct trace_array *tr = m->private;
6561 const char *clockstr;
6564 if (cnt >= sizeof(buf))
6567 if (copy_from_user(buf, ubuf, cnt))
6572 clockstr = strstrip(buf);
6574 ret = tracing_set_clock(tr, clockstr);
6583 static int tracing_clock_open(struct inode *inode, struct file *file)
6585 struct trace_array *tr = inode->i_private;
6588 ret = tracing_check_open_get_tr(tr);
6592 ret = single_open(file, tracing_clock_show, inode->i_private);
6594 trace_array_put(tr);
6599 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6601 struct trace_array *tr = m->private;
6603 mutex_lock(&trace_types_lock);
6605 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6606 seq_puts(m, "delta [absolute]\n");
6608 seq_puts(m, "[delta] absolute\n");
6610 mutex_unlock(&trace_types_lock);
6615 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6617 struct trace_array *tr = inode->i_private;
6620 ret = tracing_check_open_get_tr(tr);
6624 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6626 trace_array_put(tr);
6631 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6635 mutex_lock(&trace_types_lock);
6637 if (abs && tr->time_stamp_abs_ref++)
6641 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6646 if (--tr->time_stamp_abs_ref)
6650 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6652 #ifdef CONFIG_TRACER_MAX_TRACE
6653 if (tr->max_buffer.buffer)
6654 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6657 mutex_unlock(&trace_types_lock);
6662 struct ftrace_buffer_info {
6663 struct trace_iterator iter;
6665 unsigned int spare_cpu;
6669 #ifdef CONFIG_TRACER_SNAPSHOT
6670 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6672 struct trace_array *tr = inode->i_private;
6673 struct trace_iterator *iter;
6677 ret = tracing_check_open_get_tr(tr);
6681 if (file->f_mode & FMODE_READ) {
6682 iter = __tracing_open(inode, file, true);
6684 ret = PTR_ERR(iter);
6686 /* Writes still need the seq_file to hold the private data */
6688 m = kzalloc(sizeof(*m), GFP_KERNEL);
6691 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6699 iter->trace_buffer = &tr->max_buffer;
6700 iter->cpu_file = tracing_get_cpu(inode);
6702 file->private_data = m;
6706 trace_array_put(tr);
6712 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6715 struct seq_file *m = filp->private_data;
6716 struct trace_iterator *iter = m->private;
6717 struct trace_array *tr = iter->tr;
6721 ret = tracing_update_buffers();
6725 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6729 mutex_lock(&trace_types_lock);
6731 if (tr->current_trace->use_max_tr) {
6736 arch_spin_lock(&tr->max_lock);
6737 if (tr->cond_snapshot)
6739 arch_spin_unlock(&tr->max_lock);
6745 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6749 if (tr->allocated_snapshot)
6753 /* Only allow per-cpu swap if the ring buffer supports it */
6754 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6755 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6760 if (tr->allocated_snapshot)
6761 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6762 &tr->trace_buffer, iter->cpu_file);
6764 ret = tracing_alloc_snapshot_instance(tr);
6767 local_irq_disable();
6768 /* Now, we're going to swap */
6769 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6770 update_max_tr(tr, current, smp_processor_id(), NULL);
6772 update_max_tr_single(tr, current, iter->cpu_file);
6776 if (tr->allocated_snapshot) {
6777 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6778 tracing_reset_online_cpus(&tr->max_buffer);
6780 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
6790 mutex_unlock(&trace_types_lock);
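/*
 * Rough usage sketch for the snapshot file handled above:
 *
 *	echo 1 > snapshot	# allocate if needed and take a snapshot
 *	echo 0 > snapshot	# free the snapshot buffer
 *	echo 2 > snapshot	# clear the snapshot buffer without freeing it
 */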
6794 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6796 struct seq_file *m = file->private_data;
6799 ret = tracing_release(inode, file);
6801 if (file->f_mode & FMODE_READ)
6804 /* If write only, the seq_file is just a stub */
6812 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6813 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6814 size_t count, loff_t *ppos);
6815 static int tracing_buffers_release(struct inode *inode, struct file *file);
6816 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6817 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6819 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6821 struct ftrace_buffer_info *info;
6824 /* The following checks for tracefs lockdown */
6825 ret = tracing_buffers_open(inode, filp);
6829 info = filp->private_data;
6831 if (info->iter.trace->use_max_tr) {
6832 tracing_buffers_release(inode, filp);
6836 info->iter.snapshot = true;
6837 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6842 #endif /* CONFIG_TRACER_SNAPSHOT */
6845 static const struct file_operations tracing_thresh_fops = {
6846 .open = tracing_open_generic,
6847 .read = tracing_thresh_read,
6848 .write = tracing_thresh_write,
6849 .llseek = generic_file_llseek,
6852 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6853 static const struct file_operations tracing_max_lat_fops = {
6854 .open = tracing_open_generic,
6855 .read = tracing_max_lat_read,
6856 .write = tracing_max_lat_write,
6857 .llseek = generic_file_llseek,
6861 static const struct file_operations set_tracer_fops = {
6862 .open = tracing_open_generic,
6863 .read = tracing_set_trace_read,
6864 .write = tracing_set_trace_write,
6865 .llseek = generic_file_llseek,
6868 static const struct file_operations tracing_pipe_fops = {
6869 .open = tracing_open_pipe,
6870 .poll = tracing_poll_pipe,
6871 .read = tracing_read_pipe,
6872 .splice_read = tracing_splice_read_pipe,
6873 .release = tracing_release_pipe,
6874 .llseek = no_llseek,
6877 static const struct file_operations tracing_entries_fops = {
6878 .open = tracing_open_generic_tr,
6879 .read = tracing_entries_read,
6880 .write = tracing_entries_write,
6881 .llseek = generic_file_llseek,
6882 .release = tracing_release_generic_tr,
6885 static const struct file_operations tracing_total_entries_fops = {
6886 .open = tracing_open_generic_tr,
6887 .read = tracing_total_entries_read,
6888 .llseek = generic_file_llseek,
6889 .release = tracing_release_generic_tr,
6892 static const struct file_operations tracing_free_buffer_fops = {
6893 .open = tracing_open_generic_tr,
6894 .write = tracing_free_buffer_write,
6895 .release = tracing_free_buffer_release,
6898 static const struct file_operations tracing_mark_fops = {
6899 .open = tracing_open_generic_tr,
6900 .write = tracing_mark_write,
6901 .llseek = generic_file_llseek,
6902 .release = tracing_release_generic_tr,
6905 static const struct file_operations tracing_mark_raw_fops = {
6906 .open = tracing_open_generic_tr,
6907 .write = tracing_mark_raw_write,
6908 .llseek = generic_file_llseek,
6909 .release = tracing_release_generic_tr,
6912 static const struct file_operations trace_clock_fops = {
6913 .open = tracing_clock_open,
6915 .llseek = seq_lseek,
6916 .release = tracing_single_release_tr,
6917 .write = tracing_clock_write,
6920 static const struct file_operations trace_time_stamp_mode_fops = {
6921 .open = tracing_time_stamp_mode_open,
6923 .llseek = seq_lseek,
6924 .release = tracing_single_release_tr,
6927 #ifdef CONFIG_TRACER_SNAPSHOT
6928 static const struct file_operations snapshot_fops = {
6929 .open = tracing_snapshot_open,
6931 .write = tracing_snapshot_write,
6932 .llseek = tracing_lseek,
6933 .release = tracing_snapshot_release,
6936 static const struct file_operations snapshot_raw_fops = {
6937 .open = snapshot_raw_open,
6938 .read = tracing_buffers_read,
6939 .release = tracing_buffers_release,
6940 .splice_read = tracing_buffers_splice_read,
6941 .llseek = no_llseek,
6944 #endif /* CONFIG_TRACER_SNAPSHOT */
6946 #define TRACING_LOG_ERRS_MAX 8
6947 #define TRACING_LOG_LOC_MAX 128
6949 #define CMD_PREFIX " Command: "
6952 const char **errs; /* ptr to loc-specific array of err strings */
6953 u8 type; /* index into errs -> specific err string */
6954 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
6958 struct tracing_log_err {
6959 struct list_head list;
6960 struct err_info info;
6961 char loc[TRACING_LOG_LOC_MAX]; /* err location */
6962 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
6965 static DEFINE_MUTEX(tracing_err_log_lock);
6967 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
6969 struct tracing_log_err *err;
6971 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
6972 err = kzalloc(sizeof(*err), GFP_KERNEL);
6974 err = ERR_PTR(-ENOMEM);
6975 tr->n_err_log_entries++;
6980 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
6981 list_del(&err->list);
6987 * err_pos - find the position of a string within a command for error careting
6988 * @cmd: The tracing command that caused the error
6989 * @str: The string to position the caret at within @cmd
6991	 * Finds the position of the first occurrence of @str within @cmd. The
6992	 * return value can be passed to tracing_log_err() for caret placement
6995	 * Returns the index within @cmd of the first occurrence of @str or 0
6996 * if @str was not found.
6998 unsigned int err_pos(char *cmd, const char *str)
7002 if (WARN_ON(!strlen(cmd)))
7005 found = strstr(cmd, str);
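/*
 * Example (illustrative): err_pos("hist:keys=common_pid", "common_pid")
 * returns 10, so the caret emitted by tracing_err_log_show_pos() below
 * lines up under "common_pid" in the logged command.
 */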
7013 * tracing_log_err - write an error to the tracing error log
7014 * @tr: The associated trace array for the error (NULL for top level array)
7015 * @loc: A string describing where the error occurred
7016 * @cmd: The tracing command that caused the error
7017 * @errs: The array of loc-specific static error strings
7018 * @type: The index into errs[], which produces the specific static err string
7019 * @pos: The position the caret should be placed in the cmd
7021 * Writes an error into tracing/error_log of the form:
7023 * <loc>: error: <text>
7027 * tracing/error_log is a small log file containing the last
7028 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7029 * unless there has been a tracing error, and the error log can be
7030 * cleared and have its memory freed by writing the empty string in
7031 * truncation mode to it i.e. echo > tracing/error_log.
7033 * NOTE: the @errs array along with the @type param are used to
7034 * produce a static error string - this string is not copied and saved
7035 * when the error is logged - only a pointer to it is saved. See
7036 * existing callers for examples of how static strings are typically
7037 * defined for use with tracing_log_err().
7039 void tracing_log_err(struct trace_array *tr,
7040 const char *loc, const char *cmd,
7041 const char **errs, u8 type, u8 pos)
7043 struct tracing_log_err *err;
7048 mutex_lock(&tracing_err_log_lock);
7049 err = get_tracing_log_err(tr);
7050 if (PTR_ERR(err) == -ENOMEM) {
7051 mutex_unlock(&tracing_err_log_lock);
7055 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7056	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7058 err->info.errs = errs;
7059 err->info.type = type;
7060 err->info.pos = pos;
7061 err->info.ts = local_clock();
7063 list_add_tail(&err->list, &tr->err_log);
7064 mutex_unlock(&tracing_err_log_lock);
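/*
 * Caller sketch (all names hypothetical): a command parser keeps its
 * error strings in a static array and passes an index into it, e.g.
 *
 *	static const char *my_cmd_errs[] = { "Missing 'keys=' argument", };
 *	tracing_log_err(tr, "my_cmd", cmd_str, my_cmd_errs,
 *			0, err_pos(cmd_str, "keys"));
 *
 * Only the pointer into my_cmd_errs is stored, per the NOTE above.
 */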
7067 static void clear_tracing_err_log(struct trace_array *tr)
7069 struct tracing_log_err *err, *next;
7071 mutex_lock(&tracing_err_log_lock);
7072 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7073 list_del(&err->list);
7077 tr->n_err_log_entries = 0;
7078 mutex_unlock(&tracing_err_log_lock);
7081 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7083 struct trace_array *tr = m->private;
7085 mutex_lock(&tracing_err_log_lock);
7087 return seq_list_start(&tr->err_log, *pos);
7090 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7092 struct trace_array *tr = m->private;
7094 return seq_list_next(v, &tr->err_log, pos);
7097 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7099 mutex_unlock(&tracing_err_log_lock);
7102 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7106 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7108 for (i = 0; i < pos; i++)
7113 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7115 struct tracing_log_err *err = v;
7118 const char *err_text = err->info.errs[err->info.type];
7119 u64 sec = err->info.ts;
7122 nsec = do_div(sec, NSEC_PER_SEC);
7123 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7124 err->loc, err_text);
7125 seq_printf(m, "%s", err->cmd);
7126 tracing_err_log_show_pos(m, err->info.pos);
7132 static const struct seq_operations tracing_err_log_seq_ops = {
7133 .start = tracing_err_log_seq_start,
7134 .next = tracing_err_log_seq_next,
7135 .stop = tracing_err_log_seq_stop,
7136 .show = tracing_err_log_seq_show
7139 static int tracing_err_log_open(struct inode *inode, struct file *file)
7141 struct trace_array *tr = inode->i_private;
7144 ret = tracing_check_open_get_tr(tr);
7148 /* If this file was opened for write, then erase contents */
7149 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7150 clear_tracing_err_log(tr);
7152 if (file->f_mode & FMODE_READ) {
7153 ret = seq_open(file, &tracing_err_log_seq_ops);
7155 struct seq_file *m = file->private_data;
7158 trace_array_put(tr);
7164 static ssize_t tracing_err_log_write(struct file *file,
7165 const char __user *buffer,
7166 size_t count, loff_t *ppos)
7171 static int tracing_err_log_release(struct inode *inode, struct file *file)
7173 struct trace_array *tr = inode->i_private;
7175 trace_array_put(tr);
7177 if (file->f_mode & FMODE_READ)
7178 seq_release(inode, file);
7183 static const struct file_operations tracing_err_log_fops = {
7184 .open = tracing_err_log_open,
7185 .write = tracing_err_log_write,
7187 .llseek = seq_lseek,
7188 .release = tracing_err_log_release,
7191 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7193 struct trace_array *tr = inode->i_private;
7194 struct ftrace_buffer_info *info;
7197 ret = tracing_check_open_get_tr(tr);
7201 info = kzalloc(sizeof(*info), GFP_KERNEL);
7203 trace_array_put(tr);
7207 mutex_lock(&trace_types_lock);
7210 info->iter.cpu_file = tracing_get_cpu(inode);
7211 info->iter.trace = tr->current_trace;
7212 info->iter.trace_buffer = &tr->trace_buffer;
7214 /* Force reading ring buffer for first read */
7215 info->read = (unsigned int)-1;
7217 filp->private_data = info;
7219 tr->current_trace->ref++;
7221 mutex_unlock(&trace_types_lock);
7223 ret = nonseekable_open(inode, filp);
7225 trace_array_put(tr);
7231 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7233 struct ftrace_buffer_info *info = filp->private_data;
7234 struct trace_iterator *iter = &info->iter;
7236 return trace_poll(iter, filp, poll_table);
7240 tracing_buffers_read(struct file *filp, char __user *ubuf,
7241 size_t count, loff_t *ppos)
7243 struct ftrace_buffer_info *info = filp->private_data;
7244 struct trace_iterator *iter = &info->iter;
7251 #ifdef CONFIG_TRACER_MAX_TRACE
7252 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7257 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
7259 if (IS_ERR(info->spare)) {
7260 ret = PTR_ERR(info->spare);
7263 info->spare_cpu = iter->cpu_file;
7269 /* Do we have previous read data to read? */
7270 if (info->read < PAGE_SIZE)
7274 trace_access_lock(iter->cpu_file);
7275 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
7279 trace_access_unlock(iter->cpu_file);
7282 if (trace_empty(iter)) {
7283 if ((filp->f_flags & O_NONBLOCK))
7286 ret = wait_on_pipe(iter, 0);
7297 size = PAGE_SIZE - info->read;
7301 ret = copy_to_user(ubuf, info->spare + info->read, size);
7313 static int tracing_buffers_release(struct inode *inode, struct file *file)
7315 struct ftrace_buffer_info *info = file->private_data;
7316 struct trace_iterator *iter = &info->iter;
7318 mutex_lock(&trace_types_lock);
7320 iter->tr->current_trace->ref--;
7322 __trace_array_put(iter->tr);
7325 ring_buffer_free_read_page(iter->trace_buffer->buffer,
7326 info->spare_cpu, info->spare);
7329 mutex_unlock(&trace_types_lock);
7335 struct ring_buffer *buffer;
7338 refcount_t refcount;
7341 static void buffer_ref_release(struct buffer_ref *ref)
7343 if (!refcount_dec_and_test(&ref->refcount))
7345 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7349 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7350 struct pipe_buffer *buf)
7352 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7354 buffer_ref_release(ref);
7358 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7359 struct pipe_buffer *buf)
7361 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7363 if (refcount_read(&ref->refcount) > INT_MAX/2)
7366 refcount_inc(&ref->refcount);
7370 /* Pipe buffer operations for a buffer. */
7371 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7372 .confirm = generic_pipe_buf_confirm,
7373 .release = buffer_pipe_buf_release,
7374 .steal = generic_pipe_buf_nosteal,
7375 .get = buffer_pipe_buf_get,
7379 * Callback from splice_to_pipe(), if we need to release some pages
7380	 * at the end of the spd in case we errored out while filling the pipe.
7382 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7384 struct buffer_ref *ref =
7385 (struct buffer_ref *)spd->partial[i].private;
7387 buffer_ref_release(ref);
7388 spd->partial[i].private = 0;
7392 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7393 struct pipe_inode_info *pipe, size_t len,
7396 struct ftrace_buffer_info *info = file->private_data;
7397 struct trace_iterator *iter = &info->iter;
7398 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7399 struct page *pages_def[PIPE_DEF_BUFFERS];
7400 struct splice_pipe_desc spd = {
7402 .partial = partial_def,
7403 .nr_pages_max = PIPE_DEF_BUFFERS,
7404 .ops = &buffer_pipe_buf_ops,
7405 .spd_release = buffer_spd_release,
7407 struct buffer_ref *ref;
7411 #ifdef CONFIG_TRACER_MAX_TRACE
7412 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7416 if (*ppos & (PAGE_SIZE - 1))
7419 if (len & (PAGE_SIZE - 1)) {
7420 if (len < PAGE_SIZE)
7425 if (splice_grow_spd(pipe, &spd))
7429 trace_access_lock(iter->cpu_file);
7430 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7432 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7436 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7442 refcount_set(&ref->refcount, 1);
7443 ref->buffer = iter->trace_buffer->buffer;
7444 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7445 if (IS_ERR(ref->page)) {
7446 ret = PTR_ERR(ref->page);
7451 ref->cpu = iter->cpu_file;
7453 r = ring_buffer_read_page(ref->buffer, &ref->page,
7454 len, iter->cpu_file, 1);
7456 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7462 page = virt_to_page(ref->page);
7464 spd.pages[i] = page;
7465 spd.partial[i].len = PAGE_SIZE;
7466 spd.partial[i].offset = 0;
7467 spd.partial[i].private = (unsigned long)ref;
7471 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7474 trace_access_unlock(iter->cpu_file);
7477 /* did we read anything? */
7478 if (!spd.nr_pages) {
7483 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7486 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7493 ret = splice_to_pipe(pipe, &spd);
7495 splice_shrink_spd(&spd);
7500 static const struct file_operations tracing_buffers_fops = {
7501 .open = tracing_buffers_open,
7502 .read = tracing_buffers_read,
7503 .poll = tracing_buffers_poll,
7504 .release = tracing_buffers_release,
7505 .splice_read = tracing_buffers_splice_read,
7506 .llseek = no_llseek,
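/*
 * Backs the per-cpu "stats" file.  Illustrative output (the values
 * here are made up):
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 16384
 *   oldest event ts: 2486.229487
 *   now ts: 2534.128933
 *   dropped events: 0
 *   read events: 512
 */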
7510 tracing_stats_read(struct file *filp, char __user *ubuf,
7511 size_t count, loff_t *ppos)
7513 struct inode *inode = file_inode(filp);
7514 struct trace_array *tr = inode->i_private;
7515 struct trace_buffer *trace_buf = &tr->trace_buffer;
7516 int cpu = tracing_get_cpu(inode);
7517 struct trace_seq *s;
7519 unsigned long long t;
7520 unsigned long usec_rem;
7522 s = kmalloc(sizeof(*s), GFP_KERNEL);
7528 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7529 trace_seq_printf(s, "entries: %ld\n", cnt);
7531 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7532 trace_seq_printf(s, "overrun: %ld\n", cnt);
7534 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7535 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7537 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7538 trace_seq_printf(s, "bytes: %ld\n", cnt);
7540 if (trace_clocks[tr->clock_id].in_ns) {
7541 /* local or global for trace_clock */
7542 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7543 usec_rem = do_div(t, USEC_PER_SEC);
7544 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7547 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7548 usec_rem = do_div(t, USEC_PER_SEC);
7549 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7551 /* counter or tsc mode for trace_clock */
7552 trace_seq_printf(s, "oldest event ts: %llu\n",
7553 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7555 trace_seq_printf(s, "now ts: %llu\n",
7556 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7559 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7560 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7562 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7563 trace_seq_printf(s, "read events: %ld\n", cnt);
7565 count = simple_read_from_buffer(ubuf, count, ppos,
7566 s->buffer, trace_seq_used(s));
7573 static const struct file_operations tracing_stats_fops = {
7574 .open = tracing_open_generic_tr,
7575 .read = tracing_stats_read,
7576 .llseek = generic_file_llseek,
7577 .release = tracing_release_generic_tr,
7580 #ifdef CONFIG_DYNAMIC_FTRACE
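/*
 * Backs "dyn_ftrace_total_info": prints the unsigned long that
 * private_data points at (wired up below to ftrace_update_tot_cnt,
 * the number of functions available to dynamic ftrace).
 */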
7583 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7584 size_t cnt, loff_t *ppos)
7586 unsigned long *p = filp->private_data;
7587 char buf[64]; /* Not too big for a shallow stack */
7590 r = scnprintf(buf, 63, "%ld", *p);
7593 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7596 static const struct file_operations tracing_dyn_info_fops = {
7597 .open = tracing_open_generic,
7598 .read = tracing_read_dyn_info,
7599 .llseek = generic_file_llseek,
7601 #endif /* CONFIG_DYNAMIC_FTRACE */
7603 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7605 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7606 struct trace_array *tr, struct ftrace_probe_ops *ops,
7609 tracing_snapshot_instance(tr);
7613 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7614 struct trace_array *tr, struct ftrace_probe_ops *ops,
7617 struct ftrace_func_mapper *mapper = data;
7621 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7631 tracing_snapshot_instance(tr);
7635 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7636 struct ftrace_probe_ops *ops, void *data)
7638 struct ftrace_func_mapper *mapper = data;
7641 seq_printf(m, "%ps:", (void *)ip);
7643 seq_puts(m, "snapshot");
7646 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7649 seq_printf(m, ":count=%ld\n", *count);
7651 seq_puts(m, ":unlimited\n");
7657 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7658 unsigned long ip, void *init_data, void **data)
7660 struct ftrace_func_mapper *mapper = *data;
7663 mapper = allocate_ftrace_func_mapper();
7669 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7673 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7674 unsigned long ip, void *data)
7676 struct ftrace_func_mapper *mapper = data;
7681 free_ftrace_func_mapper(mapper, NULL);
7685 ftrace_func_mapper_remove_ip(mapper, ip);
7688 static struct ftrace_probe_ops snapshot_probe_ops = {
7689 .func = ftrace_snapshot,
7690 .print = ftrace_snapshot_print,
7693 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7694 .func = ftrace_count_snapshot,
7695 .print = ftrace_snapshot_print,
7696 .init = ftrace_snapshot_init,
7697 .free = ftrace_snapshot_free,
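/*
 * Implements the "snapshot" command of set_ftrace_filter.  Usage is
 * roughly:
 *
 *   echo 'schedule:snapshot'   > set_ftrace_filter  # snapshot on every hit
 *   echo 'schedule:snapshot:3' > set_ftrace_filter  # only the first 3 hits
 *   echo '!schedule:snapshot'  > set_ftrace_filter  # remove the probe
 */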
7701 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7702 char *glob, char *cmd, char *param, int enable)
7704 struct ftrace_probe_ops *ops;
7705 void *count = (void *)-1;
7712 /* hash funcs only work with set_ftrace_filter */
7716 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7719 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7724 number = strsep(&param, ":");
7726 if (!strlen(number))
7730 * We use the callback data field (which is a pointer) as our counter.
7733 ret = kstrtoul(number, 0, (unsigned long *)&count);
7738 ret = tracing_alloc_snapshot_instance(tr);
7742 ret = register_ftrace_function_probe(glob, tr, ops, count);
7745 return ret < 0 ? ret : 0;
7748 static struct ftrace_func_command ftrace_snapshot_cmd = {
7750 .func = ftrace_trace_snapshot_callback,
7753 static __init int register_snapshot_cmd(void)
7755 return register_ftrace_command(&ftrace_snapshot_cmd);
7758 static inline __init int register_snapshot_cmd(void) { return 0; }
7759 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7761 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7763 if (WARN_ON(!tr->dir))
7764 return ERR_PTR(-ENODEV);
7766 /* Top directory uses NULL as the parent */
7767 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7770 /* All sub buffers have a descriptor */
7774 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7776 struct dentry *d_tracer;
7779 return tr->percpu_dir;
7781 d_tracer = tracing_get_dentry(tr);
7782 if (IS_ERR(d_tracer))
7785 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7787 WARN_ONCE(!tr->percpu_dir,
7788 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7790 return tr->percpu_dir;
7793 static struct dentry *
7794 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7795 void *data, long cpu, const struct file_operations *fops)
7797 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7799 if (ret) /* See tracing_get_cpu() */
7800 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7805 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7807 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7808 struct dentry *d_cpu;
7809 char cpu_dir[30]; /* 30 characters should be more than enough */
7814 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7815 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7817 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7821 /* per cpu trace_pipe */
7822 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7823 tr, cpu, &tracing_pipe_fops);
7826 trace_create_cpu_file("trace", 0644, d_cpu,
7827 tr, cpu, &tracing_fops);
7829 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7830 tr, cpu, &tracing_buffers_fops);
7832 trace_create_cpu_file("stats", 0444, d_cpu,
7833 tr, cpu, &tracing_stats_fops);
7835 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7836 tr, cpu, &tracing_entries_fops);
7838 #ifdef CONFIG_TRACER_SNAPSHOT
7839 trace_create_cpu_file("snapshot", 0644, d_cpu,
7840 tr, cpu, &snapshot_fops);
7842 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7843 tr, cpu, &snapshot_raw_fops);
7847 #ifdef CONFIG_FTRACE_SELFTEST
7848 /* Let selftest have access to static functions in this file */
7849 #include "trace_selftest.c"
7853 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7856 struct trace_option_dentry *topt = filp->private_data;
7859 if (topt->flags->val & topt->opt->bit)
7864 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7868 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7871 struct trace_option_dentry *topt = filp->private_data;
7875 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7879 if (val != 0 && val != 1)
7882 if (!!(topt->flags->val & topt->opt->bit) != val) {
7883 mutex_lock(&trace_types_lock);
7884 ret = __set_tracer_option(topt->tr, topt->flags,
7886 mutex_unlock(&trace_types_lock);
7897 static const struct file_operations trace_options_fops = {
7898 .open = tracing_open_generic,
7899 .read = trace_options_read,
7900 .write = trace_options_write,
7901 .llseek = generic_file_llseek,
7905 * In order to pass in both the trace_array descriptor as well as the index
7906 * to the flag that the trace option file represents, the trace_array
7907 * has a character array of trace_flags_index[], which holds the index
7908 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7909 * The address of this character array is passed to the flag option file
7910 * read/write callbacks.
7912 * In order to extract both the index and the trace_array descriptor,
7913 * get_tr_index() uses the following algorithm.
7917 * As the pointer itself contains the address of the index (remember
7920 * index[1] == 1), subtracting that index from the ptr gets us back to
7921 * the start of the index array itself:
7923 * ptr - idx == &index[0]
7925 * Then a simple container_of() from that pointer gets us to the
7926 * trace_array descriptor.
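/*
 * Worked example: if data points at tr->trace_flags_index[5], then
 * *pindex == 5 and (data - 5) == &tr->trace_flags_index[0], so a
 * container_of() on that address recovers the enclosing trace_array.
 */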
7928 static void get_tr_index(void *data, struct trace_array **ptr,
7929 unsigned int *pindex)
7931 *pindex = *(unsigned char *)data;
7933 *ptr = container_of(data - *pindex, struct trace_array,
7938 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7941 void *tr_index = filp->private_data;
7942 struct trace_array *tr;
7946 get_tr_index(tr_index, &tr, &index);
7948 if (tr->trace_flags & (1 << index))
7953 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7957 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7960 void *tr_index = filp->private_data;
7961 struct trace_array *tr;
7966 get_tr_index(tr_index, &tr, &index);
7968 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7972 if (val != 0 && val != 1)
7975 mutex_lock(&trace_types_lock);
7976 ret = set_tracer_flag(tr, 1 << index, val);
7977 mutex_unlock(&trace_types_lock);
7987 static const struct file_operations trace_options_core_fops = {
7988 .open = tracing_open_generic,
7989 .read = trace_options_core_read,
7990 .write = trace_options_core_write,
7991 .llseek = generic_file_llseek,
7994 struct dentry *trace_create_file(const char *name,
7996 struct dentry *parent,
7998 const struct file_operations *fops)
8002 ret = tracefs_create_file(name, mode, parent, data, fops);
8004 pr_warn("Could not create tracefs '%s' entry\n", name);
8010 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8012 struct dentry *d_tracer;
8017 d_tracer = tracing_get_dentry(tr);
8018 if (IS_ERR(d_tracer))
8021 tr->options = tracefs_create_dir("options", d_tracer);
8023 pr_warn("Could not create tracefs directory 'options'\n");
8031 create_trace_option_file(struct trace_array *tr,
8032 struct trace_option_dentry *topt,
8033 struct tracer_flags *flags,
8034 struct tracer_opt *opt)
8036 struct dentry *t_options;
8038 t_options = trace_options_init_dentry(tr);
8042 topt->flags = flags;
8046 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8047 &trace_options_fops);
8052 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8054 struct trace_option_dentry *topts;
8055 struct trace_options *tr_topts;
8056 struct tracer_flags *flags;
8057 struct tracer_opt *opts;
8064 flags = tracer->flags;
8066 if (!flags || !flags->opts)
8070 * If this is an instance, only create flags for tracers
8071 * the instance may have.
8073 if (!trace_ok_for_array(tracer, tr))
8076 for (i = 0; i < tr->nr_topts; i++) {
8077 /* Make sure there are no duplicate flags. */
8078 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8084 for (cnt = 0; opts[cnt].name; cnt++)
8087 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8091 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8098 tr->topts = tr_topts;
8099 tr->topts[tr->nr_topts].tracer = tracer;
8100 tr->topts[tr->nr_topts].topts = topts;
8103 for (cnt = 0; opts[cnt].name; cnt++) {
8104 create_trace_option_file(tr, &topts[cnt], flags,
8106 WARN_ONCE(topts[cnt].entry == NULL,
8107 "Failed to create trace option: %s",
8112 static struct dentry *
8113 create_trace_option_core_file(struct trace_array *tr,
8114 const char *option, long index)
8116 struct dentry *t_options;
8118 t_options = trace_options_init_dentry(tr);
8122 return trace_create_file(option, 0644, t_options,
8123 (void *)&tr->trace_flags_index[index],
8124 &trace_options_core_fops);
8127 static void create_trace_options_dir(struct trace_array *tr)
8129 struct dentry *t_options;
8130 bool top_level = tr == &global_trace;
8133 t_options = trace_options_init_dentry(tr);
8137 for (i = 0; trace_options[i]; i++) {
8139 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8140 create_trace_option_core_file(tr, trace_options[i], i);
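/*
 * Backs the "tracing_on" file: reading returns 0 or 1; writing 0
 * stops recording into the ring buffer (and calls the tracer's
 * ->stop() hook), writing 1 starts it again (and calls ->start()).
 */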
8145 rb_simple_read(struct file *filp, char __user *ubuf,
8146 size_t cnt, loff_t *ppos)
8148 struct trace_array *tr = filp->private_data;
8152 r = tracer_tracing_is_on(tr);
8153 r = sprintf(buf, "%d\n", r);
8155 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8159 rb_simple_write(struct file *filp, const char __user *ubuf,
8160 size_t cnt, loff_t *ppos)
8162 struct trace_array *tr = filp->private_data;
8163 struct ring_buffer *buffer = tr->trace_buffer.buffer;
8167 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8172 mutex_lock(&trace_types_lock);
8173 if (!!val == tracer_tracing_is_on(tr)) {
8174 val = 0; /* do nothing */
8176 tracer_tracing_on(tr);
8177 if (tr->current_trace->start)
8178 tr->current_trace->start(tr);
8180 tracer_tracing_off(tr);
8181 if (tr->current_trace->stop)
8182 tr->current_trace->stop(tr);
8184 mutex_unlock(&trace_types_lock);
8192 static const struct file_operations rb_simple_fops = {
8193 .open = tracing_open_generic_tr,
8194 .read = rb_simple_read,
8195 .write = rb_simple_write,
8196 .release = tracing_release_generic_tr,
8197 .llseek = default_llseek,
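/*
 * Backs the "buffer_percent" file: how full the ring buffer must be
 * before a reader blocked in wait_on_pipe() is woken up.  0 means
 * wake on any data, 100 means wait until the buffer is full; the
 * default set in init_tracer_tracefs() is 50.
 */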
8201 buffer_percent_read(struct file *filp, char __user *ubuf,
8202 size_t cnt, loff_t *ppos)
8204 struct trace_array *tr = filp->private_data;
8208 r = tr->buffer_percent;
8209 r = sprintf(buf, "%d\n", r);
8211 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8215 buffer_percent_write(struct file *filp, const char __user *ubuf,
8216 size_t cnt, loff_t *ppos)
8218 struct trace_array *tr = filp->private_data;
8222 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8232 tr->buffer_percent = val;
8239 static const struct file_operations buffer_percent_fops = {
8240 .open = tracing_open_generic_tr,
8241 .read = buffer_percent_read,
8242 .write = buffer_percent_write,
8243 .release = tracing_release_generic_tr,
8244 .llseek = default_llseek,
8247 static struct dentry *trace_instance_dir;
8250 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8253 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
8255 enum ring_buffer_flags rb_flags;
8257 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8261 buf->buffer = ring_buffer_alloc(size, rb_flags);
8265 buf->data = alloc_percpu(struct trace_array_cpu);
8267 ring_buffer_free(buf->buffer);
8272 /* Allocate the first page for all buffers */
8273 set_buffer_entries(&tr->trace_buffer,
8274 ring_buffer_size(tr->trace_buffer.buffer, 0));
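/*
 * The main buffer is allocated at the requested size.  Under
 * CONFIG_TRACER_MAX_TRACE the max/snapshot buffer starts out at a
 * minimal size unless a snapshot was requested on the kernel command
 * line, and is only expanded when a snapshot is actually set up.
 */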
8279 static int allocate_trace_buffers(struct trace_array *tr, int size)
8283 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
8287 #ifdef CONFIG_TRACER_MAX_TRACE
8288 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8289 allocate_snapshot ? size : 1);
8291 ring_buffer_free(tr->trace_buffer.buffer);
8292 tr->trace_buffer.buffer = NULL;
8293 free_percpu(tr->trace_buffer.data);
8294 tr->trace_buffer.data = NULL;
8297 tr->allocated_snapshot = allocate_snapshot;
8300 * Only the top level trace array gets its snapshot allocated
8301 * from the kernel command line.
8303 allocate_snapshot = false;
8308 static void free_trace_buffer(struct trace_buffer *buf)
8311 ring_buffer_free(buf->buffer);
8313 free_percpu(buf->data);
8318 static void free_trace_buffers(struct trace_array *tr)
8323 free_trace_buffer(&tr->trace_buffer);
8325 #ifdef CONFIG_TRACER_MAX_TRACE
8326 free_trace_buffer(&tr->max_buffer);
8330 static void init_trace_flags_index(struct trace_array *tr)
8334 /* Used by the trace options files */
8335 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8336 tr->trace_flags_index[i] = i;
8339 static void __update_tracer_options(struct trace_array *tr)
8343 for (t = trace_types; t; t = t->next)
8344 add_tracer_options(tr, t);
8347 static void update_tracer_options(struct trace_array *tr)
8349 mutex_lock(&trace_types_lock);
8350 __update_tracer_options(tr);
8351 mutex_unlock(&trace_types_lock);
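/*
 * Trace instances are normally created from user space, e.g.:
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *
 * which reaches trace_array_create() via instance_mkdir().  Kernel
 * users may also call trace_array_create() directly.
 */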
8354 struct trace_array *trace_array_create(const char *name)
8356 struct trace_array *tr;
8359 mutex_lock(&event_mutex);
8360 mutex_lock(&trace_types_lock);
8363 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8364 if (tr->name && strcmp(tr->name, name) == 0)
8369 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8373 tr->name = kstrdup(name, GFP_KERNEL);
8377 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8380 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8382 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8384 raw_spin_lock_init(&tr->start_lock);
8386 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8388 tr->current_trace = &nop_trace;
8390 INIT_LIST_HEAD(&tr->systems);
8391 INIT_LIST_HEAD(&tr->events);
8392 INIT_LIST_HEAD(&tr->hist_vars);
8393 INIT_LIST_HEAD(&tr->err_log);
8395 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8398 tr->dir = tracefs_create_dir(name, trace_instance_dir);
8402 ret = event_trace_add_tracer(tr->dir, tr);
8404 tracefs_remove_recursive(tr->dir);
8408 ftrace_init_trace_array(tr);
8410 init_tracer_tracefs(tr, tr->dir);
8411 init_trace_flags_index(tr);
8412 __update_tracer_options(tr);
8414 list_add(&tr->list, &ftrace_trace_arrays);
8416 mutex_unlock(&trace_types_lock);
8417 mutex_unlock(&event_mutex);
8422 free_trace_buffers(tr);
8423 free_cpumask_var(tr->tracing_cpumask);
8428 mutex_unlock(&trace_types_lock);
8429 mutex_unlock(&event_mutex);
8431 return ERR_PTR(ret);
8433 EXPORT_SYMBOL_GPL(trace_array_create);
8435 static int instance_mkdir(const char *name)
8437 return PTR_ERR_OR_ZERO(trace_array_create(name));
8440 static int __remove_instance(struct trace_array *tr)
8444 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
8447 list_del(&tr->list);
8449 /* Disable all the flags that were enabled coming in */
8450 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8451 if ((1 << i) & ZEROED_TRACE_FLAGS)
8452 set_tracer_flag(tr, 1 << i, 0);
8455 tracing_set_nop(tr);
8456 clear_ftrace_function_probes(tr);
8457 event_trace_del_tracer(tr);
8458 ftrace_clear_pids(tr);
8459 ftrace_destroy_function_files(tr);
8460 tracefs_remove_recursive(tr->dir);
8461 free_trace_buffers(tr);
8463 for (i = 0; i < tr->nr_topts; i++) {
8464 kfree(tr->topts[i].topts);
8468 free_cpumask_var(tr->tracing_cpumask);
8476 int trace_array_destroy(struct trace_array *tr)
8483 mutex_lock(&event_mutex);
8484 mutex_lock(&trace_types_lock);
8486 ret = __remove_instance(tr);
8488 mutex_unlock(&trace_types_lock);
8489 mutex_unlock(&event_mutex);
8493 EXPORT_SYMBOL_GPL(trace_array_destroy);
8495 static int instance_rmdir(const char *name)
8497 struct trace_array *tr;
8500 mutex_lock(&event_mutex);
8501 mutex_lock(&trace_types_lock);
8504 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8505 if (tr->name && strcmp(tr->name, name) == 0) {
8506 ret = __remove_instance(tr);
8511 mutex_unlock(&trace_types_lock);
8512 mutex_unlock(&event_mutex);
8517 static __init void create_trace_instances(struct dentry *d_tracer)
8519 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8522 if (WARN_ON(!trace_instance_dir))
8527 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8529 struct trace_event_file *file;
8532 trace_create_file("available_tracers", 0444, d_tracer,
8533 tr, &show_traces_fops);
8535 trace_create_file("current_tracer", 0644, d_tracer,
8536 tr, &set_tracer_fops);
8538 trace_create_file("tracing_cpumask", 0644, d_tracer,
8539 tr, &tracing_cpumask_fops);
8541 trace_create_file("trace_options", 0644, d_tracer,
8542 tr, &tracing_iter_fops);
8544 trace_create_file("trace", 0644, d_tracer,
8547 trace_create_file("trace_pipe", 0444, d_tracer,
8548 tr, &tracing_pipe_fops);
8550 trace_create_file("buffer_size_kb", 0644, d_tracer,
8551 tr, &tracing_entries_fops);
8553 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8554 tr, &tracing_total_entries_fops);
8556 trace_create_file("free_buffer", 0200, d_tracer,
8557 tr, &tracing_free_buffer_fops);
8559 trace_create_file("trace_marker", 0220, d_tracer,
8560 tr, &tracing_mark_fops);
8562 file = __find_event_file(tr, "ftrace", "print");
8563 if (file && file->dir)
8564 trace_create_file("trigger", 0644, file->dir, file,
8565 &event_trigger_fops);
8566 tr->trace_marker_file = file;
8568 trace_create_file("trace_marker_raw", 0220, d_tracer,
8569 tr, &tracing_mark_raw_fops);
8571 trace_create_file("trace_clock", 0644, d_tracer, tr,
8574 trace_create_file("tracing_on", 0644, d_tracer,
8575 tr, &rb_simple_fops);
8577 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8578 &trace_time_stamp_mode_fops);
8580 tr->buffer_percent = 50;
8582 trace_create_file("buffer_percent", 0444, d_tracer,
8583 tr, &buffer_percent_fops);
8585 create_trace_options_dir(tr);
8587 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8588 trace_create_file("tracing_max_latency", 0644, d_tracer,
8589 &tr->max_latency, &tracing_max_lat_fops);
8592 if (ftrace_create_function_files(tr, d_tracer))
8593 WARN(1, "Could not allocate function filter files");
8595 #ifdef CONFIG_TRACER_SNAPSHOT
8596 trace_create_file("snapshot", 0644, d_tracer,
8597 tr, &snapshot_fops);
8600 trace_create_file("error_log", 0644, d_tracer,
8601 tr, &tracing_err_log_fops);
8603 for_each_tracing_cpu(cpu)
8604 tracing_init_tracefs_percpu(tr, cpu);
8606 ftrace_init_tracefs(tr, d_tracer);
8609 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
8611 struct vfsmount *mnt;
8612 struct file_system_type *type;
8615 * To maintain backward compatibility for tools that mount
8616 * debugfs to get to the tracing facility, tracefs is automatically
8617 * mounted to the debugfs/tracing directory.
8619 type = get_fs_type("tracefs");
8622 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8623 put_filesystem(type);
8632 * tracing_init_dentry - initialize top level trace array
8634 * This is called when creating files or directories in the tracing
8635 * directory. It is called via fs_initcall() by any of the boot up code
8636 * and expects to return the dentry of the top level tracing directory.
8638 struct dentry *tracing_init_dentry(void)
8640 struct trace_array *tr = &global_trace;
8642 /* The top level trace array uses NULL as parent */
8646 if (WARN_ON(!tracefs_initialized()) ||
8647 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8648 WARN_ON(!debugfs_initialized())))
8649 return ERR_PTR(-ENODEV);
8652 * As there may still be users that expect the tracing
8653 * files to exist in debugfs/tracing, we must automount
8654 * the tracefs file system there, so older tools still
8655 * work with the newer kernel.
8657 tr->dir = debugfs_create_automount("tracing", NULL,
8658 trace_automount, NULL);
8663 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8664 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8666 static void __init trace_eval_init(void)
8670 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8671 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8674 #ifdef CONFIG_MODULES
8675 static void trace_module_add_evals(struct module *mod)
8677 if (!mod->num_trace_evals)
8681 * Modules with bad taint do not have events created, do
8682 * not bother with eval maps either.
8684 if (trace_module_has_bad_taint(mod))
8687 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8690 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8691 static void trace_module_remove_evals(struct module *mod)
8693 union trace_eval_map_item *map;
8694 union trace_eval_map_item **last = &trace_eval_maps;
8696 if (!mod->num_trace_evals)
8699 mutex_lock(&trace_eval_mutex);
8701 map = trace_eval_maps;
8704 if (map->head.mod == mod)
8706 map = trace_eval_jmp_to_tail(map);
8707 last = &map->tail.next;
8708 map = map->tail.next;
8713 *last = trace_eval_jmp_to_tail(map)->tail.next;
8716 mutex_unlock(&trace_eval_mutex);
8719 static inline void trace_module_remove_evals(struct module *mod) { }
8720 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8722 static int trace_module_notify(struct notifier_block *self,
8723 unsigned long val, void *data)
8725 struct module *mod = data;
8728 case MODULE_STATE_COMING:
8729 trace_module_add_evals(mod);
8731 case MODULE_STATE_GOING:
8732 trace_module_remove_evals(mod);
8739 static struct notifier_block trace_module_nb = {
8740 .notifier_call = trace_module_notify,
8743 #endif /* CONFIG_MODULES */
8745 static __init int tracer_init_tracefs(void)
8747 struct dentry *d_tracer;
8749 trace_access_lock_init();
8751 d_tracer = tracing_init_dentry();
8752 if (IS_ERR(d_tracer))
8757 init_tracer_tracefs(&global_trace, d_tracer);
8758 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8760 trace_create_file("tracing_thresh", 0644, d_tracer,
8761 &global_trace, &tracing_thresh_fops);
8763 trace_create_file("README", 0444, d_tracer,
8764 NULL, &tracing_readme_fops);
8766 trace_create_file("saved_cmdlines", 0444, d_tracer,
8767 NULL, &tracing_saved_cmdlines_fops);
8769 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8770 NULL, &tracing_saved_cmdlines_size_fops);
8772 trace_create_file("saved_tgids", 0444, d_tracer,
8773 NULL, &tracing_saved_tgids_fops);
8777 trace_create_eval_file(d_tracer);
8779 #ifdef CONFIG_MODULES
8780 register_module_notifier(&trace_module_nb);
8783 #ifdef CONFIG_DYNAMIC_FTRACE
8784 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8785 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8788 create_trace_instances(d_tracer);
8790 update_tracer_options(&global_trace);
8795 static int trace_panic_handler(struct notifier_block *this,
8796 unsigned long event, void *unused)
8798 if (ftrace_dump_on_oops)
8799 ftrace_dump(ftrace_dump_on_oops);
8803 static struct notifier_block trace_panic_notifier = {
8804 .notifier_call = trace_panic_handler,
8806 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8809 static int trace_die_handler(struct notifier_block *self,
8815 if (ftrace_dump_on_oops)
8816 ftrace_dump(ftrace_dump_on_oops);
8824 static struct notifier_block trace_die_notifier = {
8825 .notifier_call = trace_die_handler,
8830 * printk is limited to a maximum of 1024 characters; we really don't need it that big.
8831 * Nothing should be printing 1000 characters anyway.
8833 #define TRACE_MAX_PRINT 1000
8836 * Define here KERN_TRACE so that we have one place to modify
8837 * it if we decide to change what log level the ftrace dump
8840 #define KERN_TRACE KERN_EMERG
8843 trace_printk_seq(struct trace_seq *s)
8845 /* Probably should print a warning here. */
8846 if (s->seq.len >= TRACE_MAX_PRINT)
8847 s->seq.len = TRACE_MAX_PRINT;
8850 * More paranoid code. Although the buffer size is set to
8851 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8852 * an extra layer of protection.
8854 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8855 s->seq.len = s->seq.size - 1;
8857 /* Should be NUL-terminated, but we are paranoid. */
8858 s->buffer[s->seq.len] = 0;
8860 printk(KERN_TRACE "%s", s->buffer);
8865 void trace_init_global_iter(struct trace_iterator *iter)
8867 iter->tr = &global_trace;
8868 iter->trace = iter->tr->current_trace;
8869 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8870 iter->trace_buffer = &global_trace.trace_buffer;
8872 if (iter->trace && iter->trace->open)
8873 iter->trace->open(iter);
8875 /* Annotate start of buffers if we had overruns */
8876 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8877 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8879 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8880 if (trace_clocks[iter->tr->clock_id].in_ns)
8881 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8884 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8886 /* use static because iter can be a bit big for the stack */
8887 static struct trace_iterator iter;
8888 static atomic_t dump_running;
8889 struct trace_array *tr = &global_trace;
8890 unsigned int old_userobj;
8891 unsigned long flags;
8894 /* Only allow one dump user at a time. */
8895 if (atomic_inc_return(&dump_running) != 1) {
8896 atomic_dec(&dump_running);
8901 * Always turn off tracing when we dump.
8902 * We don't need to show trace output of what happens
8903 * between multiple crashes.
8905 * If the user does a sysrq-z, then they can re-enable
8906 * tracing with echo 1 > tracing_on.
8910 local_irq_save(flags);
8911 printk_nmi_direct_enter();
8913 /* Simulate the iterator */
8914 trace_init_global_iter(&iter);
8916 for_each_tracing_cpu(cpu) {
8917 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8920 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8922 /* don't look at user memory in panic mode */
8923 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8925 switch (oops_dump_mode) {
8927 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8930 iter.cpu_file = raw_smp_processor_id();
8935 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8936 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8939 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8941 /* Did function tracer already get disabled? */
8942 if (ftrace_is_dead()) {
8943 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8944 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8948 * We need to stop all tracing on all CPUs to read
8949 * the next buffer. This is a bit expensive, but is
8950 * not done often. We read whatever we can,
8951 * and then release the locks again.
8954 while (!trace_empty(&iter)) {
8957 printk(KERN_TRACE "---------------------------------\n");
8961 trace_iterator_reset(&iter);
8962 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8964 if (trace_find_next_entry_inc(&iter) != NULL) {
8967 ret = print_trace_line(&iter);
8968 if (ret != TRACE_TYPE_NO_CONSUME)
8969 trace_consume(&iter);
8971 touch_nmi_watchdog();
8973 trace_printk_seq(&iter.seq);
8977 printk(KERN_TRACE " (ftrace buffer empty)\n");
8979 printk(KERN_TRACE "---------------------------------\n");
8982 tr->trace_flags |= old_userobj;
8984 for_each_tracing_cpu(cpu) {
8985 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8987 atomic_dec(&dump_running);
8988 printk_nmi_direct_exit();
8989 local_irq_restore(flags);
8991 EXPORT_SYMBOL_GPL(ftrace_dump);
8993 int trace_run_command(const char *buf, int (*createfn)(int, char **))
9000 argv = argv_split(GFP_KERNEL, buf, &argc);
9005 ret = createfn(argc, argv);
9012 #define WRITE_BUFSIZE 4096
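/*
 * Helper used by the probe-definition files (kprobe_events,
 * uprobe_events, synthetic events): copies the user buffer in
 * WRITE_BUFSIZE chunks, splits it into newline-terminated lines,
 * strips '#' comments and feeds each line to createfn() as an
 * argc/argv pair via trace_run_command().
 */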
9014 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9015 size_t count, loff_t *ppos,
9016 int (*createfn)(int, char **))
9018 char *kbuf, *buf, *tmp;
9023 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9027 while (done < count) {
9028 size = count - done;
9030 if (size >= WRITE_BUFSIZE)
9031 size = WRITE_BUFSIZE - 1;
9033 if (copy_from_user(kbuf, buffer + done, size)) {
9040 tmp = strchr(buf, '\n');
9043 size = tmp - buf + 1;
9046 if (done + size < count) {
9049 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9050 pr_warn("Line length is too long: Should be less than %d\n",
9058 /* Remove comments */
9059 tmp = strchr(buf, '#');
9064 ret = trace_run_command(buf, createfn);
9069 } while (done < count);
9079 __init static int tracer_alloc_buffers(void)
9085 * Make sure we don't accidentally add more trace options
9086 * than we have bits for.
9088 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9090 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9093 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9094 goto out_free_buffer_mask;
9096 /* Only allocate trace_printk buffers if a trace_printk exists */
9097 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
9098 /* Must be called before global_trace.buffer is allocated */
9099 trace_printk_init_buffers();
9101 /* To save memory, keep the ring buffer size to its minimum */
9102 if (ring_buffer_expanded)
9103 ring_buf_size = trace_buf_size;
9107 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9108 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9110 raw_spin_lock_init(&global_trace.start_lock);
9113 * The prepare callbacks allocates some memory for the ring buffer. We
9114 * don't free the buffer if the CPU goes down. If we were to free
9115 * the buffer, then the user would lose any trace that was in the
9116 * buffer. The memory will be removed once the "instance" is removed.
9118 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9119 "trace/RB:prepare", trace_rb_cpu_prepare,
9122 goto out_free_cpumask;
9123 /* Used for event triggers */
9125 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9127 goto out_rm_hp_state;
9129 if (trace_create_savedcmd() < 0)
9130 goto out_free_temp_buffer;
9132 /* TODO: make the number of buffers hot pluggable with CPUS */
9133 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9134 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
9136 goto out_free_savedcmd;
9139 if (global_trace.buffer_disabled)
9142 if (trace_boot_clock) {
9143 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9145 pr_warn("Trace clock %s not defined, going back to default\n",
9150 * register_tracer() might reference current_trace, so it
9151 * needs to be set before we register anything. This is
9152 * just a bootstrap of current_trace anyway.
9154 global_trace.current_trace = &nop_trace;
9156 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9158 ftrace_init_global_array_ops(&global_trace);
9160 init_trace_flags_index(&global_trace);
9162 register_tracer(&nop_trace);
9164 /* Function tracing may start here (via kernel command line) */
9165 init_function_trace();
9167 /* All seems OK, enable tracing */
9168 tracing_disabled = 0;
9170 atomic_notifier_chain_register(&panic_notifier_list,
9171 &trace_panic_notifier);
9173 register_die_notifier(&trace_die_notifier);
9175 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9177 INIT_LIST_HEAD(&global_trace.systems);
9178 INIT_LIST_HEAD(&global_trace.events);
9179 INIT_LIST_HEAD(&global_trace.hist_vars);
9180 INIT_LIST_HEAD(&global_trace.err_log);
9181 list_add(&global_trace.list, &ftrace_trace_arrays);
9183 apply_trace_boot_options();
9185 register_snapshot_cmd();
9190 free_saved_cmdlines_buffer(savedcmd);
9191 out_free_temp_buffer:
9192 ring_buffer_free(temp_buffer);
9194 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9196 free_cpumask_var(global_trace.tracing_cpumask);
9197 out_free_buffer_mask:
9198 free_cpumask_var(tracing_buffer_mask);
9203 void __init early_trace_init(void)
9205 if (tracepoint_printk) {
9206 tracepoint_print_iter =
9207 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9208 if (WARN_ON(!tracepoint_print_iter))
9209 tracepoint_printk = 0;
9211 static_key_enable(&tracepoint_printk_key.key);
9213 tracer_alloc_buffers();
9216 void __init trace_init(void)
9221 __init static int clear_boot_tracer(void)
9224 * The default bootup tracer name is stored in an init section buffer.
9225 * This function is called at late init. If the boot tracer was not
9226 * found by then, clear the pointer to prevent a later registration
9227 * from accessing the buffer that is
9228 * about to be freed.
9230 if (!default_bootup_tracer)
9233 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9234 default_bootup_tracer);
9235 default_bootup_tracer = NULL;
9240 fs_initcall(tracer_init_tracefs);
9241 late_initcall_sync(clear_boot_tracer);
9243 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9244 __init static int tracing_set_default_clock(void)
9246 /* sched_clock_stable() is determined in late_initcall */
9247 if (!trace_boot_clock && !sched_clock_stable()) {
9249 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9250 "If you want to keep using the local clock, then add:\n"
9251 " \"trace_clock=local\"\n"
9252 "on the kernel command line\n");
9253 tracing_set_clock(&global_trace, "global");
9258 late_initcall_sync(tracing_set_default_clock);