/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>

#include <asm/ftrace.h>
/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does.  Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif
/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif
#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a place holder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 */
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 2,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 3,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 4,
	FTRACE_OPS_FL_STUB			= 1 << 5,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 6,
	FTRACE_OPS_FL_DELETED			= 1 << 7,
	FTRACE_OPS_FL_ADDING			= 1 << 8,
	FTRACE_OPS_FL_REMOVING			= 1 << 9,
	FTRACE_OPS_FL_MODIFYING			= 1 << 10,
	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 11,
	FTRACE_OPS_FL_IPMODIFY			= 1 << 12,
	FTRACE_OPS_FL_PID			= 1 << 13,
	FTRACE_OPS_FL_RCU			= 1 << 14,
	FTRACE_OPS_FL_TRACE_ARRAY		= 1 << 15,
};
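
/*
 * Example (editor's illustration, not kernel code): a minimal callback
 * matching ftrace_func_t together with a statically allocated ftrace_ops
 * that uses the flags above.  All "example_*" names are hypothetical.
 *
 *	static void example_callback(unsigned long ip, unsigned long parent_ip,
 *				     struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		if (!regs)
 *			return;	(NULL is possible with SAVE_REGS_IF_SUPPORTED)
 *		(inspect or modify the saved register state here)
 *	}
 *
 *	static struct ftrace_ops example_ops = {
 *		.func	= example_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 * The ops is registered with register_ftrace_function(), declared below.
 */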
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif
/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed, and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
#endif
};
/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;
/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);
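
/*
 * Example (editor's illustration, not kernel code): registering and
 * unregistering the hypothetical "example_ops" shown above.  The ops must
 * stay allocated (here: static) for as long as it is registered, and must
 * not be reused until unregister_ftrace_function() has returned.
 *
 *	static int example_start(void)
 *	{
 *		return register_ftrace_function(&example_ops);
 *	}
 *
 *	static void example_stop(void)
 *	{
 *		unregister_ftrace_function(&example_ops);
 *	}
 */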
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER

#define STACK_TRACE_ENTRIES 500

extern unsigned stack_trace_index[];
extern struct stack_trace stack_trace_max;
extern unsigned long stack_trace_max_size;
extern arch_spinlock_t stack_trace_max_lock;

extern int stack_tracer_enabled;
void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);
/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif
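
/*
 * Example (editor's illustration): the expected pairing of the helpers
 * above inside a section that already runs with preemption disabled.
 *
 *	preempt_disable();
 *	stack_tracer_disable();
 *	(work that must not recurse into the stack tracer)
 *	stack_tracer_enable();
 *	preempt_enable();
 */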
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

extern int ftrace_text_reserved(const void *start, const void *end);
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
bool is_ftrace_trampoline(unsigned long addr);
/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
};
#define FTRACE_REF_MAX_SHIFT	25
#define FTRACE_FL_BITS		7
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)
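
/*
 * Worked example (editor's illustration) of the split described above:
 * with FTRACE_REF_MAX_SHIFT == 25 and FTRACE_FL_BITS == 7, bits 31..25 of
 * rec->flags hold the FTRACE_FL_* mask and bits 24..0 hold the reference
 * count, so up to FTRACE_REF_MAX == (1UL << 25) - 1 == 33554431 callbacks
 * can trace one site.  A record traced by two callbacks, both saving regs,
 * would have:
 *
 *	rec->flags == FTRACE_FL_ENABLED | FTRACE_FL_REGS | FTRACE_FL_REGS_EN | 2
 *	ftrace_rec_count(rec) == 2
 */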
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};
int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
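
/*
 * Example (editor's illustration): narrowing the hypothetical "example_ops"
 * to a single function before registering it.  Resolving the address via
 * kallsyms_lookup_name() is one option; an address obtained from a kprobe
 * or a module symbol works the same way.
 *
 *	unsigned long ip = kallsyms_lookup_name("schedule");
 *
 *	if (ip)
 *		ftrace_set_filter_ip(&example_ops, ip, 0, 0);
 *	register_ftrace_function(&example_ops);
 *
 * Name-based filtering works too:
 *
 *	ftrace_set_filter(&example_ops, "vfs_read", sizeof("vfs_read") - 1, 1);
 */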
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};
/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Stop saving regs for the function
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};
void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
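
/*
 * Example (editor's illustration): walking every mcount record, as an
 * arch's code-patching path might do.  ftrace_rec_iter_record() returns
 * the current dyn_ftrace record and rec->ip is its call site.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		pr_debug("call site at 0x%lx\n", rec->ip);
 *	}
 */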
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);
void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);
#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
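
/*
 * Sketch (editor's illustration) of the read/compare/write discipline the
 * kerneldoc above asks for.  It is not taken from any arch; "expected" and
 * "new_insn" are assumed to come from the enclosing ftrace_make_*()
 * implementation, arch_write_insn() is a hypothetical helper, and a real
 * arch must use its own safe text-poking mechanism since kernel text is
 * normally mapped read-only.
 *
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *	if (probe_kernel_read(cur, (void *)rec->ip, sizeof(cur)))
 *		return -EFAULT;
 *	if (memcmp(cur, expected, sizeof(cur))) {
 *		ftrace_expected = expected;
 *		return -EINVAL;
 *	}
 *	if (arch_write_insn(rec->ip, new_insn))
 *		return -EPERM;
 *	return 0;
 */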
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}
/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
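
/*
 * Example (editor's illustration) of the intended pairing, with the
 * caller providing its own serialization against other updaters:
 *
 *	int saved = __ftrace_enabled_save();
 *	(run code that must not have ftrace_enabled flipped underneath it)
 *	__ftrace_enabled_restore(saved);
 */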
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}
708 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
709 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
712 * Use defines instead of static inlines because some arches will make code out
713 * of the CALLER_ADDR, when we really want these to be a real nop.
715 # define trace_preempt_on(a0, a1) do { } while (0)
716 # define trace_preempt_off(a0, a1) do { } while (0)
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif
/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	unsigned long long calltime;
	unsigned long long rettime;
	int depth;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);
/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we want it to still be traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);
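
/*
 * Example (editor's illustration): a pair of graph handlers matching the
 * typedefs above.  The entry handler returns nonzero to let this function
 * be traced and 0 to skip it; the return handler sees the timing in the
 * ftrace_graph_ret record.
 *
 *	static int example_graph_entry(struct ftrace_graph_ent *ent)
 *	{
 *		return ent->depth < 3;	(only trace a few levels deep)
 *	}
 *
 *	static void example_graph_return(struct ftrace_graph_ret *ret)
 *	{
 *		(ret->rettime - ret->calltime is the duration in ns)
 *	}
 *
 *	register_ftrace_graph(example_graph_return, example_graph_entry);
 *	(later, when done:)
 *	unregister_ftrace_graph();
 */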
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
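
/*
 * Example (editor's illustration): keeping the graph tracer out of a
 * section of the current task, e.g. around code that would otherwise
 * generate a flood of uninteresting nested entries:
 *
 *	pause_graph_tracing();
 *	(noisy work)
 *	unpause_graph_tracing();
 */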
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}
enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */