fgraph: Add new fgraph_ops structure to enable function graph hooks
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Infrastructure to hook into function calls and returns.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  * Highly modified by Steven Rostedt (VMware).
9  */
10 #include <linux/suspend.h>
11 #include <linux/ftrace.h>
12 #include <linux/slab.h>
13
14 #include <trace/events/sched.h>
15
16 #include "ftrace_internal.h"
17
18 #ifdef CONFIG_DYNAMIC_FTRACE
19 #define ASSIGN_OPS_HASH(opsname, val) \
20         .func_hash              = val, \
21         .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
22 #else
23 #define ASSIGN_OPS_HASH(opsname, val)
24 #endif
25
26 static bool kill_ftrace_graph;
27 int ftrace_graph_active;
28
29 /* Both enabled by default (can be cleared by function_graph tracer flags) */
30 static bool fgraph_sleep_time = true;
31
32 /**
33  * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
34  *
35  * ftrace_graph_stop() is called when a severe error is detected in
36  * the function graph tracing. This function is called by the critical
37  * paths of function graph to keep those paths from doing any more harm.
38  */
39 bool ftrace_graph_is_dead(void)
40 {
41         return kill_ftrace_graph;
42 }
43
44 /**
45  * ftrace_graph_stop - set to permanently disable function graph tracing
46  *
47  * In case of an error in function graph tracing, this is called
48  * to try to keep function graph tracing from causing any more harm.
49  * Usually this is pretty severe and this is called to try to at least
50  * get a warning out to the user.
51  */
52 void ftrace_graph_stop(void)
53 {
54         kill_ftrace_graph = true;
55 }
56
57 /* Add a function return address to the trace stack on thread info. */
58 static int
59 ftrace_push_return_trace(unsigned long ret, unsigned long func,
60                          unsigned long frame_pointer, unsigned long *retp)
61 {
62         unsigned long long calltime;
63         int index;
64
65         if (unlikely(ftrace_graph_is_dead()))
66                 return -EBUSY;
67
68         if (!current->ret_stack)
69                 return -EBUSY;
70
71         /*
72          * We must make sure the ret_stack is tested before we read
73          * anything else.
74          */
75         smp_rmb();
76
77         /* The return trace stack is full */
78         if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
79                 atomic_inc(&current->trace_overrun);
80                 return -EBUSY;
81         }
82
83         calltime = trace_clock_local();
84
85         index = ++current->curr_ret_stack;
86         barrier();
87         current->ret_stack[index].ret = ret;
88         current->ret_stack[index].func = func;
89         current->ret_stack[index].calltime = calltime;
90 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
91         current->ret_stack[index].fp = frame_pointer;
92 #endif
93 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
94         current->ret_stack[index].retp = retp;
95 #endif
96         return 0;
97 }
98
99 int function_graph_enter(unsigned long ret, unsigned long func,
100                          unsigned long frame_pointer, unsigned long *retp)
101 {
102         struct ftrace_graph_ent trace;
103
104         trace.func = func;
105         trace.depth = ++current->curr_ret_depth;
106
107         if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
108                 goto out;
109
110         /* Only trace if the calling function expects to */
111         if (!ftrace_graph_entry(&trace))
112                 goto out_ret;
113
114         return 0;
115  out_ret:
116         current->curr_ret_stack--;
117  out:
118         current->curr_ret_depth--;
119         return -EBUSY;
120 }
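/*
 * Editorial sketch, not part of this file: how an architecture's entry
 * hook is expected to use function_graph_enter(). Only when it returns
 * 0 may the return address be redirected to the arch return trampoline.
 * The names my_prepare_ftrace_return() and return_to_handler below are
 * illustrative stand-ins for the arch-specific pieces.
 */
extern void return_to_handler(void);

static void my_prepare_ftrace_return(unsigned long *parent,
                                     unsigned long self_addr,
                                     unsigned long frame_pointer)
{
        unsigned long old = *parent;

        if (unlikely(ftrace_graph_is_dead()))
                return;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /* Record the call; only hijack the return address on success. */
        if (!function_graph_enter(old, self_addr, frame_pointer, parent))
                *parent = (unsigned long)&return_to_handler;
}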
121
122 /* Retrieve a function return address from the trace stack on thread info. */
123 static void
124 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
125                         unsigned long frame_pointer)
126 {
127         int index;
128
129         index = current->curr_ret_stack;
130
131         if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
132                 ftrace_graph_stop();
133                 WARN_ON(1);
134                 /* Might as well panic, otherwise we have nowhere to go */
135                 *ret = (unsigned long)panic;
136                 return;
137         }
138
139 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
140         /*
141          * The arch may choose to record the frame pointer used
142          * and check it here to make sure that it is what we expect it
143          * to be. If gcc does not set the place holder of the return
144          * address in the frame pointer, and does a copy instead, then
145          * the function graph trace will fail. This test detects this
146          * case.
147          *
148          * Currently, x86_32 compiled with optimize for size (-Os) makes the latest
149          * gcc do the above.
150          *
151          * Note, -mfentry does not use frame pointers, and this test
152          * is not needed if CC_USING_FENTRY is set.
153          */
154         if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
155                 ftrace_graph_stop();
156                 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
157                      "  from func %ps return to %lx\n",
158                      current->ret_stack[index].fp,
159                      frame_pointer,
160                      (void *)current->ret_stack[index].func,
161                      current->ret_stack[index].ret);
162                 *ret = (unsigned long)panic;
163                 return;
164         }
165 #endif
166
167         *ret = current->ret_stack[index].ret;
168         trace->func = current->ret_stack[index].func;
169         trace->calltime = current->ret_stack[index].calltime;
170         trace->overrun = atomic_read(&current->trace_overrun);
171         trace->depth = current->curr_ret_depth--;
172         /*
173          * We still want to trace interrupts coming in if
174          * max_depth is set to 1. Make sure the decrement is
175          * seen before ftrace_graph_return.
176          */
177         barrier();
178 }
179
180 /*
181  * Hibernation protection.
182  * The state of the current task is too unstable during
183  * suspend/restore to disk. We want to protect against that.
184  */
185 static int
186 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
187                                                         void *unused)
188 {
189         switch (state) {
190         case PM_HIBERNATION_PREPARE:
191                 pause_graph_tracing();
192                 break;
193
194         case PM_POST_HIBERNATION:
195                 unpause_graph_tracing();
196                 break;
197         }
198         return NOTIFY_DONE;
199 }
200
201 static struct notifier_block ftrace_suspend_notifier = {
202         .notifier_call = ftrace_suspend_notifier_call,
203 };
204
205 /*
206  * Send the trace to the ring-buffer.
207  * @return the original return address.
208  */
209 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
210 {
211         struct ftrace_graph_ret trace;
212         unsigned long ret;
213
214         ftrace_pop_return_trace(&trace, &ret, frame_pointer);
215         trace.rettime = trace_clock_local();
216         ftrace_graph_return(&trace);
217         /*
218          * The ftrace_graph_return() may still access the current
219          * ret_stack structure, we need to make sure the update of
220          * curr_ret_stack is after that.
221          */
222         barrier();
223         current->curr_ret_stack--;
224
225         if (unlikely(!ret)) {
226                 ftrace_graph_stop();
227                 WARN_ON(1);
228                 /* Might as well panic. What else to do? */
229                 ret = (unsigned long)panic;
230         }
231
232         return ret;
233 }
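/*
 * Editorial sketch, not part of this file: the counterpart of the entry
 * sketch above. The arch return trampoline (normally assembly) that was
 * planted in place of the real return address calls
 * ftrace_return_to_handler() to emit the exit event and to recover
 * where execution should resume. my_current_frame_pointer() and
 * my_jump_to() are illustrative assumptions; a real trampoline also
 * saves and restores the return-value registers around the call.
 */
static void my_return_to_handler_sketch(void)
{
        unsigned long orig_ret;

        orig_ret = ftrace_return_to_handler(my_current_frame_pointer());

        /* Resume at the original call site recorded on the ret_stack. */
        my_jump_to(orig_ret);
}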
234
235 static struct ftrace_ops graph_ops = {
236         .func                   = ftrace_stub,
237         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
238                                    FTRACE_OPS_FL_INITIALIZED |
239                                    FTRACE_OPS_FL_PID |
240                                    FTRACE_OPS_FL_STUB,
241 #ifdef FTRACE_GRAPH_TRAMP_ADDR
242         .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
243         /* trampoline_size is only needed for dynamically allocated tramps */
244 #endif
245         ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
246 };
247
248 void ftrace_graph_sleep_time_control(bool enable)
249 {
250         fgraph_sleep_time = enable;
251 }
252
253 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
254 {
255         return 0;
256 }
257
258 /* The callbacks that hook a function */
259 trace_func_graph_ret_t ftrace_graph_return =
260                         (trace_func_graph_ret_t)ftrace_stub;
261 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
262 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
263
264 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
265 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
266 {
267         int i;
268         int ret = 0;
269         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
270         struct task_struct *g, *t;
271
272         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
273                 ret_stack_list[i] =
274                         kmalloc_array(FTRACE_RETFUNC_DEPTH,
275                                       sizeof(struct ftrace_ret_stack),
276                                       GFP_KERNEL);
277                 if (!ret_stack_list[i]) {
278                         start = 0;
279                         end = i;
280                         ret = -ENOMEM;
281                         goto free;
282                 }
283         }
284
285         read_lock(&tasklist_lock);
286         do_each_thread(g, t) {
287                 if (start == end) {
288                         ret = -EAGAIN;
289                         goto unlock;
290                 }
291
292                 if (t->ret_stack == NULL) {
293                         atomic_set(&t->tracing_graph_pause, 0);
294                         atomic_set(&t->trace_overrun, 0);
295                         t->curr_ret_stack = -1;
296                         t->curr_ret_depth = -1;
297                         /* Make sure the tasks see the -1 first: */
298                         smp_wmb();
299                         t->ret_stack = ret_stack_list[start++];
300                 }
301         } while_each_thread(g, t);
302
303 unlock:
304         read_unlock(&tasklist_lock);
305 free:
306         for (i = start; i < end; i++)
307                 kfree(ret_stack_list[i]);
308         return ret;
309 }
310
311 static void
312 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
313                         struct task_struct *prev, struct task_struct *next)
314 {
315         unsigned long long timestamp;
316         int index;
317
318         /*
319          * Does the user want to count the time a function was asleep?
320          * If so, do not update the time stamps.
321          */
322         if (fgraph_sleep_time)
323                 return;
324
325         timestamp = trace_clock_local();
326
327         prev->ftrace_timestamp = timestamp;
328
329         /* only process tasks that we timestamped */
330         if (!next->ftrace_timestamp)
331                 return;
332
333         /*
334          * Update all the counters in next to make up for the
335          * time next was sleeping.
336          */
337         timestamp -= next->ftrace_timestamp;
338
339         for (index = next->curr_ret_stack; index >= 0; index--)
340                 next->ret_stack[index].calltime += timestamp;
341 }
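/*
 * Editorial worked example for the adjustment above (with sleep time
 * accounting disabled): a traced function is entered at t=100us, its
 * task is scheduled out at t=110us and back in at t=150us, i.e. it
 * slept for 40us. The loop above adds 40us to every pending calltime,
 * so when the function returns at t=180us its reported duration is
 * 180 - (100 + 40) = 40us of on-CPU time instead of 80us of wall time.
 */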
342
343 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
344 {
345         if (!ftrace_ops_test(&global_ops, trace->func, NULL))
346                 return 0;
347         return __ftrace_graph_entry(trace);
348 }
349
350 /*
351  * The function graph tracer should only trace the functions defined
352  * by set_ftrace_filter and set_ftrace_notrace. If another function
353  * tracer ops is registered, the graph tracer must test each
354  * function against the global ops, and not just trace any function
355  * that any ftrace_ops has registered.
356  */
357 void update_function_graph_func(void)
358 {
359         struct ftrace_ops *op;
360         bool do_test = false;
361
362         /*
363          * The graph and global ops share the same set of functions
364          * to test. If any other ops is on the list, then
365          * the graph tracing needs to test if it's the function
366          * it should call.
367          */
368         do_for_each_ftrace_op(op, ftrace_ops_list) {
369                 if (op != &global_ops && op != &graph_ops &&
370                     op != &ftrace_list_end) {
371                         do_test = true;
372                         /* in double loop, break out with goto */
373                         goto out;
374                 }
375         } while_for_each_ftrace_op(op);
376  out:
377         if (do_test)
378                 ftrace_graph_entry = ftrace_graph_entry_test;
379         else
380                 ftrace_graph_entry = __ftrace_graph_entry;
381 }
382
383 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
384
385 static void
386 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
387 {
388         atomic_set(&t->tracing_graph_pause, 0);
389         atomic_set(&t->trace_overrun, 0);
390         t->ftrace_timestamp = 0;
391         /* make curr_ret_stack visible before we add the ret_stack */
392         smp_wmb();
393         t->ret_stack = ret_stack;
394 }
395
396 /*
397  * Allocate a return stack for the idle task. May be the first
398  * time through, or it may be done by CPU hotplug online.
399  */
400 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
401 {
402         t->curr_ret_stack = -1;
403         t->curr_ret_depth = -1;
404         /*
405          * The idle task has no parent; it either has its own
406          * stack or no stack at all.
407          */
408         if (t->ret_stack)
409                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
410
411         if (ftrace_graph_active) {
412                 struct ftrace_ret_stack *ret_stack;
413
414                 ret_stack = per_cpu(idle_ret_stack, cpu);
415                 if (!ret_stack) {
416                         ret_stack =
417                                 kmalloc_array(FTRACE_RETFUNC_DEPTH,
418                                               sizeof(struct ftrace_ret_stack),
419                                               GFP_KERNEL);
420                         if (!ret_stack)
421                                 return;
422                         per_cpu(idle_ret_stack, cpu) = ret_stack;
423                 }
424                 graph_init_task(t, ret_stack);
425         }
426 }
427
428 /* Allocate a return stack for a newly created task */
429 void ftrace_graph_init_task(struct task_struct *t)
430 {
431         /* Make sure we do not use the parent ret_stack */
432         t->ret_stack = NULL;
433         t->curr_ret_stack = -1;
434         t->curr_ret_depth = -1;
435
436         if (ftrace_graph_active) {
437                 struct ftrace_ret_stack *ret_stack;
438
439                 ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
440                                           sizeof(struct ftrace_ret_stack),
441                                           GFP_KERNEL);
442                 if (!ret_stack)
443                         return;
444                 graph_init_task(t, ret_stack);
445         }
446 }
447
448 void ftrace_graph_exit_task(struct task_struct *t)
449 {
450         struct ftrace_ret_stack *ret_stack = t->ret_stack;
451
452         t->ret_stack = NULL;
453         /* NULL must become visible to IRQs before we free it: */
454         barrier();
455
456         kfree(ret_stack);
457 }
458
459 /* Allocate a return stack for each task */
460 static int start_graph_tracing(void)
461 {
462         struct ftrace_ret_stack **ret_stack_list;
463         int ret, cpu;
464
465         ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
466                                        sizeof(struct ftrace_ret_stack *),
467                                        GFP_KERNEL);
468
469         if (!ret_stack_list)
470                 return -ENOMEM;
471
472         /* The cpu_boot init_task->ret_stack will never be freed */
473         for_each_online_cpu(cpu) {
474                 if (!idle_task(cpu)->ret_stack)
475                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
476         }
477
478         do {
479                 ret = alloc_retstack_tasklist(ret_stack_list);
480         } while (ret == -EAGAIN);
481
482         if (!ret) {
483                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
484                 if (ret)
485                         pr_info("ftrace_graph: Couldn't activate tracepoint"
486                                 " probe to kernel_sched_switch\n");
487         }
488
489         kfree(ret_stack_list);
490         return ret;
491 }
492
493 int register_ftrace_graph(struct fgraph_ops *gops)
494 {
495         int ret = 0;
496
497         mutex_lock(&ftrace_lock);
498
499         /* we currently allow only one tracer registered at a time */
500         if (ftrace_graph_active) {
501                 ret = -EBUSY;
502                 goto out;
503         }
504
505         register_pm_notifier(&ftrace_suspend_notifier);
506
507         ftrace_graph_active++;
508         ret = start_graph_tracing();
509         if (ret) {
510                 ftrace_graph_active--;
511                 goto out;
512         }
513
514         ftrace_graph_return = gops->retfunc;
515
516         /*
517          * Update the indirect function to the entryfunc, and the
518          * function that gets called to the entry_test first. Then
519          * call the update fgraph entry function to determine if
520          * the entryfunc should be called directly or not.
521          */
522         __ftrace_graph_entry = gops->entryfunc;
523         ftrace_graph_entry = ftrace_graph_entry_test;
524         update_function_graph_func();
525
526         ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
527 out:
528         mutex_unlock(&ftrace_lock);
529         return ret;
530 }
531
532 void unregister_ftrace_graph(struct fgraph_ops *gops)
533 {
534         mutex_lock(&ftrace_lock);
535
536         if (unlikely(!ftrace_graph_active))
537                 goto out;
538
539         ftrace_graph_active--;
540         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
541         ftrace_graph_entry = ftrace_graph_entry_stub;
542         __ftrace_graph_entry = ftrace_graph_entry_stub;
543         ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
544         unregister_pm_notifier(&ftrace_suspend_notifier);
545         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
546
547  out:
548         mutex_unlock(&ftrace_lock);
549 }
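/*
 * Editorial usage sketch, not part of this file: registering a tracer
 * through the fgraph_ops structure that register_ftrace_graph() and
 * unregister_ftrace_graph() above take. The callback bodies are
 * placeholders; the entryfunc/retfunc members and the callback
 * signatures follow the code above.
 */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
        /* Return nonzero to trace this function, 0 to skip it. */
        return 1;
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
        /* trace->rettime - trace->calltime gives the function's duration. */
}

static struct fgraph_ops my_gops = {
        .entryfunc      = my_graph_entry,
        .retfunc        = my_graph_return,
};

/*
 * register_ftrace_graph(&my_gops) starts tracing (only one user is
 * allowed at a time); unregister_ftrace_graph(&my_gops) tears it down.
 */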