// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)     "trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */

#include "trace_dynevent.h"
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
#define MAX_KPROBE_CMDLINE_SIZE 1024

/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
static bool kprobe_boot_events_enabled __initdata;

static int __init set_kprobe_boot_events(char *str)
{
        strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
        return 0;
}
__setup("kprobe_event=", set_kprobe_boot_events);
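
/*
 * Example, as documented for the "kprobe_event=" boot parameter: probe
 * definitions use commas instead of spaces and are separated by semicolons,
 * e.g.
 *
 *      kprobe_event=p,vfs_read,$arg1,$arg2;r,vfs_read,$retval
 *
 * setup_boot_kprobe_events() below turns the commas back into spaces and
 * runs each piece as an ordinary kprobe_events command.
 */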

static int trace_kprobe_create(int argc, const char **argv);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
static bool trace_kprobe_match(const char *system, const char *event,
                               struct dyn_event *ev);

static struct dyn_event_operations trace_kprobe_ops = {
        .create = trace_kprobe_create,
        .show = trace_kprobe_show,
        .is_busy = trace_kprobe_is_busy,
        .free = trace_kprobe_release,
        .match = trace_kprobe_match,
};

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
        struct dyn_event        devent;
        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
        unsigned long __percpu *nhit;
        const char              *symbol;        /* symbol name */
        struct trace_probe      tp;
};

static bool is_trace_kprobe(struct dyn_event *ev)
{
        return ev->ops == &trace_kprobe_ops;
}

static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
{
        return container_of(ev, struct trace_kprobe, devent);
}

/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos:        the struct trace_kprobe * for each entry
 * @dpos:       the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_kprobe(pos, dpos)        \
        for_each_dyn_event(dpos)                \
                if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))

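/*
 * A minimal usage sketch (this mirrors find_trace_kprobe() below):
 *
 *      struct dyn_event *pos;
 *      struct trace_kprobe *tk;
 *
 *      for_each_trace_kprobe(tk, pos)
 *              pr_info("%s\n", trace_kprobe_symbol(tk));
 */
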
#define SIZEOF_TRACE_KPROBE(n)                          \
        (offsetof(struct trace_kprobe, tp.args) +       \
        (sizeof(struct probe_arg) * (n)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
        return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
        return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
        return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
        return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
                                                 struct module *mod)
{
        int len = strlen(mod->name);
        const char *name = trace_kprobe_symbol(tk);
        return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}
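
/*
 * Illustration (writer's example): a probe on "ext4:ext4_sync_file" keeps
 * the module name before the ':', so trace_kprobe_within_module() matches
 * it against a module named "ext4".
 */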

static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
        char *p;
        bool ret;

        if (!tk->symbol)
                return false;
        p = strchr(tk->symbol, ':');
        if (!p)
                return true;
        *p = '\0';
        mutex_lock(&module_mutex);
        ret = !!find_module(tk->symbol);
        mutex_unlock(&module_mutex);
        *p = ':';

        return ret;
}

static bool trace_kprobe_is_busy(struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);

        return trace_probe_is_enabled(&tk->tp);
}

static bool trace_kprobe_match(const char *system, const char *event,
                               struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);

        return strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
            (!system || strcmp(tk->tp.call.class->system, system) == 0);
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
        unsigned long nhit = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                nhit += *per_cpu_ptr(tk->nhit, cpu);

        return nhit;
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
        unsigned long addr;

        if (tk->symbol) {
                addr = (unsigned long)
                        kallsyms_lookup_name(trace_kprobe_symbol(tk));
                if (addr)
                        addr += tk->rp.kp.offset;
        } else {
                addr = (unsigned long)tk->rp.kp.addr;
        }
        return addr;
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

        return kprobe_on_func_entry(tk->rp.kp.addr,
                        tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
                        tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

        return within_error_injection_list(trace_kprobe_address(tk));
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
                                             const char *event,
                                             void *addr,
                                             const char *symbol,
                                             unsigned long offs,
                                             int maxactive,
                                             int nargs, bool is_return)
{
        struct trace_kprobe *tk;
        int ret = -ENOMEM;

        tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
        if (!tk)
                return ERR_PTR(ret);

        tk->nhit = alloc_percpu(unsigned long);
        if (!tk->nhit)
                goto error;

        if (symbol) {
                tk->symbol = kstrdup(symbol, GFP_KERNEL);
                if (!tk->symbol)
                        goto error;
                tk->rp.kp.symbol_name = tk->symbol;
                tk->rp.kp.offset = offs;
        } else
                tk->rp.kp.addr = addr;

        if (is_return)
                tk->rp.handler = kretprobe_dispatcher;
        else
                tk->rp.kp.pre_handler = kprobe_dispatcher;

        tk->rp.maxactive = maxactive;

        if (!event || !group) {
                ret = -EINVAL;
                goto error;
        }

        tk->tp.call.class = &tk->tp.class;
        tk->tp.call.name = kstrdup(event, GFP_KERNEL);
        if (!tk->tp.call.name)
                goto error;

        tk->tp.class.system = kstrdup(group, GFP_KERNEL);
        if (!tk->tp.class.system)
                goto error;

        dyn_event_init(&tk->devent, &trace_kprobe_ops);
        INIT_LIST_HEAD(&tk->tp.files);
        return tk;
error:
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
        free_percpu(tk->nhit);
        kfree(tk);
        return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
        int i;

        if (!tk)
                return;

        for (i = 0; i < tk->tp.nr_args; i++)
                traceprobe_free_probe_arg(&tk->tp.args[i]);

        kfree(tk->tp.call.class->system);
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
        free_percpu(tk->nhit);
        kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
                                              const char *group)
{
        struct dyn_event *pos;
        struct trace_kprobe *tk;

        for_each_trace_kprobe(tk, pos)
                if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
                    strcmp(tk->tp.call.class->system, group) == 0)
                        return tk;
        return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
        int ret = 0;

        if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
                if (trace_kprobe_is_return(tk))
                        ret = enable_kretprobe(&tk->rp);
                else
                        ret = enable_kprobe(&tk->rp.kp);
        }

        return ret;
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
        struct event_file_link *link;
        int ret = 0;

        if (file) {
                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        ret = -ENOMEM;
                        goto out;
                }

                link->file = file;
                list_add_tail_rcu(&link->list, &tk->tp.files);

                tk->tp.flags |= TP_FLAG_TRACE;
                ret = __enable_trace_kprobe(tk);
                if (ret) {
                        list_del_rcu(&link->list);
                        kfree(link);
                        tk->tp.flags &= ~TP_FLAG_TRACE;
                }

        } else {
                tk->tp.flags |= TP_FLAG_PROFILE;
                ret = __enable_trace_kprobe(tk);
                if (ret)
                        tk->tp.flags &= ~TP_FLAG_PROFILE;
        }
 out:
        return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
        struct event_file_link *link = NULL;
        int wait = 0;
        int ret = 0;

        if (file) {
                link = find_event_file_link(&tk->tp, file);
                if (!link) {
                        ret = -EINVAL;
                        goto out;
                }

                list_del_rcu(&link->list);
                wait = 1;
                if (!list_empty(&tk->tp.files))
                        goto out;

                tk->tp.flags &= ~TP_FLAG_TRACE;
        } else
                tk->tp.flags &= ~TP_FLAG_PROFILE;

        if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
                if (trace_kprobe_is_return(tk))
                        disable_kretprobe(&tk->rp);
                else
                        disable_kprobe(&tk->rp.kp);
                wait = 1;
        }

        /*
         * If tk is not added to any list, it must be a local trace_kprobe
         * created with perf_event_open. We don't need to wait for these
         * trace_kprobes.
         */
        if (list_empty(&tk->devent.list))
                wait = 0;
 out:
        if (wait) {
                /*
                 * Synchronize with kprobe_trace_func/kretprobe_trace_func
                 * to ensure the probe is disabled (all running handlers have
                 * finished). This is needed not only for the kfree() below,
                 * but also for the caller: trace_remove_event_call() relies
                 * on it when releasing event_call related objects, which are
                 * accessed by kprobe_trace_func/kretprobe_trace_func.
                 */
                synchronize_rcu();
                kfree(link);    /* Ignored if link == NULL */
        }

        return ret;
}

#if defined(CONFIG_KPROBES_ON_FTRACE) && \
        !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool within_notrace_func(struct trace_kprobe *tk)
{
        unsigned long offset, size, addr;

        addr = trace_kprobe_address(tk);
        if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
                return false;

        /* Get the entry address of the target function */
        addr -= offset;

        /*
         * Since ftrace_location_range() does inclusive range check, we need
         * to subtract 1 byte from the end address.
         */
        return !ftrace_location_range(addr, addr + size - 1);
}
#else
#define within_notrace_func(tk) (false)
#endif

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
        int i, ret;

        if (trace_probe_is_registered(&tk->tp))
                return -EINVAL;

        if (within_notrace_func(tk)) {
                pr_warn("Could not probe notrace function %s\n",
                        trace_kprobe_symbol(tk));
                return -EINVAL;
        }

        for (i = 0; i < tk->tp.nr_args; i++) {
                ret = traceprobe_update_arg(&tk->tp.args[i]);
                if (ret)
                        return ret;
        }

        /* Set/clear disabled flag according to tp->flags */
        if (trace_probe_is_enabled(&tk->tp))
                tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
        else
                tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

        if (trace_kprobe_is_return(tk))
                ret = register_kretprobe(&tk->rp);
        else
                ret = register_kprobe(&tk->rp.kp);

        if (ret == 0)
                tk->tp.flags |= TP_FLAG_REGISTERED;
        return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
        if (trace_probe_is_registered(&tk->tp)) {
                if (trace_kprobe_is_return(tk))
                        unregister_kretprobe(&tk->rp);
                else
                        unregister_kprobe(&tk->rp.kp);
                tk->tp.flags &= ~TP_FLAG_REGISTERED;
                /* Cleanup kprobe for reuse */
                if (tk->rp.kp.symbol_name)
                        tk->rp.kp.addr = NULL;
        }
}

/* Unregister a trace_probe and probe_event */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
        /* Enabled event can not be unregistered */
        if (trace_probe_is_enabled(&tk->tp))
                return -EBUSY;

        /* Will fail if probe is being used by ftrace or perf */
        if (unregister_kprobe_event(tk))
                return -EBUSY;

        __unregister_trace_kprobe(tk);
        dyn_event_remove(&tk->devent);

        return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
        struct trace_kprobe *old_tk;
        int ret;

        mutex_lock(&event_mutex);

        /* Delete old (same name) event if it exists */
        old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
                        tk->tp.call.class->system);
        if (old_tk) {
                ret = unregister_trace_kprobe(old_tk);
                if (ret < 0)
                        goto end;
                free_trace_kprobe(old_tk);
        }

        /* Register new event */
        ret = register_kprobe_event(tk);
        if (ret) {
                pr_warn("Failed to register probe event(%d)\n", ret);
                goto end;
        }

        /* Register k*probe */
        ret = __register_trace_kprobe(tk);
        if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
                pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
                ret = 0;
        }

        if (ret < 0)
                unregister_kprobe_event(tk);
        else
                dyn_event_add(&tk->devent);

end:
        mutex_unlock(&event_mutex);
        return ret;
}

/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
                                       unsigned long val, void *data)
{
        struct module *mod = data;
        struct dyn_event *pos;
        struct trace_kprobe *tk;
        int ret;

        if (val != MODULE_STATE_COMING)
                return NOTIFY_DONE;

        /* Update probes on coming module */
        mutex_lock(&event_mutex);
        for_each_trace_kprobe(tk, pos) {
                if (trace_kprobe_within_module(tk, mod)) {
                        /* Don't need to check busy - this should have gone. */
                        __unregister_trace_kprobe(tk);
                        ret = __register_trace_kprobe(tk);
                        if (ret)
                                pr_warn("Failed to re-register probe %s on %s: %d\n",
                                        trace_event_name(&tk->tp.call),
                                        mod->name, ret);
                }
        }
        mutex_unlock(&event_mutex);

        return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
        .notifier_call = trace_kprobe_module_callback,
        .priority = 1   /* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
        while (*name++ != '\0')
                if (*name == ':' || *name == '.')
                        *name = '_';
}

static int trace_kprobe_create(int argc, const char *argv[])
{
        /*
         * Argument syntax:
         *  - Add kprobe:
         *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
         *  - Add kretprobe:
         *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
         * Fetch args:
         *  $retval     : fetch return value
         *  $stack      : fetch stack address
         *  $stackN     : fetch Nth of stack (N:0-)
         *  $comm       : fetch current task comm
         *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
         *  %REG        : fetch register REG
         * Dereferencing memory fetch:
         *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
         * Alias name of args:
         *  NAME=FETCHARG : set NAME as alias of FETCHARG.
         * Type of args:
         *  FETCHARG:TYPE : use TYPE instead of unsigned long.
         */
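        /*
         * Worked example (writer's illustration): writing
         *
         *      p:myprobe kprobe_trace_selftest_target $stack $stack0
         *
         * to kprobe_events reaches this function as
         * argv = { "p:myprobe", "kprobe_trace_selftest_target",
         *          "$stack", "$stack0" } with argc == 4; "myprobe" becomes
         * the event name in the default "kprobes" group.
         */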
        struct trace_kprobe *tk = NULL;
        int i, len, ret = 0;
        bool is_return = false;
        char *symbol = NULL, *tmp = NULL;
        const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
        int maxactive = 0;
        long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];
        unsigned int flags = TPARG_FL_KERNEL;

        switch (argv[0][0]) {
        case 'r':
                is_return = true;
                flags |= TPARG_FL_RETURN;
                break;
        case 'p':
                break;
        default:
                return -ECANCELED;
        }
        if (argc < 2)
                return -ECANCELED;

        trace_probe_log_init("trace_kprobe", argc, argv);

        event = strchr(&argv[0][1], ':');
        if (event)
                event++;

        if (isdigit(argv[0][1])) {
                if (!is_return) {
                        trace_probe_log_err(1, MAXACT_NO_KPROBE);
                        goto parse_error;
                }
                if (event)
                        len = event - &argv[0][1] - 1;
                else
                        len = strlen(&argv[0][1]);
                if (len > MAX_EVENT_NAME_LEN - 1) {
                        trace_probe_log_err(1, BAD_MAXACT);
                        goto parse_error;
                }
                memcpy(buf, &argv[0][1], len);
                buf[len] = '\0';
                ret = kstrtouint(buf, 0, &maxactive);
                if (ret || !maxactive) {
                        trace_probe_log_err(1, BAD_MAXACT);
                        goto parse_error;
                }
                /* kretprobe instances are iterated over via a list. The
                 * maximum should stay reasonable.
                 */
                if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
                        trace_probe_log_err(1, MAXACT_TOO_BIG);
                        goto parse_error;
                }
        }

        /* Try to parse an address. If that fails, try to read the
         * input as a symbol. */
        if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
                trace_probe_log_set_index(1);
                /* Check whether uprobe event specified */
                if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
                        ret = -ECANCELED;
                        goto error;
                }
                /* a symbol specified */
                symbol = kstrdup(argv[1], GFP_KERNEL);
                if (!symbol)
                        return -ENOMEM;
                /* TODO: support .init module functions */
                ret = traceprobe_split_symbol_offset(symbol, &offset);
                if (ret || offset < 0 || offset > UINT_MAX) {
                        trace_probe_log_err(0, BAD_PROBE_ADDR);
                        goto parse_error;
                }
                if (kprobe_on_func_entry(NULL, symbol, offset))
                        flags |= TPARG_FL_FENTRY;
                if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
                        trace_probe_log_err(0, BAD_RETPROBE);
                        goto parse_error;
                }
        }

        trace_probe_log_set_index(0);
        if (event) {
                ret = traceprobe_parse_event_name(&event, &group, buf,
                                                  event - argv[0]);
                if (ret)
                        goto parse_error;
        } else {
                /* Make a new event name */
                if (symbol)
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
                                 is_return ? 'r' : 'p', symbol, offset);
                else
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
                                 is_return ? 'r' : 'p', addr);
                sanitize_event_name(buf);
                event = buf;
        }

        /* setup a probe */
        tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
                               argc - 2, is_return);
        if (IS_ERR(tk)) {
                ret = PTR_ERR(tk);
                /* This must return -ENOMEM, else there is a bug */
                WARN_ON_ONCE(ret != -ENOMEM);
                goto out;       /* We know tk is not allocated */
        }
        argc -= 2; argv += 2;

        /* parse arguments */
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
                tmp = kstrdup(argv[i], GFP_KERNEL);
                if (!tmp) {
                        ret = -ENOMEM;
                        goto error;
                }

                trace_probe_log_set_index(i + 2);
                ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
                kfree(tmp);
                if (ret)
                        goto error;     /* This can be -ENOMEM */
        }

        ret = register_trace_kprobe(tk);
        if (ret) {
                trace_probe_log_set_index(1);
                if (ret == -EILSEQ)
                        trace_probe_log_err(0, BAD_INSN_BNDRY);
                else if (ret == -ENOENT)
                        trace_probe_log_err(0, BAD_PROBE_ADDR);
                else if (ret != -ENOMEM)
                        trace_probe_log_err(0, FAIL_REG_PROBE);
                goto error;
        }

out:
        trace_probe_log_clear();
        kfree(symbol);
        return ret;

parse_error:
        ret = -EINVAL;
error:
        free_trace_kprobe(tk);
        goto out;
}

static int create_or_delete_trace_kprobe(int argc, char **argv)
{
        int ret;

        if (argv[0][0] == '-')
                return dyn_event_release(argc, argv, &trace_kprobe_ops);

        ret = trace_kprobe_create(argc, (const char **)argv);
        return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_kprobe_release(struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);
        int ret = unregister_trace_kprobe(tk);

        if (!ret)
                free_trace_kprobe(tk);
        return ret;
}

static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);
        int i;

        seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
        seq_printf(m, ":%s/%s", tk->tp.call.class->system,
                        trace_event_name(&tk->tp.call));

        if (!tk->symbol)
                seq_printf(m, " 0x%p", tk->rp.kp.addr);
        else if (tk->rp.kp.offset)
                seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
                           tk->rp.kp.offset);
        else
                seq_printf(m, " %s", trace_kprobe_symbol(tk));

        for (i = 0; i < tk->tp.nr_args; i++)
                seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
        seq_putc(m, '\n');

        return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;

        if (!is_trace_kprobe(ev))
                return 0;

        return trace_kprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
        int ret;

        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&trace_kprobe_ops);
                if (ret < 0)
                        return ret;
        }

        return seq_open(file, &probes_seq_op);
}

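/*
 * Writes to the tracefs "kprobe_events" file land here; each line is parsed
 * and run through create_or_delete_trace_kprobe(). For example (illustration
 * only; the path depends on where tracefs is mounted):
 *
 *      echo 'p:myprobe kprobe_trace_selftest_target $stack0' \
 *              >> /sys/kernel/debug/tracing/kprobe_events
 */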
static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                       create_or_delete_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;
        struct trace_kprobe *tk;

        if (!is_trace_kprobe(ev))
                return 0;

        tk = to_trace_kprobe(ev);
        seq_printf(m, "  %-44s %15lu %15lu\n",
                   trace_event_name(&tk->tp.call),
                   trace_kprobe_nhit(tk),
                   tk->rp.kp.nmissed);

        return 0;
}

static const struct seq_operations profile_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

/* Kprobe specific fetch functions */

/* Return the length of the string, including the terminating NUL byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
        int ret, len = 0;
        u8 c;

        do {
                ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
                len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);

        return (ret < 0) ? ret : len;
}

/* Return the length of the string, including the terminating NUL byte */
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
        const void __user *uaddr =  (__force const void __user *)addr;

        return strnlen_unsafe_user(uaddr, MAX_STRING_SIZE);
}

/*
 * Fetch a null-terminated string. The caller MUST set *(u32 *)dest to the
 * max length and the relative data location.
 */
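/*
 * Writer's note on the data_loc convention used by get_loc_len() and
 * make_data_loc(): the u32 packs the string length into the upper 16 bits
 * and the offset of the data, relative to the entry base, into the lower
 * 16 bits.
 */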
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
        int maxlen = get_loc_len(*(u32 *)dest);
        void *__dest;
        long ret;

        if (unlikely(!maxlen))
                return -ENOMEM;

        __dest = get_loc_data(dest, base);

        /*
         * Try to get string again, since the string can be changed while
         * probing.
         */
        ret = strncpy_from_unsafe(__dest, (void *)addr, maxlen);
        if (ret >= 0)
                *(u32 *)dest = make_data_loc(ret, __dest - base);

        return ret;
}

/*
 * Fetch a null-terminated string from user. The caller MUST set *(u32 *)dest
 * to the max length and the relative data location.
 */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
        const void __user *uaddr =  (__force const void __user *)addr;
        int maxlen = get_loc_len(*(u32 *)dest);
        void *__dest;
        long ret;

        if (unlikely(!maxlen))
                return -ENOMEM;

        __dest = get_loc_data(dest, base);

        ret = strncpy_from_unsafe_user(__dest, uaddr, maxlen);
        if (ret >= 0)
                *(u32 *)dest = make_data_loc(ret, __dest - base);

        return ret;
}

static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
        return probe_kernel_read(dest, src, size);
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
        const void __user *uaddr =  (__force const void __user *)src;

        return probe_user_read(dest, uaddr, size);
}

/* Note that we don't verify the fetch program, since it does not come from user space */
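/*
 * Sketch of a fetch program (writer's illustration): an argument such as
 * "+8(%di):u32" is pre-compiled by the probe-argument parser into a sequence
 * like FETCH_OP_REG (read %di, handled here) followed by dereference and
 * store ops handled by process_fetch_insn_bottom(). This function only runs
 * the first stage: producing the initial value from the probed context.
 */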
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
                   void *base)
{
        unsigned long val;

retry:
        /* 1st stage: get value from context */
        switch (code->op) {
        case FETCH_OP_REG:
                val = regs_get_register(regs, code->param);
                break;
        case FETCH_OP_STACK:
                val = regs_get_kernel_stack_nth(regs, code->param);
                break;
        case FETCH_OP_STACKP:
                val = kernel_stack_pointer(regs);
                break;
        case FETCH_OP_RETVAL:
                val = regs_return_value(regs);
                break;
        case FETCH_OP_IMM:
                val = code->immediate;
                break;
        case FETCH_OP_COMM:
                val = (unsigned long)current->comm;
                break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
        case FETCH_OP_ARG:
                val = regs_get_kernel_argument(regs, code->param);
                break;
#endif
        case FETCH_NOP_SYMBOL:  /* Ignore a place holder */
                code++;
                goto retry;
        default:
                return -EILSEQ;
        }
        code++;

        return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn);

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
                    struct trace_event_file *trace_file)
{
        struct kprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, dsize, pc;
        unsigned long irq_flags;
        struct trace_event_call *call = &tk->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        dsize = __get_data_size(&tk->tp, regs);
        size = sizeof(*entry) + tk->tp.size + dsize;

        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type,
                                                size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->ip = (unsigned long)tk->rp.kp.addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

        event_trigger_unlock_commit_regs(trace_file, buffer, event,
                                         entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tk->tp.files, list)
                __kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                       struct pt_regs *regs,
                       struct trace_event_file *trace_file)
{
        struct kretprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, pc, dsize;
        unsigned long irq_flags;
        struct trace_event_call *call = &tk->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        dsize = __get_data_size(&tk->tp, regs);
        size = sizeof(*entry) + tk->tp.size + dsize;

        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type,
                                                size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

        event_trigger_unlock_commit_regs(trace_file, buffer, event,
                                         entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                     struct pt_regs *regs)
{
        struct event_file_link *link;

        list_for_each_entry_rcu(link, &tk->tp.files, list)
                __kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct kprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;

        field = (struct kprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);

        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        if (print_probe_args(s, tp->args, tp->nr_args,
                             (u8 *)&field[1], field) < 0)
                goto out;

        trace_seq_putc(s, '\n');
 out:
        return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
                      struct trace_event *event)
{
        struct kretprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;

        field = (struct kretprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);

        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_puts(s, " <- ");

        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        if (print_probe_args(s, tp->args, tp->nr_args,
                             (u8 *)&field[1], field) < 0)
                goto out;

        trace_seq_putc(s, '\n');

 out:
        return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret;
        struct kprobe_trace_entry_head field;
        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

        return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret;
        struct kretprobe_trace_entry_head field;
        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

        return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call)) {
                unsigned long orig_ip = instruction_pointer(regs);
                int ret;

                ret = trace_call_bpf(call, regs);

                /*
                 * We need to check and see if we modified the pc of the
                 * pt_regs, and if so return 1 so that we don't do the
                 * single stepping.
                 */
                if (orig_ip != instruction_pointer(regs))
                        return 1;
                if (!ret)
                        return 0;
        }

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return 0;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
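        /*
         * Writer's note on this common perf idiom: the raw sample is
         * preceded by a u32 size word in the perf buffer, so rounding
         * (__size + sizeof(u32)) up to a u64 boundary and then subtracting
         * the u32 again keeps the whole raw record 8-byte aligned.
         */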
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return 0;

        entry->ip = (unsigned long)tk->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
        return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                    struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
                return;

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;

        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);

int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
                        const char **symbol, u64 *probe_offset,
                        u64 *probe_addr, bool perf_type_tracepoint)
{
        const char *pevent = trace_event_name(event->tp_event);
        const char *group = event->tp_event->class->system;
        struct trace_kprobe *tk;

        if (perf_type_tracepoint)
                tk = find_trace_kprobe(pevent, group);
        else
                tk = event->tp_event->data;
        if (!tk)
                return -EINVAL;

        *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
                                              : BPF_FD_TYPE_KPROBE;
        if (tk->symbol) {
                *symbol = tk->symbol;
                *probe_offset = tk->rp.kp.offset;
                *probe_addr = 0;
        } else {
                *symbol = NULL;
                *probe_offset = 0;
                *probe_addr = (unsigned long)tk->rp.kp.addr;
        }
        return 0;
}
#endif  /* CONFIG_PERF_EVENTS */

/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * locklessly, but it cannot race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
                           enum trace_reg type, void *data)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return enable_trace_kprobe(tk, file);
        case TRACE_REG_UNREGISTER:
                return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return enable_trace_kprobe(tk, NULL);
        case TRACE_REG_PERF_UNREGISTER:
                return disable_trace_kprobe(tk, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
        int ret = 0;

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                ret = kprobe_perf_func(tk, regs);
#endif
        return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                kretprobe_perf_func(tk, ri, regs);
#endif
        return 0;       /* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
        .trace          = print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
        .trace          = print_kprobe_event
};

static inline void init_trace_event_call(struct trace_kprobe *tk,
                                         struct trace_event_call *call)
{
        INIT_LIST_HEAD(&call->class->fields);
        if (trace_kprobe_is_return(tk)) {
                call->event.funcs = &kretprobe_funcs;
                call->class->define_fields = kretprobe_event_define_fields;
        } else {
                call->event.funcs = &kprobe_funcs;
                call->class->define_fields = kprobe_event_define_fields;
        }

        call->flags = TRACE_EVENT_FL_KPROBE;
        call->class->reg = kprobe_register;
        call->data = tk;
}

static int register_kprobe_event(struct trace_kprobe *tk)
{
        struct trace_event_call *call = &tk->tp.call;
        int ret = 0;

        init_trace_event_call(tk, call);

        if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
                return -ENOMEM;
        ret = register_trace_event(&call->event);
        if (!ret) {
                kfree(call->print_fmt);
                return -ENODEV;
        }
        ret = trace_add_event_call(call);
        if (ret) {
                pr_info("Failed to register kprobe event: %s\n",
                        trace_event_name(call));
                kfree(call->print_fmt);
                unregister_trace_event(&call->event);
        }
        return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
        int ret;

        /* tp->event is unregistered in trace_remove_event_call() */
        ret = trace_remove_event_call(&tk->tp.call);
        if (!ret)
                kfree(tk->tp.call.print_fmt);
        return ret;
}

#ifdef CONFIG_PERF_EVENTS
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
                          bool is_return)
{
        struct trace_kprobe *tk;
        int ret;
        char *event;

        /*
         * Local trace_kprobes are not added to dyn_event, so they are never
         * found by find_trace_kprobe(). Therefore, duplicate names are not a
         * concern here.
         */
        event = func ? func : "DUMMY_EVENT";

        tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
                                offs, 0 /* maxactive */, 0 /* nargs */,
                                is_return);

        if (IS_ERR(tk)) {
                pr_info("Failed to allocate trace_probe.(%d)\n",
                        (int)PTR_ERR(tk));
                return ERR_CAST(tk);
        }

        init_trace_event_call(tk, &tk->tp.call);

        if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        ret = __register_trace_kprobe(tk);
        if (ret < 0) {
                kfree(tk->tp.call.print_fmt);
                goto error;
        }

        return &tk->tp.call;
error:
        free_trace_kprobe(tk);
        return ERR_PTR(ret);
}

void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
        struct trace_kprobe *tk;

        tk = container_of(event_call, struct trace_kprobe, tp.call);

        if (trace_probe_is_enabled(&tk->tp)) {
                WARN_ON(1);
                return;
        }

        __unregister_trace_kprobe(tk);

        kfree(tk->tp.call.print_fmt);
        free_trace_kprobe(tk);
}
#endif /* CONFIG_PERF_EVENTS */

static __init void enable_boot_kprobe_events(void)
{
        struct trace_array *tr = top_trace_array();
        struct trace_event_file *file;
        struct trace_kprobe *tk;
        struct dyn_event *pos;

        mutex_lock(&event_mutex);
        for_each_trace_kprobe(tk, pos) {
                list_for_each_entry(file, &tr->events, list)
                        if (file->event_call == &tk->tp.call)
                                trace_event_enable_disable(file, 1, 0);
        }
        mutex_unlock(&event_mutex);
}

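/*
 * Worked example (writer's illustration): booting with
 *
 *      kprobe_event=p,vfs_read,$arg1,$arg2;r,vfs_read,$retval
 *
 * leaves that string in kprobe_boot_events_buf; strreplace() turns the
 * commas into spaces and the loop below runs each ';'-separated piece as a
 * regular kprobe_events command.
 */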
static __init void setup_boot_kprobe_events(void)
{
        char *p, *cmd = kprobe_boot_events_buf;
        int ret;

        strreplace(kprobe_boot_events_buf, ',', ' ');

        while (cmd && *cmd != '\0') {
                p = strchr(cmd, ';');
                if (p)
                        *p++ = '\0';

                ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
                if (ret)
                        pr_warn("Failed to add event(%d): %s\n", ret, cmd);
                else
                        kprobe_boot_events_enabled = true;

                cmd = p;
        }

        enable_boot_kprobe_events();
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;
        int ret;

        ret = dyn_event_register(&trace_kprobe_ops);
        if (ret)
                return ret;

        if (register_module_notifier(&trace_kprobe_module_nb))
                return -EINVAL;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
                                    NULL, &kprobe_events_ops);

        /* Event list interface */
        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_events' entry\n");

        /* Profile interface */
        entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
                                    NULL, &kprobe_profile_ops);

        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_profile' entry\n");

        setup_boot_kprobe_events();

        return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
        struct trace_event_file *file;

        list_for_each_entry(file, &tr->events, list)
                if (file->event_call == &tk->tp.call)
                        return file;

        return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this locklessly.
 */
static __init int kprobe_trace_self_tests_init(void)
{
        int ret, warn = 0;
        int (*target)(int, int, int, int, int, int);
        struct trace_kprobe *tk;
        struct trace_event_file *file;

        if (tracing_is_disabled())
                return -ENODEV;

        if (kprobe_boot_events_enabled) {
                pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
                return 0;
        }

        target = kprobe_trace_selftest_target;

        pr_info("Testing kprobe tracing: ");

        ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
                                create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function entry.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(tk, file);
                }
        }

        ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
                                create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function return.\n");
                warn++;
        } else {
                /* Enable trace point */
                tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tk == NULL)) {
                        pr_warn("error on getting 2nd new probe.\n");
                        warn++;
                } else {
                        file = find_trace_probe_file(tk, top_trace_array());
                        if (WARN_ON_ONCE(file == NULL)) {
                                pr_warn("error on getting probe file.\n");
                                warn++;
                        } else
                                enable_trace_kprobe(tk, file);
                }
        }

        if (warn)
                goto end;

        ret = target(1, 2, 3, 4, 5, 6);

        /*
         * Not expecting an error here; the check only prevents the
         * optimizer from removing the call to target(), which otherwise
         * has no observable side effects and would never be performed.
         */
        if (ret != 21)
                warn++;

        /* Disable trace points before removing them */
        tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(tk, file);
        }

        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tk == NULL)) {
                pr_warn("error on getting 2nd test probe.\n");
                warn++;
        } else {
                if (trace_kprobe_nhit(tk) != 1) {
                        pr_warn("incorrect number of testprobe2 hits\n");
                        warn++;
                }

                file = find_trace_probe_file(tk, top_trace_array());
                if (WARN_ON_ONCE(file == NULL)) {
                        pr_warn("error on getting probe file.\n");
                        warn++;
                } else
                        disable_trace_kprobe(tk, file);
        }

        ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

        ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }

end:
        ret = dyn_events_release_all(&trace_kprobe_ops);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on cleaning up probes.\n");
                warn++;
        }
        /*
         * Wait for the optimizer work to finish. Otherwise it might fiddle
         * with probes in already freed __init text.
         */
        wait_for_kprobe_optimizer();
        if (warn)
                pr_cont("NG: Some tests failed. Please check them.\n");
        else
                pr_cont("OK\n");
        return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif