kernel/trace/trace_kprobe.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kprobes-based tracing events
4  *
5  * Created by Masami Hiramatsu <mhiramat@redhat.com>
6  *
7  */
8 #define pr_fmt(fmt)     "trace_kprobe: " fmt
9
10 #include <linux/module.h>
11 #include <linux/uaccess.h>
12 #include <linux/rculist.h>
13 #include <linux/error-injection.h>
14
15 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
16
17 #include "trace_dynevent.h"
18 #include "trace_kprobe_selftest.h"
19 #include "trace_probe.h"
20 #include "trace_probe_tmpl.h"
21
22 #define KPROBE_EVENT_SYSTEM "kprobes"
23 #define KRETPROBE_MAXACTIVE_MAX 4096
24 #define MAX_KPROBE_CMDLINE_SIZE 1024
25
26 /* Kprobe early definition from command line */
27 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
28 static bool kprobe_boot_events_enabled __initdata;
29
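/*
 * Boot-time syntax sketch (inferred from setup_boot_kprobe_events() below,
 * which turns ',' into ' ' and splits probe definitions on ';'):
 *
 *   kprobe_event=p,vfs_read,$arg1,$arg2;r,vfs_write,$retval
 *
 * This defines one kprobe and one kretprobe event before tracefs is available.
 */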
30 static int __init set_kprobe_boot_events(char *str)
31 {
32         strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
33         return 0;
34 }
35 __setup("kprobe_event=", set_kprobe_boot_events);
36
37 static int trace_kprobe_create(int argc, const char **argv);
38 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
39 static int trace_kprobe_release(struct dyn_event *ev);
40 static bool trace_kprobe_is_busy(struct dyn_event *ev);
41 static bool trace_kprobe_match(const char *system, const char *event,
42                                struct dyn_event *ev);
43
44 static struct dyn_event_operations trace_kprobe_ops = {
45         .create = trace_kprobe_create,
46         .show = trace_kprobe_show,
47         .is_busy = trace_kprobe_is_busy,
48         .free = trace_kprobe_release,
49         .match = trace_kprobe_match,
50 };
51
52 /*
53  * Kprobe event core functions
54  */
55 struct trace_kprobe {
56         struct dyn_event        devent;
57         struct kretprobe        rp;     /* Use rp.kp for kprobe use */
58         unsigned long __percpu *nhit;
59         const char              *symbol;        /* symbol name */
60         struct trace_probe      tp;
61 };
62
63 static bool is_trace_kprobe(struct dyn_event *ev)
64 {
65         return ev->ops == &trace_kprobe_ops;
66 }
67
68 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
69 {
70         return container_of(ev, struct trace_kprobe, devent);
71 }
72
73 /**
74  * for_each_trace_kprobe - iterate over the trace_kprobe list
75  * @pos:        the struct trace_kprobe * for each entry
76  * @dpos:       the struct dyn_event * to use as a loop cursor
77  */
78 #define for_each_trace_kprobe(pos, dpos)        \
79         for_each_dyn_event(dpos)                \
80                 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
81
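/*
 * Allocation size of a trace_kprobe carrying @n probe arguments; tp.args is
 * the flexible array member at the end of struct trace_probe.
 */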
82 #define SIZEOF_TRACE_KPROBE(n)                          \
83         (offsetof(struct trace_kprobe, tp.args) +       \
84         (sizeof(struct probe_arg) * (n)))
85
86 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
87 {
88         return tk->rp.handler != NULL;
89 }
90
91 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
92 {
93         return tk->symbol ? tk->symbol : "unknown";
94 }
95
96 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
97 {
98         return tk->rp.kp.offset;
99 }
100
101 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
102 {
103         return !!(kprobe_gone(&tk->rp.kp));
104 }
105
106 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
107                                                  struct module *mod)
108 {
109         int len = strlen(mod->name);
110         const char *name = trace_kprobe_symbol(tk);
111         return strncmp(mod->name, name, len) == 0 && name[len] == ':';
112 }
113
114 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
115 {
116         char *p;
117         bool ret;
118
119         if (!tk->symbol)
120                 return false;
121         p = strchr(tk->symbol, ':');
122         if (!p)
123                 return true;
124         *p = '\0';
125         mutex_lock(&module_mutex);
126         ret = !!find_module(tk->symbol);
127         mutex_unlock(&module_mutex);
128         *p = ':';
129
130         return ret;
131 }
132
133 static bool trace_kprobe_is_busy(struct dyn_event *ev)
134 {
135         struct trace_kprobe *tk = to_trace_kprobe(ev);
136
137         return trace_probe_is_enabled(&tk->tp);
138 }
139
140 static bool trace_kprobe_match(const char *system, const char *event,
141                                struct dyn_event *ev)
142 {
143         struct trace_kprobe *tk = to_trace_kprobe(ev);
144
145         return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
146             (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0);
147 }
148
149 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
150 {
151         unsigned long nhit = 0;
152         int cpu;
153
154         for_each_possible_cpu(cpu)
155                 nhit += *per_cpu_ptr(tk->nhit, cpu);
156
157         return nhit;
158 }
159
160 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
161 {
162         return !(list_empty(&tk->rp.kp.list) &&
163                  hlist_unhashed(&tk->rp.kp.hlist));
164 }
165
166 /* Return 0 if it fails to find the symbol address */
167 static nokprobe_inline
168 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
169 {
170         unsigned long addr;
171
172         if (tk->symbol) {
173                 addr = (unsigned long)
174                         kallsyms_lookup_name(trace_kprobe_symbol(tk));
175                 if (addr)
176                         addr += tk->rp.kp.offset;
177         } else {
178                 addr = (unsigned long)tk->rp.kp.addr;
179         }
180         return addr;
181 }
182
183 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
184 {
185         struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
186
187         return kprobe_on_func_entry(tk->rp.kp.addr,
188                         tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
189                         tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
190 }
191
192 bool trace_kprobe_error_injectable(struct trace_event_call *call)
193 {
194         struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
195
196         return within_error_injection_list(trace_kprobe_address(tk));
197 }
198
199 static int register_kprobe_event(struct trace_kprobe *tk);
200 static int unregister_kprobe_event(struct trace_kprobe *tk);
201
202 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
203 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
204                                 struct pt_regs *regs);
205
206 static void free_trace_kprobe(struct trace_kprobe *tk)
207 {
208         if (tk) {
209                 trace_probe_cleanup(&tk->tp);
210                 kfree(tk->symbol);
211                 free_percpu(tk->nhit);
212                 kfree(tk);
213         }
214 }
215
216 /*
217  * Allocate new trace_probe and initialize it (including kprobes).
218  */
219 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
220                                              const char *event,
221                                              void *addr,
222                                              const char *symbol,
223                                              unsigned long offs,
224                                              int maxactive,
225                                              int nargs, bool is_return)
226 {
227         struct trace_kprobe *tk;
228         int ret = -ENOMEM;
229
230         tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
231         if (!tk)
232                 return ERR_PTR(ret);
233
234         tk->nhit = alloc_percpu(unsigned long);
235         if (!tk->nhit)
236                 goto error;
237
238         if (symbol) {
239                 tk->symbol = kstrdup(symbol, GFP_KERNEL);
240                 if (!tk->symbol)
241                         goto error;
242                 tk->rp.kp.symbol_name = tk->symbol;
243                 tk->rp.kp.offset = offs;
244         } else
245                 tk->rp.kp.addr = addr;
246
247         if (is_return)
248                 tk->rp.handler = kretprobe_dispatcher;
249         else
250                 tk->rp.kp.pre_handler = kprobe_dispatcher;
251
252         tk->rp.maxactive = maxactive;
253         INIT_HLIST_NODE(&tk->rp.kp.hlist);
254         INIT_LIST_HEAD(&tk->rp.kp.list);
255
256         ret = trace_probe_init(&tk->tp, event, group);
257         if (ret < 0)
258                 goto error;
259
260         dyn_event_init(&tk->devent, &trace_kprobe_ops);
261         return tk;
262 error:
263         free_trace_kprobe(tk);
264         return ERR_PTR(ret);
265 }
266
267 static struct trace_kprobe *find_trace_kprobe(const char *event,
268                                               const char *group)
269 {
270         struct dyn_event *pos;
271         struct trace_kprobe *tk;
272
273         for_each_trace_kprobe(tk, pos)
274                 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
275                     strcmp(trace_probe_group_name(&tk->tp), group) == 0)
276                         return tk;
277         return NULL;
278 }
279
280 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
281 {
282         int ret = 0;
283
284         if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
285                 if (trace_kprobe_is_return(tk))
286                         ret = enable_kretprobe(&tk->rp);
287                 else
288                         ret = enable_kprobe(&tk->rp.kp);
289         }
290
291         return ret;
292 }
293
294 /*
295  * Enable trace_probe
296  * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
297  */
298 static int
299 enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
300 {
301         bool enabled = trace_probe_is_enabled(&tk->tp);
302         int ret = 0;
303
304         if (file) {
305                 ret = trace_probe_add_file(&tk->tp, file);
306                 if (ret)
307                         return ret;
308         } else
309                 trace_probe_set_flag(&tk->tp, TP_FLAG_PROFILE);
310
311         if (enabled)
312                 return 0;
313
314         ret = __enable_trace_kprobe(tk);
315         if (ret) {
316                 if (file)
317                         trace_probe_remove_file(&tk->tp, file);
318                 else
319                         trace_probe_clear_flag(&tk->tp, TP_FLAG_PROFILE);
320         }
321
322         return ret;
323 }
324
325 /*
326  * Disable trace_probe
327  * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
328  */
329 static int
330 disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
331 {
332         struct trace_probe *tp = &tk->tp;
333         int ret = 0;
334
335         if (file) {
336                 if (!trace_probe_get_file_link(tp, file))
337                         return -ENOENT;
338                 if (!trace_probe_has_single_file(tp))
339                         goto out;
340                 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
341         } else
342                 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
343
344         if (!trace_probe_is_enabled(tp) && trace_kprobe_is_registered(tk)) {
345                 if (trace_kprobe_is_return(tk))
346                         disable_kretprobe(&tk->rp);
347                 else
348                         disable_kprobe(&tk->rp.kp);
349         }
350
351  out:
352         if (file)
353                 /*
354                  * Synchronization is done in the function below. For a perf
355                  * event, file == NULL and perf_trace_event_unreg() calls
356                  * tracepoint_synchronize_unregister() to synchronize the event
357                  * itself, so we don't need to handle it here.
358                  */
359                 trace_probe_remove_file(tp, file);
360
361         return ret;
362 }
363
364 #if defined(CONFIG_KPROBES_ON_FTRACE) && \
365         !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
366 static bool within_notrace_func(struct trace_kprobe *tk)
367 {
368         unsigned long offset, size, addr;
369
370         addr = trace_kprobe_address(tk);
371         if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
372                 return false;
373
374         /* Get the entry address of the target function */
375         addr -= offset;
376
377         /*
378          * Since ftrace_location_range() does inclusive range check, we need
379          * to subtract 1 byte from the end address.
380          */
381         return !ftrace_location_range(addr, addr + size - 1);
382 }
383 #else
384 #define within_notrace_func(tk) (false)
385 #endif
386
387 /* Internal register function - just handle k*probes and flags */
388 static int __register_trace_kprobe(struct trace_kprobe *tk)
389 {
390         int i, ret;
391
392         if (trace_kprobe_is_registered(tk))
393                 return -EINVAL;
394
395         if (within_notrace_func(tk)) {
396                 pr_warn("Could not probe notrace function %s\n",
397                         trace_kprobe_symbol(tk));
398                 return -EINVAL;
399         }
400
401         for (i = 0; i < tk->tp.nr_args; i++) {
402                 ret = traceprobe_update_arg(&tk->tp.args[i]);
403                 if (ret)
404                         return ret;
405         }
406
407         /* Set/clear disabled flag according to tp->flag */
408         if (trace_probe_is_enabled(&tk->tp))
409                 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
410         else
411                 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
412
413         if (trace_kprobe_is_return(tk))
414                 ret = register_kretprobe(&tk->rp);
415         else
416                 ret = register_kprobe(&tk->rp.kp);
417
418         return ret;
419 }
420
421 /* Internal unregister function - just handle k*probes and flags */
422 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
423 {
424         if (trace_kprobe_is_registered(tk)) {
425                 if (trace_kprobe_is_return(tk))
426                         unregister_kretprobe(&tk->rp);
427                 else
428                         unregister_kprobe(&tk->rp.kp);
429                 /* Cleanup kprobe for reuse and mark it unregistered */
430                 INIT_HLIST_NODE(&tk->rp.kp.hlist);
431                 INIT_LIST_HEAD(&tk->rp.kp.list);
432                 if (tk->rp.kp.symbol_name)
433                         tk->rp.kp.addr = NULL;
434         }
435 }
436
437 /* Unregister a trace_probe and probe_event */
438 static int unregister_trace_kprobe(struct trace_kprobe *tk)
439 {
440         /* An enabled event cannot be unregistered */
441         if (trace_probe_is_enabled(&tk->tp))
442                 return -EBUSY;
443
444         /* Will fail if probe is being used by ftrace or perf */
445         if (unregister_kprobe_event(tk))
446                 return -EBUSY;
447
448         __unregister_trace_kprobe(tk);
449         dyn_event_remove(&tk->devent);
450
451         return 0;
452 }
453
454 /* Register a trace_probe and probe_event */
455 static int register_trace_kprobe(struct trace_kprobe *tk)
456 {
457         struct trace_kprobe *old_tk;
458         int ret;
459
460         mutex_lock(&event_mutex);
461
462         /* Delete the old event if one with the same name exists */
463         old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
464                                    trace_probe_group_name(&tk->tp));
465         if (old_tk) {
466                 ret = unregister_trace_kprobe(old_tk);
467                 if (ret < 0)
468                         goto end;
469                 free_trace_kprobe(old_tk);
470         }
471
472         /* Register new event */
473         ret = register_kprobe_event(tk);
474         if (ret) {
475                 pr_warn("Failed to register probe event(%d)\n", ret);
476                 goto end;
477         }
478
479         /* Register k*probe */
480         ret = __register_trace_kprobe(tk);
481         if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
482                 pr_warn("This probe might be able to register after the target module is loaded. Continue.\n");
483                 ret = 0;
484         }
485
486         if (ret < 0)
487                 unregister_kprobe_event(tk);
488         else
489                 dyn_event_add(&tk->devent);
490
491 end:
492         mutex_unlock(&event_mutex);
493         return ret;
494 }
495
496 /* Module notifier callback, checking events on the module */
497 static int trace_kprobe_module_callback(struct notifier_block *nb,
498                                        unsigned long val, void *data)
499 {
500         struct module *mod = data;
501         struct dyn_event *pos;
502         struct trace_kprobe *tk;
503         int ret;
504
505         if (val != MODULE_STATE_COMING)
506                 return NOTIFY_DONE;
507
508         /* Update probes on coming module */
509         mutex_lock(&event_mutex);
510         for_each_trace_kprobe(tk, pos) {
511                 if (trace_kprobe_within_module(tk, mod)) {
512                         /* No need to check busy - this probe should already be gone. */
513                         __unregister_trace_kprobe(tk);
514                         ret = __register_trace_kprobe(tk);
515                         if (ret)
516                                 pr_warn("Failed to re-register probe %s on %s: %d\n",
517                                         trace_probe_name(&tk->tp),
518                                         mod->name, ret);
519                 }
520         }
521         mutex_unlock(&event_mutex);
522
523         return NOTIFY_DONE;
524 }
525
526 static struct notifier_block trace_kprobe_module_nb = {
527         .notifier_call = trace_kprobe_module_callback,
528         .priority = 1   /* Invoked after kprobe module callback */
529 };
530
531 /* Convert ':' and '.', which may appear in symbol names, into '_' when generating event names */
532 static inline void sanitize_event_name(char *name)
533 {
534         while (*name++ != '\0')
535                 if (*name == ':' || *name == '.')
536                         *name = '_';
537 }
538
539 static int trace_kprobe_create(int argc, const char *argv[])
540 {
541         /*
542          * Argument syntax:
543          *  - Add kprobe:
544          *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
545          *  - Add kretprobe:
546          *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
547          * Fetch args:
548          *  $retval     : fetch return value
549          *  $stack      : fetch stack address
550          *  $stackN     : fetch Nth of stack (N:0-)
551          *  $comm       : fetch current task comm
552          *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
553          *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
554          *  %REG        : fetch register REG
555          * Dereferencing memory fetch:
556          *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
557          * Alias name of args:
558          *  NAME=FETCHARG : set NAME as alias of FETCHARG.
559          * Type of args:
560          *  FETCHARG:TYPE : use TYPE instead of unsigned long.
561          */
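        /*
         * Usage sketch (register names follow the x86 example in
         * Documentation/trace/kprobetrace.rst):
         *
         *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' \
         *           > /sys/kernel/debug/tracing/kprobe_events
         *
         * reaches this function as argv = { "p:myprobe", "do_sys_open",
         * "dfd=%ax", "filename=%dx" } with argc == 4.
         */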
562         struct trace_kprobe *tk = NULL;
563         int i, len, ret = 0;
564         bool is_return = false;
565         char *symbol = NULL, *tmp = NULL;
566         const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
567         int maxactive = 0;
568         long offset = 0;
569         void *addr = NULL;
570         char buf[MAX_EVENT_NAME_LEN];
571         unsigned int flags = TPARG_FL_KERNEL;
572
573         switch (argv[0][0]) {
574         case 'r':
575                 is_return = true;
576                 flags |= TPARG_FL_RETURN;
577                 break;
578         case 'p':
579                 break;
580         default:
581                 return -ECANCELED;
582         }
583         if (argc < 2)
584                 return -ECANCELED;
585
586         trace_probe_log_init("trace_kprobe", argc, argv);
587
588         event = strchr(&argv[0][1], ':');
589         if (event)
590                 event++;
591
592         if (isdigit(argv[0][1])) {
593                 if (!is_return) {
594                         trace_probe_log_err(1, MAXACT_NO_KPROBE);
595                         goto parse_error;
596                 }
597                 if (event)
598                         len = event - &argv[0][1] - 1;
599                 else
600                         len = strlen(&argv[0][1]);
601                 if (len > MAX_EVENT_NAME_LEN - 1) {
602                         trace_probe_log_err(1, BAD_MAXACT);
603                         goto parse_error;
604                 }
605                 memcpy(buf, &argv[0][1], len);
606                 buf[len] = '\0';
607                 ret = kstrtouint(buf, 0, &maxactive);
608                 if (ret || !maxactive) {
609                         trace_probe_log_err(1, BAD_MAXACT);
610                         goto parse_error;
611                 }
612                 /* kretprobe instances are iterated over via a list. The
613                  * maximum should stay reasonable.
614                  */
615                 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
616                         trace_probe_log_err(1, MAXACT_TOO_BIG);
617                         goto parse_error;
618                 }
619         }
620
621         /* try to parse an address. if that fails, try to read the
622          * input as a symbol. */
623         if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
624                 trace_probe_log_set_index(1);
625                 /* Check whether uprobe event specified */
626                 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
627                         ret = -ECANCELED;
628                         goto error;
629                 }
630                 /* a symbol specified */
631                 symbol = kstrdup(argv[1], GFP_KERNEL);
632                 if (!symbol)
633                         return -ENOMEM;
634                 /* TODO: support .init module functions */
635                 ret = traceprobe_split_symbol_offset(symbol, &offset);
636                 if (ret || offset < 0 || offset > UINT_MAX) {
637                         trace_probe_log_err(0, BAD_PROBE_ADDR);
638                         goto parse_error;
639                 }
640                 if (kprobe_on_func_entry(NULL, symbol, offset))
641                         flags |= TPARG_FL_FENTRY;
642                 if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
643                         trace_probe_log_err(0, BAD_RETPROBE);
644                         goto parse_error;
645                 }
646         }
647
648         trace_probe_log_set_index(0);
649         if (event) {
650                 ret = traceprobe_parse_event_name(&event, &group, buf,
651                                                   event - argv[0]);
652                 if (ret)
653                         goto parse_error;
654         } else {
655                 /* Make a new event name */
656                 if (symbol)
657                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
658                                  is_return ? 'r' : 'p', symbol, offset);
659                 else
660                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
661                                  is_return ? 'r' : 'p', addr);
662                 sanitize_event_name(buf);
663                 event = buf;
664         }
665
666         /* setup a probe */
667         tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
668                                argc - 2, is_return);
669         if (IS_ERR(tk)) {
670                 ret = PTR_ERR(tk);
671                 /* This must return -ENOMEM, else there is a bug */
672                 WARN_ON_ONCE(ret != -ENOMEM);
673                 goto out;       /* We know tk is not allocated */
674         }
675         argc -= 2; argv += 2;
676
677         /* parse arguments */
678         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
679                 tmp = kstrdup(argv[i], GFP_KERNEL);
680                 if (!tmp) {
681                         ret = -ENOMEM;
682                         goto error;
683                 }
684
685                 trace_probe_log_set_index(i + 2);
686                 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
687                 kfree(tmp);
688                 if (ret)
689                         goto error;     /* This can be -ENOMEM */
690         }
691
692         ret = traceprobe_set_print_fmt(&tk->tp, is_return);
693         if (ret < 0)
694                 goto error;
695
696         ret = register_trace_kprobe(tk);
697         if (ret) {
698                 trace_probe_log_set_index(1);
699                 if (ret == -EILSEQ)
700                         trace_probe_log_err(0, BAD_INSN_BNDRY);
701                 else if (ret == -ENOENT)
702                         trace_probe_log_err(0, BAD_PROBE_ADDR);
703                 else if (ret != -ENOMEM)
704                         trace_probe_log_err(0, FAIL_REG_PROBE);
705                 goto error;
706         }
707
708 out:
709         trace_probe_log_clear();
710         kfree(symbol);
711         return ret;
712
713 parse_error:
714         ret = -EINVAL;
715 error:
716         free_trace_kprobe(tk);
717         goto out;
718 }
719
720 static int create_or_delete_trace_kprobe(int argc, char **argv)
721 {
722         int ret;
723
724         if (argv[0][0] == '-')
725                 return dyn_event_release(argc, argv, &trace_kprobe_ops);
726
727         ret = trace_kprobe_create(argc, (const char **)argv);
728         return ret == -ECANCELED ? -EINVAL : ret;
729 }
730
731 static int trace_kprobe_release(struct dyn_event *ev)
732 {
733         struct trace_kprobe *tk = to_trace_kprobe(ev);
734         int ret = unregister_trace_kprobe(tk);
735
736         if (!ret)
737                 free_trace_kprobe(tk);
738         return ret;
739 }
740
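/*
 * Emit one kprobe_events line describing this probe, e.g. (sketch):
 *
 *   p:kprobes/myprobe do_sys_open dfd=%ax filename=%dx
 */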
741 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
742 {
743         struct trace_kprobe *tk = to_trace_kprobe(ev);
744         int i;
745
746         seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
747         seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
748                                 trace_probe_name(&tk->tp));
749
750         if (!tk->symbol)
751                 seq_printf(m, " 0x%p", tk->rp.kp.addr);
752         else if (tk->rp.kp.offset)
753                 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
754                            tk->rp.kp.offset);
755         else
756                 seq_printf(m, " %s", trace_kprobe_symbol(tk));
757
758         for (i = 0; i < tk->tp.nr_args; i++)
759                 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
760         seq_putc(m, '\n');
761
762         return 0;
763 }
764
765 static int probes_seq_show(struct seq_file *m, void *v)
766 {
767         struct dyn_event *ev = v;
768
769         if (!is_trace_kprobe(ev))
770                 return 0;
771
772         return trace_kprobe_show(m, ev);
773 }
774
775 static const struct seq_operations probes_seq_op = {
776         .start  = dyn_event_seq_start,
777         .next   = dyn_event_seq_next,
778         .stop   = dyn_event_seq_stop,
779         .show   = probes_seq_show
780 };
781
782 static int probes_open(struct inode *inode, struct file *file)
783 {
784         int ret;
785
786         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
787                 ret = dyn_events_release_all(&trace_kprobe_ops);
788                 if (ret < 0)
789                         return ret;
790         }
791
792         return seq_open(file, &probes_seq_op);
793 }
794
795 static ssize_t probes_write(struct file *file, const char __user *buffer,
796                             size_t count, loff_t *ppos)
797 {
798         return trace_parse_run_command(file, buffer, count, ppos,
799                                        create_or_delete_trace_kprobe);
800 }
801
802 static const struct file_operations kprobe_events_ops = {
803         .owner          = THIS_MODULE,
804         .open           = probes_open,
805         .read           = seq_read,
806         .llseek         = seq_lseek,
807         .release        = seq_release,
808         .write          = probes_write,
809 };
810
811 /* Probes profiling interfaces */
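/*
 * kprobe_profile prints one line per probe (see the seq_printf() below):
 * the event name, the hit count, and the missed count, e.g.
 *
 *   myprobe                                          12               0
 */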
812 static int probes_profile_seq_show(struct seq_file *m, void *v)
813 {
814         struct dyn_event *ev = v;
815         struct trace_kprobe *tk;
816
817         if (!is_trace_kprobe(ev))
818                 return 0;
819
820         tk = to_trace_kprobe(ev);
821         seq_printf(m, "  %-44s %15lu %15lu\n",
822                    trace_probe_name(&tk->tp),
823                    trace_kprobe_nhit(tk),
824                    tk->rp.kp.nmissed);
825
826         return 0;
827 }
828
829 static const struct seq_operations profile_seq_op = {
830         .start  = dyn_event_seq_start,
831         .next   = dyn_event_seq_next,
832         .stop   = dyn_event_seq_stop,
833         .show   = probes_profile_seq_show
834 };
835
836 static int profile_open(struct inode *inode, struct file *file)
837 {
838         return seq_open(file, &profile_seq_op);
839 }
840
841 static const struct file_operations kprobe_profile_ops = {
842         .owner          = THIS_MODULE,
843         .open           = profile_open,
844         .read           = seq_read,
845         .llseek         = seq_lseek,
846         .release        = seq_release,
847 };
848
849 /* Kprobe specific fetch functions */
850
851 /* Return the length of the string, including the null terminating byte */
852 static nokprobe_inline int
853 fetch_store_strlen(unsigned long addr)
854 {
855         int ret, len = 0;
856         u8 c;
857
858         do {
859                 ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
860                 len++;
861         } while (c && ret == 0 && len < MAX_STRING_SIZE);
862
863         return (ret < 0) ? ret : len;
864 }
865
866 /* Return the length of the string, including the null terminating byte */
867 static nokprobe_inline int
868 fetch_store_strlen_user(unsigned long addr)
869 {
870         const void __user *uaddr =  (__force const void __user *)addr;
871
872         return strnlen_unsafe_user(uaddr, MAX_STRING_SIZE);
873 }
874
875 /*
876  * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
877  * length and relative data location.
878  */
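/*
 * Reading aid (assumption based on make_data_loc()/get_loc_len() in the
 * probe headers): the u32 data_loc word packs the string length in its
 * upper 16 bits and the offset from @base in its lower 16 bits.
 */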
879 static nokprobe_inline int
880 fetch_store_string(unsigned long addr, void *dest, void *base)
881 {
882         int maxlen = get_loc_len(*(u32 *)dest);
883         void *__dest;
884         long ret;
885
886         if (unlikely(!maxlen))
887                 return -ENOMEM;
888
889         __dest = get_loc_data(dest, base);
890
891         /*
892          * Try to get string again, since the string can be changed while
893          * probing.
894          */
895         ret = strncpy_from_unsafe(__dest, (void *)addr, maxlen);
896         if (ret >= 0)
897                 *(u32 *)dest = make_data_loc(ret, __dest - base);
898
899         return ret;
900 }
901
902 /*
903  * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
904  * with max length and relative data location.
905  */
906 static nokprobe_inline int
907 fetch_store_string_user(unsigned long addr, void *dest, void *base)
908 {
909         const void __user *uaddr =  (__force const void __user *)addr;
910         int maxlen = get_loc_len(*(u32 *)dest);
911         void *__dest;
912         long ret;
913
914         if (unlikely(!maxlen))
915                 return -ENOMEM;
916
917         __dest = get_loc_data(dest, base);
918
919         ret = strncpy_from_unsafe_user(__dest, uaddr, maxlen);
920         if (ret >= 0)
921                 *(u32 *)dest = make_data_loc(ret, __dest - base);
922
923         return ret;
924 }
925
926 static nokprobe_inline int
927 probe_mem_read(void *dest, void *src, size_t size)
928 {
929         return probe_kernel_read(dest, src, size);
930 }
931
932 static nokprobe_inline int
933 probe_mem_read_user(void *dest, void *src, size_t size)
934 {
935         const void __user *uaddr =  (__force const void __user *)src;
936
937         return probe_user_read(dest, uaddr, size);
938 }
939
940 /* Note that we don't verify the fetch code, since it does not come from user space */
941 static int
942 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
943                    void *base)
944 {
945         unsigned long val;
946
947 retry:
948         /* 1st stage: get value from context */
949         switch (code->op) {
950         case FETCH_OP_REG:
951                 val = regs_get_register(regs, code->param);
952                 break;
953         case FETCH_OP_STACK:
954                 val = regs_get_kernel_stack_nth(regs, code->param);
955                 break;
956         case FETCH_OP_STACKP:
957                 val = kernel_stack_pointer(regs);
958                 break;
959         case FETCH_OP_RETVAL:
960                 val = regs_return_value(regs);
961                 break;
962         case FETCH_OP_IMM:
963                 val = code->immediate;
964                 break;
965         case FETCH_OP_COMM:
966                 val = (unsigned long)current->comm;
967                 break;
968 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
969         case FETCH_OP_ARG:
970                 val = regs_get_kernel_argument(regs, code->param);
971                 break;
972 #endif
973         case FETCH_NOP_SYMBOL:  /* Ignore a placeholder */
974                 code++;
975                 goto retry;
976         default:
977                 return -EILSEQ;
978         }
979         code++;
980
981         return process_fetch_insn_bottom(code, val, dest, base);
982 }
983 NOKPROBE_SYMBOL(process_fetch_insn)
984
985 /* Kprobe handler */
986 static nokprobe_inline void
987 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
988                     struct trace_event_file *trace_file)
989 {
990         struct kprobe_trace_entry_head *entry;
991         struct ring_buffer_event *event;
992         struct ring_buffer *buffer;
993         int size, dsize, pc;
994         unsigned long irq_flags;
995         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
996
997         WARN_ON(call != trace_file->event_call);
998
999         if (trace_trigger_soft_disabled(trace_file))
1000                 return;
1001
1002         local_save_flags(irq_flags);
1003         pc = preempt_count();
1004
1005         dsize = __get_data_size(&tk->tp, regs);
1006         size = sizeof(*entry) + tk->tp.size + dsize;
1007
1008         event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1009                                                 call->event.type,
1010                                                 size, irq_flags, pc);
1011         if (!event)
1012                 return;
1013
1014         entry = ring_buffer_event_data(event);
1015         entry->ip = (unsigned long)tk->rp.kp.addr;
1016         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1017
1018         event_trigger_unlock_commit_regs(trace_file, buffer, event,
1019                                          entry, irq_flags, pc, regs);
1020 }
1021
1022 static void
1023 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1024 {
1025         struct event_file_link *link;
1026
1027         trace_probe_for_each_link_rcu(link, &tk->tp)
1028                 __kprobe_trace_func(tk, regs, link->file);
1029 }
1030 NOKPROBE_SYMBOL(kprobe_trace_func);
1031
1032 /* Kretprobe handler */
1033 static nokprobe_inline void
1034 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1035                        struct pt_regs *regs,
1036                        struct trace_event_file *trace_file)
1037 {
1038         struct kretprobe_trace_entry_head *entry;
1039         struct ring_buffer_event *event;
1040         struct ring_buffer *buffer;
1041         int size, pc, dsize;
1042         unsigned long irq_flags;
1043         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1044
1045         WARN_ON(call != trace_file->event_call);
1046
1047         if (trace_trigger_soft_disabled(trace_file))
1048                 return;
1049
1050         local_save_flags(irq_flags);
1051         pc = preempt_count();
1052
1053         dsize = __get_data_size(&tk->tp, regs);
1054         size = sizeof(*entry) + tk->tp.size + dsize;
1055
1056         event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1057                                                 call->event.type,
1058                                                 size, irq_flags, pc);
1059         if (!event)
1060                 return;
1061
1062         entry = ring_buffer_event_data(event);
1063         entry->func = (unsigned long)tk->rp.kp.addr;
1064         entry->ret_ip = (unsigned long)ri->ret_addr;
1065         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1066
1067         event_trigger_unlock_commit_regs(trace_file, buffer, event,
1068                                          entry, irq_flags, pc, regs);
1069 }
1070
1071 static void
1072 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1073                      struct pt_regs *regs)
1074 {
1075         struct event_file_link *link;
1076
1077         trace_probe_for_each_link_rcu(link, &tk->tp)
1078                 __kretprobe_trace_func(tk, ri, regs, link->file);
1079 }
1080 NOKPROBE_SYMBOL(kretprobe_trace_func);
1081
1082 /* Event entry printers */
1083 static enum print_line_t
1084 print_kprobe_event(struct trace_iterator *iter, int flags,
1085                    struct trace_event *event)
1086 {
1087         struct kprobe_trace_entry_head *field;
1088         struct trace_seq *s = &iter->seq;
1089         struct trace_probe *tp;
1090
1091         field = (struct kprobe_trace_entry_head *)iter->ent;
1092         tp = container_of(event, struct trace_probe, call.event);
1093
1094         trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1095
1096         if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1097                 goto out;
1098
1099         trace_seq_putc(s, ')');
1100
1101         if (print_probe_args(s, tp->args, tp->nr_args,
1102                              (u8 *)&field[1], field) < 0)
1103                 goto out;
1104
1105         trace_seq_putc(s, '\n');
1106  out:
1107         return trace_handle_return(s);
1108 }
1109
1110 static enum print_line_t
1111 print_kretprobe_event(struct trace_iterator *iter, int flags,
1112                       struct trace_event *event)
1113 {
1114         struct kretprobe_trace_entry_head *field;
1115         struct trace_seq *s = &iter->seq;
1116         struct trace_probe *tp;
1117
1118         field = (struct kretprobe_trace_entry_head *)iter->ent;
1119         tp = container_of(event, struct trace_probe, call.event);
1120
1121         trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1122
1123         if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1124                 goto out;
1125
1126         trace_seq_puts(s, " <- ");
1127
1128         if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1129                 goto out;
1130
1131         trace_seq_putc(s, ')');
1132
1133         if (print_probe_args(s, tp->args, tp->nr_args,
1134                              (u8 *)&field[1], field) < 0)
1135                 goto out;
1136
1137         trace_seq_putc(s, '\n');
1138
1139  out:
1140         return trace_handle_return(s);
1141 }
1142
1143
1144 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1145 {
1146         int ret;
1147         struct kprobe_trace_entry_head field;
1148         struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1149
1150         DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1151
1152         return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
1153 }
1154
1155 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1156 {
1157         int ret;
1158         struct kretprobe_trace_entry_head field;
1159         struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1160
1161         DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1162         DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1163
1164         return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
1165 }
1166
1167 #ifdef CONFIG_PERF_EVENTS
1168
1169 /* Kprobe profile handler */
1170 static int
1171 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1172 {
1173         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1174         struct kprobe_trace_entry_head *entry;
1175         struct hlist_head *head;
1176         int size, __size, dsize;
1177         int rctx;
1178
1179         if (bpf_prog_array_valid(call)) {
1180                 unsigned long orig_ip = instruction_pointer(regs);
1181                 int ret;
1182
1183                 ret = trace_call_bpf(call, regs);
1184
1185                 /*
1186                  * We need to check whether we modified the pc in pt_regs,
1187                  * and if so return 1 so that we don't do the single
1188                  * stepping.
1189                  */
1190                 if (orig_ip != instruction_pointer(regs))
1191                         return 1;
1192                 if (!ret)
1193                         return 0;
1194         }
1195
1196         head = this_cpu_ptr(call->perf_events);
1197         if (hlist_empty(head))
1198                 return 0;
1199
1200         dsize = __get_data_size(&tk->tp, regs);
1201         __size = sizeof(*entry) + tk->tp.size + dsize;
1202         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1203         size -= sizeof(u32);
1204
1205         entry = perf_trace_buf_alloc(size, NULL, &rctx);
1206         if (!entry)
1207                 return 0;
1208
1209         entry->ip = (unsigned long)tk->rp.kp.addr;
1210         memset(&entry[1], 0, dsize);
1211         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1212         perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1213                               head, NULL);
1214         return 0;
1215 }
1216 NOKPROBE_SYMBOL(kprobe_perf_func);
1217
1218 /* Kretprobe profile handler */
1219 static void
1220 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1221                     struct pt_regs *regs)
1222 {
1223         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1224         struct kretprobe_trace_entry_head *entry;
1225         struct hlist_head *head;
1226         int size, __size, dsize;
1227         int rctx;
1228
1229         if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1230                 return;
1231
1232         head = this_cpu_ptr(call->perf_events);
1233         if (hlist_empty(head))
1234                 return;
1235
1236         dsize = __get_data_size(&tk->tp, regs);
1237         __size = sizeof(*entry) + tk->tp.size + dsize;
1238         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1239         size -= sizeof(u32);
1240
1241         entry = perf_trace_buf_alloc(size, NULL, &rctx);
1242         if (!entry)
1243                 return;
1244
1245         entry->func = (unsigned long)tk->rp.kp.addr;
1246         entry->ret_ip = (unsigned long)ri->ret_addr;
1247         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1248         perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1249                               head, NULL);
1250 }
1251 NOKPROBE_SYMBOL(kretprobe_perf_func);
1252
1253 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1254                         const char **symbol, u64 *probe_offset,
1255                         u64 *probe_addr, bool perf_type_tracepoint)
1256 {
1257         const char *pevent = trace_event_name(event->tp_event);
1258         const char *group = event->tp_event->class->system;
1259         struct trace_kprobe *tk;
1260
1261         if (perf_type_tracepoint)
1262                 tk = find_trace_kprobe(pevent, group);
1263         else
1264                 tk = event->tp_event->data;
1265         if (!tk)
1266                 return -EINVAL;
1267
1268         *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1269                                               : BPF_FD_TYPE_KPROBE;
1270         if (tk->symbol) {
1271                 *symbol = tk->symbol;
1272                 *probe_offset = tk->rp.kp.offset;
1273                 *probe_addr = 0;
1274         } else {
1275                 *symbol = NULL;
1276                 *probe_offset = 0;
1277                 *probe_addr = (unsigned long)tk->rp.kp.addr;
1278         }
1279         return 0;
1280 }
1281 #endif  /* CONFIG_PERF_EVENTS */
1282
1283 /*
1284  * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1285  *
1286  * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1287  * lockless, but we can't race with this __init function.
1288  */
1289 static int kprobe_register(struct trace_event_call *event,
1290                            enum trace_reg type, void *data)
1291 {
1292         struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1293         struct trace_event_file *file = data;
1294
1295         switch (type) {
1296         case TRACE_REG_REGISTER:
1297                 return enable_trace_kprobe(tk, file);
1298         case TRACE_REG_UNREGISTER:
1299                 return disable_trace_kprobe(tk, file);
1300
1301 #ifdef CONFIG_PERF_EVENTS
1302         case TRACE_REG_PERF_REGISTER:
1303                 return enable_trace_kprobe(tk, NULL);
1304         case TRACE_REG_PERF_UNREGISTER:
1305                 return disable_trace_kprobe(tk, NULL);
1306         case TRACE_REG_PERF_OPEN:
1307         case TRACE_REG_PERF_CLOSE:
1308         case TRACE_REG_PERF_ADD:
1309         case TRACE_REG_PERF_DEL:
1310                 return 0;
1311 #endif
1312         }
1313         return 0;
1314 }
1315
1316 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1317 {
1318         struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1319         int ret = 0;
1320
1321         raw_cpu_inc(*tk->nhit);
1322
1323         if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1324                 kprobe_trace_func(tk, regs);
1325 #ifdef CONFIG_PERF_EVENTS
1326         if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1327                 ret = kprobe_perf_func(tk, regs);
1328 #endif
1329         return ret;
1330 }
1331 NOKPROBE_SYMBOL(kprobe_dispatcher);
1332
1333 static int
1334 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1335 {
1336         struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1337
1338         raw_cpu_inc(*tk->nhit);
1339
1340         if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1341                 kretprobe_trace_func(tk, ri, regs);
1342 #ifdef CONFIG_PERF_EVENTS
1343         if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1344                 kretprobe_perf_func(tk, ri, regs);
1345 #endif
1346         return 0;       /* We don't tweak the kernel, so just return 0 */
1347 }
1348 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1349
1350 static struct trace_event_functions kretprobe_funcs = {
1351         .trace          = print_kretprobe_event
1352 };
1353
1354 static struct trace_event_functions kprobe_funcs = {
1355         .trace          = print_kprobe_event
1356 };
1357
1358 static inline void init_trace_event_call(struct trace_kprobe *tk)
1359 {
1360         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1361
1362         if (trace_kprobe_is_return(tk)) {
1363                 call->event.funcs = &kretprobe_funcs;
1364                 call->class->define_fields = kretprobe_event_define_fields;
1365         } else {
1366                 call->event.funcs = &kprobe_funcs;
1367                 call->class->define_fields = kprobe_event_define_fields;
1368         }
1369
1370         call->flags = TRACE_EVENT_FL_KPROBE;
1371         call->class->reg = kprobe_register;
1372         call->data = tk;
1373 }
1374
1375 static int register_kprobe_event(struct trace_kprobe *tk)
1376 {
1377         init_trace_event_call(tk);
1378
1379         return trace_probe_register_event_call(&tk->tp);
1380 }
1381
1382 static int unregister_kprobe_event(struct trace_kprobe *tk)
1383 {
1384         return trace_probe_unregister_event_call(&tk->tp);
1385 }
1386
1387 #ifdef CONFIG_PERF_EVENTS
1388 /* create a trace_kprobe, but don't add it to global lists */
1389 struct trace_event_call *
1390 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1391                           bool is_return)
1392 {
1393         struct trace_kprobe *tk;
1394         int ret;
1395         char *event;
1396
1397         /*
1398          * Local trace_kprobes are not added to dyn_event, so they are never
1399          * found by find_trace_kprobe(). Therefore duplicate names are not a
1400          * concern here.
1401          */
1402         event = func ? func : "DUMMY_EVENT";
1403
1404         tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1405                                 offs, 0 /* maxactive */, 0 /* nargs */,
1406                                 is_return);
1407
1408         if (IS_ERR(tk)) {
1409                 pr_info("Failed to allocate trace_probe (%d)\n",
1410                         (int)PTR_ERR(tk));
1411                 return ERR_CAST(tk);
1412         }
1413
1414         init_trace_event_call(tk);
1415
1416         if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1417                 ret = -ENOMEM;
1418                 goto error;
1419         }
1420
1421         ret = __register_trace_kprobe(tk);
1422         if (ret < 0)
1423                 goto error;
1424
1425         return trace_probe_event_call(&tk->tp);
1426 error:
1427         free_trace_kprobe(tk);
1428         return ERR_PTR(ret);
1429 }
1430
1431 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1432 {
1433         struct trace_kprobe *tk;
1434
1435         tk = container_of(event_call, struct trace_kprobe, tp.call);
1436
1437         if (trace_probe_is_enabled(&tk->tp)) {
1438                 WARN_ON(1);
1439                 return;
1440         }
1441
1442         __unregister_trace_kprobe(tk);
1443
1444         free_trace_kprobe(tk);
1445 }
1446 #endif /* CONFIG_PERF_EVENTS */
1447
1448 static __init void enable_boot_kprobe_events(void)
1449 {
1450         struct trace_array *tr = top_trace_array();
1451         struct trace_event_file *file;
1452         struct trace_kprobe *tk;
1453         struct dyn_event *pos;
1454
1455         mutex_lock(&event_mutex);
1456         for_each_trace_kprobe(tk, pos) {
1457                 list_for_each_entry(file, &tr->events, list)
1458                         if (file->event_call == trace_probe_event_call(&tk->tp))
1459                                 trace_event_enable_disable(file, 1, 0);
1460         }
1461         mutex_unlock(&event_mutex);
1462 }
1463
1464 static __init void setup_boot_kprobe_events(void)
1465 {
1466         char *p, *cmd = kprobe_boot_events_buf;
1467         int ret;
1468
1469         strreplace(kprobe_boot_events_buf, ',', ' ');
1470
1471         while (cmd && *cmd != '\0') {
1472                 p = strchr(cmd, ';');
1473                 if (p)
1474                         *p++ = '\0';
1475
1476                 ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
1477                 if (ret)
1478                         pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1479                 else
1480                         kprobe_boot_events_enabled = true;
1481
1482                 cmd = p;
1483         }
1484
1485         enable_boot_kprobe_events();
1486 }
1487
1488 /* Make a tracefs interface for controlling probe points */
1489 static __init int init_kprobe_trace(void)
1490 {
1491         struct dentry *d_tracer;
1492         struct dentry *entry;
1493         int ret;
1494
1495         ret = dyn_event_register(&trace_kprobe_ops);
1496         if (ret)
1497                 return ret;
1498
1499         if (register_module_notifier(&trace_kprobe_module_nb))
1500                 return -EINVAL;
1501
1502         d_tracer = tracing_init_dentry();
1503         if (IS_ERR(d_tracer))
1504                 return 0;
1505
1506         entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1507                                     NULL, &kprobe_events_ops);
1508
1509         /* Event list interface */
1510         if (!entry)
1511                 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1512
1513         /* Profile interface */
1514         entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1515                                     NULL, &kprobe_profile_ops);
1516
1517         if (!entry)
1518                 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1519
1520         setup_boot_kprobe_events();
1521
1522         return 0;
1523 }
1524 fs_initcall(init_kprobe_trace);
1525
1526
1527 #ifdef CONFIG_FTRACE_STARTUP_TEST
1528 static __init struct trace_event_file *
1529 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1530 {
1531         struct trace_event_file *file;
1532
1533         list_for_each_entry(file, &tr->events, list)
1534                 if (file->event_call == trace_probe_event_call(&tk->tp))
1535                         return file;
1536
1537         return NULL;
1538 }
1539
1540 /*
1541  * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1542  * stage, so we can do this lockless.
1543  */
1544 static __init int kprobe_trace_self_tests_init(void)
1545 {
1546         int ret, warn = 0;
1547         int (*target)(int, int, int, int, int, int);
1548         struct trace_kprobe *tk;
1549         struct trace_event_file *file;
1550
1551         if (tracing_is_disabled())
1552                 return -ENODEV;
1553
1554         if (kprobe_boot_events_enabled) {
1555                 pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
1556                 return 0;
1557         }
1558
1559         target = kprobe_trace_selftest_target;
1560
1561         pr_info("Testing kprobe tracing: ");
1562
1563         ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
1564                                 create_or_delete_trace_kprobe);
1565         if (WARN_ON_ONCE(ret)) {
1566                 pr_warn("error on probing function entry.\n");
1567                 warn++;
1568         } else {
1569                 /* Enable trace point */
1570                 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1571                 if (WARN_ON_ONCE(tk == NULL)) {
1572                         pr_warn("error on getting new probe.\n");
1573                         warn++;
1574                 } else {
1575                         file = find_trace_probe_file(tk, top_trace_array());
1576                         if (WARN_ON_ONCE(file == NULL)) {
1577                                 pr_warn("error on getting probe file.\n");
1578                                 warn++;
1579                         } else
1580                                 enable_trace_kprobe(tk, file);
1581                 }
1582         }
1583
1584         ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
1585                                 create_or_delete_trace_kprobe);
1586         if (WARN_ON_ONCE(ret)) {
1587                 pr_warn("error on probing function return.\n");
1588                 warn++;
1589         } else {
1590                 /* Enable trace point */
1591                 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1592                 if (WARN_ON_ONCE(tk == NULL)) {
1593                         pr_warn("error on getting 2nd new probe.\n");
1594                         warn++;
1595                 } else {
1596                         file = find_trace_probe_file(tk, top_trace_array());
1597                         if (WARN_ON_ONCE(file == NULL)) {
1598                                 pr_warn("error on getting probe file.\n");
1599                                 warn++;
1600                         } else
1601                                 enable_trace_kprobe(tk, file);
1602                 }
1603         }
1604
1605         if (warn)
1606                 goto end;
1607
1608         ret = target(1, 2, 3, 4, 5, 6);
1609
1610         /*
1611          * We do not expect an error here; the check only prevents the
1612          * optimizer from removing the call to target(), since otherwise there
1613          * are no side effects and the call would never be performed.
1614          */
1615         if (ret != 21)
1616                 warn++;
1617
1618         /* Disable trace points before removing it */
1619         tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1620         if (WARN_ON_ONCE(tk == NULL)) {
1621                 pr_warn("error on getting test probe.\n");
1622                 warn++;
1623         } else {
1624                 if (trace_kprobe_nhit(tk) != 1) {
1625                         pr_warn("incorrect number of testprobe hits\n");
1626                         warn++;
1627                 }
1628
1629                 file = find_trace_probe_file(tk, top_trace_array());
1630                 if (WARN_ON_ONCE(file == NULL)) {
1631                         pr_warn("error on getting probe file.\n");
1632                         warn++;
1633                 } else
1634                         disable_trace_kprobe(tk, file);
1635         }
1636
1637         tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1638         if (WARN_ON_ONCE(tk == NULL)) {
1639                 pr_warn("error on getting 2nd test probe.\n");
1640                 warn++;
1641         } else {
1642                 if (trace_kprobe_nhit(tk) != 1) {
1643                         pr_warn("incorrect number of testprobe2 hits\n");
1644                         warn++;
1645                 }
1646
1647                 file = find_trace_probe_file(tk, top_trace_array());
1648                 if (WARN_ON_ONCE(file == NULL)) {
1649                         pr_warn("error on getting probe file.\n");
1650                         warn++;
1651                 } else
1652                         disable_trace_kprobe(tk, file);
1653         }
1654
1655         ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
1656         if (WARN_ON_ONCE(ret)) {
1657                 pr_warn("error on deleting a probe.\n");
1658                 warn++;
1659         }
1660
1661         ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
1662         if (WARN_ON_ONCE(ret)) {
1663                 pr_warn("error on deleting a probe.\n");
1664                 warn++;
1665         }
1666
1667 end:
1668         ret = dyn_events_release_all(&trace_kprobe_ops);
1669         if (WARN_ON_ONCE(ret)) {
1670                 pr_warn("error on cleaning up probes.\n");
1671                 warn++;
1672         }
1673         /*
1674          * Wait for the optimizer work to finish. Otherwise it might fiddle
1675          * with probes in already freed __init text.
1676          */
1677         wait_for_kprobe_optimizer();
1678         if (warn)
1679                 pr_cont("NG: Some tests failed. Please check them.\n");
1680         else
1681                 pr_cont("OK\n");
1682         return 0;
1683 }
1684
1685 late_initcall(kprobe_trace_self_tests_init);
1686
1687 #endif