// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:      Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)     "trace_uprobe: " fmt

#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM     "uprobes"

struct uprobe_trace_entry_head {
        struct trace_entry      ent;
        unsigned long           vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)                   \
        (sizeof(struct uprobe_trace_entry_head) +       \
         sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)            \
        ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

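/*
 * Record layout (follows from the two macros above): a plain probe stores
 * one vaddr slot (the probed instruction pointer); a return probe stores
 * two (entry function address, then return address). Fetched argument
 * data is packed immediately after the vaddr slots.
 */
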
struct trace_uprobe_filter {
        rwlock_t                rwlock;
        int                     nr_systemwide;
        struct list_head        perf_events;
};

static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
                               struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
        .create = trace_uprobe_create,
        .show = trace_uprobe_show,
        .is_busy = trace_uprobe_is_busy,
        .free = trace_uprobe_release,
        .match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
        struct dyn_event                devent;
        struct trace_uprobe_filter      filter;
        struct uprobe_consumer          consumer;
        struct path                     path;
        struct inode                    *inode;
        char                            *filename;
        unsigned long                   offset;
        unsigned long                   ref_ctr_offset;
        unsigned long                   nhit;
        struct trace_probe              tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
        return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
        return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:        the struct trace_uprobe * for each entry
 * @dpos:       the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)        \
        for_each_dyn_event(dpos)                \
                if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

#define SIZEOF_TRACE_UPROBE(n)                          \
        (offsetof(struct trace_uprobe, tp.args) +       \
        (sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
        struct trace_uprobe     *tu;
        unsigned long           bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
                                unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
        return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
        return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
        unsigned long ret;
        unsigned long addr = user_stack_pointer(regs);

        addr = adjust_stack_addr(addr, n);

        if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
                return 0;

        return ret;
}

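/*
 * Example (assuming a downward-growing stack): a $stack3 fetch argument
 * reads the word at sp + 3 * sizeof(long); a faulting copy silently
 * yields 0 rather than an error.
 */
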
/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
        void __user *vaddr = (void __force __user *)src;

        return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
        long ret;
        u32 loc = *(u32 *)dest;
        int maxlen = get_loc_len(loc);
        u8 *dst = get_loc_data(dest, base);
        void __user *src = (void __force __user *) addr;

        if (unlikely(!maxlen))
                return -ENOMEM;

        if (addr == FETCH_TOKEN_COMM)
                ret = strlcpy(dst, current->comm, maxlen);
        else
                ret = strncpy_from_user(dst, src, maxlen);
        if (ret >= 0) {
                if (ret == maxlen)
                        dst[ret - 1] = '\0';
                else
                        /*
                         * Include the terminating null byte. In this case it
                         * was copied by strncpy_from_user but not accounted
                         * for in ret.
                         */
                        ret++;
                *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
        }

        return ret;
}

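/*
 * Note: make_data_loc() packs the copied length and the offset of @dst
 * relative to @base into the u32 data-location slot the caller pre-seeded,
 * which is how variable-length strings are addressed inside a fixed-layout
 * trace entry.
 */
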
/* Return the length of the string, including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
        int len;
        void __user *vaddr = (void __force __user *) addr;

        if (addr == FETCH_TOKEN_COMM)
                len = strlen(current->comm) + 1;
        else
                len = strnlen_user(vaddr, MAX_STRING_SIZE);

        return (len > MAX_STRING_SIZE) ? 0 : len;
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
        unsigned long base_addr;
        struct uprobe_dispatch_data *udd;

        udd = (void *) current->utask->vaddr;

        base_addr = udd->bp_addr - udd->tu->offset;
        return base_addr + file_offset;
}

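/*
 * In other words: the address where the breakpoint actually hit, minus the
 * probe's file offset, recovers the mapping's load base, so an @+OFFSET
 * fetch argument can be rebased to a runtime virtual address per task.
 */
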
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
                   void *base)
{
        unsigned long val;

        /* 1st stage: get value from context */
        switch (code->op) {
        case FETCH_OP_REG:
                val = regs_get_register(regs, code->param);
                break;
        case FETCH_OP_STACK:
                val = get_user_stack_nth(regs, code->param);
                break;
        case FETCH_OP_STACKP:
                val = user_stack_pointer(regs);
                break;
        case FETCH_OP_RETVAL:
                val = regs_return_value(regs);
                break;
        case FETCH_OP_IMM:
                val = code->immediate;
                break;
        case FETCH_OP_COMM:
                val = FETCH_TOKEN_COMM;
                break;
        case FETCH_OP_FOFFS:
                val = translate_user_vaddr(code->immediate);
                break;
        default:
                return -EILSEQ;
        }
        code++;

        return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

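/*
 * Fetch-argument strings compile to such op sequences, e.g. (illustrative):
 * '%ax' becomes a single FETCH_OP_REG, while '+0(%ax):string' adds
 * dereference and string stages that process_fetch_insn_bottom() executes
 * once this first stage has produced the base value.
 */
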
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
        rwlock_init(&filter->rwlock);
        filter->nr_systemwide = 0;
        INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
        return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
        return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
        struct trace_uprobe *tu = to_trace_uprobe(ev);

        return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match(const char *system, const char *event,
                               struct dyn_event *ev)
{
        struct trace_uprobe *tu = to_trace_uprobe(ev);

        return strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
                (!system || strcmp(tu->tp.call.class->system, system) == 0);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
        struct trace_uprobe *tu;

        if (!event || !group)
                return ERR_PTR(-EINVAL);

        tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
        if (!tu)
                return ERR_PTR(-ENOMEM);

        tu->tp.call.class = &tu->tp.class;
        tu->tp.call.name = kstrdup(event, GFP_KERNEL);
        if (!tu->tp.call.name)
                goto error;

        tu->tp.class.system = kstrdup(group, GFP_KERNEL);
        if (!tu->tp.class.system)
                goto error;

        dyn_event_init(&tu->devent, &trace_uprobe_ops);
        INIT_LIST_HEAD(&tu->tp.files);
        tu->consumer.handler = uprobe_dispatcher;
        if (is_ret)
                tu->consumer.ret_handler = uretprobe_dispatcher;
        init_trace_uprobe_filter(&tu->filter);
        return tu;

error:
        kfree(tu->tp.call.name);
        kfree(tu);

        return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
        int i;

        if (!tu)
                return;

        for (i = 0; i < tu->tp.nr_args; i++)
                traceprobe_free_probe_arg(&tu->tp.args[i]);

        path_put(&tu->path);
        kfree(tu->tp.call.class->system);
        kfree(tu->tp.call.name);
        kfree(tu->filename);
        kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
        struct dyn_event *pos;
        struct trace_uprobe *tu;

        for_each_trace_uprobe(tu, pos)
                if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
                    strcmp(tu->tp.call.class->system, group) == 0)
                        return tu;

        return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
        int ret;

        ret = unregister_uprobe_event(tu);
        if (ret)
                return ret;

        dyn_event_remove(&tu->devent);
        free_trace_uprobe(tu);
        return 0;
}

/*
 * Uprobes with multiple reference counters are not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception, though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), the same uprobe may
 * take a new reference counter as long as it does not conflict with any
 * other existing one.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
        struct dyn_event *pos;
        struct trace_uprobe *tmp, *old = NULL;
        struct inode *new_inode = d_real_inode(new->path.dentry);

        old = find_probe_event(trace_event_name(&new->tp.call),
                                new->tp.call.class->system);

        for_each_trace_uprobe(tmp, pos) {
                if ((old ? old != tmp : true) &&
                    new_inode == d_real_inode(tmp->path.dentry) &&
                    new->offset == tmp->offset &&
                    new->ref_ctr_offset != tmp->ref_ctr_offset) {
                        pr_warn("Reference counter offset mismatch.");
                        return ERR_PTR(-EINVAL);
                }
        }
        return old;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
        struct trace_uprobe *old_tu;
        int ret;

        mutex_lock(&event_mutex);

        /* register as an event */
        old_tu = find_old_trace_uprobe(tu);
        if (IS_ERR(old_tu)) {
                ret = PTR_ERR(old_tu);
                goto end;
        }

        if (old_tu) {
                /* delete old event */
                ret = unregister_trace_uprobe(old_tu);
                if (ret)
                        goto end;
        }

        ret = register_uprobe_event(tu);
        if (ret) {
                pr_warn("Failed to register probe event(%d)\n", ret);
                goto end;
        }

        dyn_event_add(&tu->devent);

end:
        mutex_unlock(&event_mutex);

        return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 */
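/*
 * For example (paths and offsets below are purely illustrative):
 *   p:myprobe /bin/bash:0x4245c0 %ax          - probe with one register arg
 *   r:myretprobe /bin/bash:0x4245c0 $retval   - return probe
 *   p /lib/libc.so.6:0x8e740(0x1d0f40)        - with a reference counter offset
 */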
static int trace_uprobe_create(int argc, const char **argv)
{
        struct trace_uprobe *tu;
        const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
        char *arg, *filename, *rctr, *rctr_end, *tmp;
        char buf[MAX_EVENT_NAME_LEN];
        struct path path;
        unsigned long offset, ref_ctr_offset;
        bool is_return = false;
        int i, ret;

        ret = 0;
        ref_ctr_offset = 0;

        switch (argv[0][0]) {
        case 'r':
                is_return = true;
                break;
        case 'p':
                break;
        default:
                return -ECANCELED;
        }

        if (argc < 2)
                return -ECANCELED;

        if (argv[0][1] == ':')
                event = &argv[0][2];

        if (!strchr(argv[1], '/'))
                return -ECANCELED;

        filename = kstrdup(argv[1], GFP_KERNEL);
        if (!filename)
                return -ENOMEM;

        /* Find the last occurrence, in case the path contains ':' too. */
        arg = strrchr(filename, ':');
        if (!arg || !isdigit(arg[1])) {
                kfree(filename);
                return -ECANCELED;
        }

        trace_probe_log_init("trace_uprobe", argc, argv);
        trace_probe_log_set_index(1);   /* filename is the 2nd argument */

        *arg++ = '\0';
        ret = kern_path(filename, LOOKUP_FOLLOW, &path);
        if (ret) {
                trace_probe_log_err(0, FILE_NOT_FOUND);
                kfree(filename);
                trace_probe_log_clear();
                return ret;
        }
        if (!d_is_reg(path.dentry)) {
                trace_probe_log_err(0, NO_REGULAR_FILE);
                ret = -EINVAL;
                goto fail_address_parse;
        }

        /* Parse reference counter offset if specified. */
        rctr = strchr(arg, '(');
        if (rctr) {
                rctr_end = strchr(rctr, ')');
                if (!rctr_end) {
                        ret = -EINVAL;
                        rctr_end = rctr + strlen(rctr);
                        trace_probe_log_err(rctr_end - filename,
                                            REFCNT_OPEN_BRACE);
                        goto fail_address_parse;
                } else if (rctr_end[1] != '\0') {
                        ret = -EINVAL;
                        trace_probe_log_err(rctr_end + 1 - filename,
                                            BAD_REFCNT_SUFFIX);
                        goto fail_address_parse;
                }

                *rctr++ = '\0';
                *rctr_end = '\0';
                ret = kstrtoul(rctr, 0, &ref_ctr_offset);
                if (ret) {
                        trace_probe_log_err(rctr - filename, BAD_REFCNT);
                        goto fail_address_parse;
                }
        }

        /* Parse uprobe offset. */
        ret = kstrtoul(arg, 0, &offset);
        if (ret) {
                trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
                goto fail_address_parse;
        }

        /* setup a probe */
        trace_probe_log_set_index(0);
        if (event) {
                ret = traceprobe_parse_event_name(&event, &group, buf,
                                                  event - argv[0]);
                if (ret)
                        goto fail_address_parse;
        } else {
                char *tail;
                char *ptr;

                tail = kstrdup(kbasename(filename), GFP_KERNEL);
                if (!tail) {
                        ret = -ENOMEM;
                        goto fail_address_parse;
                }

                ptr = strpbrk(tail, ".-_");
                if (ptr)
                        *ptr = '\0';

                snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
                event = buf;
                kfree(tail);
        }

        argc -= 2;
        argv += 2;

        tu = alloc_trace_uprobe(group, event, argc, is_return);
        if (IS_ERR(tu)) {
                ret = PTR_ERR(tu);
                /* This must return -ENOMEM otherwise there is a bug */
                WARN_ON_ONCE(ret != -ENOMEM);
                goto fail_address_parse;
        }
        tu->offset = offset;
        tu->ref_ctr_offset = ref_ctr_offset;
        tu->path = path;
        tu->filename = filename;

        /* parse arguments */
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
                tmp = kstrdup(argv[i], GFP_KERNEL);
                if (!tmp) {
                        ret = -ENOMEM;
                        goto error;
                }

                trace_probe_log_set_index(i + 2);
                ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
                                        is_return ? TPARG_FL_RETURN : 0);
                kfree(tmp);
                if (ret)
                        goto error;
        }

        ret = register_trace_uprobe(tu);
        if (!ret)
                goto out;

error:
        free_trace_uprobe(tu);
out:
        trace_probe_log_clear();
        return ret;

fail_address_parse:
        trace_probe_log_clear();
        path_put(&path);
        kfree(filename);

        return ret;
}

static int create_or_delete_trace_uprobe(int argc, char **argv)
{
        int ret;

        if (argv[0][0] == '-')
                return dyn_event_release(argc, argv, &trace_uprobe_ops);

        ret = trace_uprobe_create(argc, (const char **)argv);
        return ret == -ECANCELED ? -EINVAL : ret;
}

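/*
 * This is the handler behind writes to the tracefs uprobe_events file,
 * e.g. (illustrative):
 *   echo 'p:myprobe /bin/bash:0x4245c0' >> uprobe_events
 *   echo '-:myprobe' >> uprobe_events      - a leading '-' deletes the event
 */
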
static int trace_uprobe_release(struct dyn_event *ev)
{
        struct trace_uprobe *tu = to_trace_uprobe(ev);

        return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
        struct trace_uprobe *tu = to_trace_uprobe(ev);
        char c = is_ret_probe(tu) ? 'r' : 'p';
        int i;

        seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
                        trace_event_name(&tu->tp.call), tu->filename,
                        (int)(sizeof(void *) * 2), tu->offset);

        if (tu->ref_ctr_offset)
                seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

        for (i = 0; i < tu->tp.nr_args; i++)
                seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

        seq_putc(m, '\n');
        return 0;
}

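/*
 * The resulting line mirrors the creation syntax, e.g. (illustrative):
 *   p:uprobes/myprobe /bin/bash:0x00000000004245c0 arg1=%ax
 */
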
static int probes_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;

        if (!is_trace_uprobe(ev))
                return 0;

        return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
        int ret;

        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&trace_uprobe_ops);
                if (ret)
                        return ret;
        }

        return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                        create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;
        struct trace_uprobe *tu;

        if (!is_trace_uprobe(ev))
                return 0;

        tu = to_trace_uprobe(ev);
        seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
                        trace_event_name(&tu->tp.call), tu->nhit);
        return 0;
}

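/*
 * uprobe_profile prints one line per probe: the probed file, the event
 * name, and the hit count accumulated in tu->nhit by uprobe_dispatcher().
 */
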
static const struct seq_operations profile_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

struct uprobe_cpu_buffer {
        struct mutex mutex;
        void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
        int cpu, err_cpu;

        uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
        if (uprobe_cpu_buffer == NULL)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                struct page *p = alloc_pages_node(cpu_to_node(cpu),
                                                  GFP_KERNEL, 0);
                if (p == NULL) {
                        err_cpu = cpu;
                        goto err;
                }
                per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
                mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
        }

        return 0;

err:
        for_each_possible_cpu(cpu) {
                if (cpu == err_cpu)
                        break;
                free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
        }

        free_percpu(uprobe_cpu_buffer);
        return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
        int ret = 0;

        BUG_ON(!mutex_is_locked(&event_mutex));

        if (uprobe_buffer_refcnt++ == 0) {
                ret = uprobe_buffer_init();
                if (ret < 0)
                        uprobe_buffer_refcnt--;
        }

        return ret;
}

static void uprobe_buffer_disable(void)
{
        int cpu;

        BUG_ON(!mutex_is_locked(&event_mutex));

        if (--uprobe_buffer_refcnt == 0) {
                for_each_possible_cpu(cpu)
                        free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
                                                             cpu)->buf);

                free_percpu(uprobe_cpu_buffer);
                uprobe_cpu_buffer = NULL;
        }
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
        struct uprobe_cpu_buffer *ucb;
        int cpu;

        cpu = raw_smp_processor_id();
        ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

        /*
         * Use per-cpu buffers for fastest access, but the task might
         * migrate to another CPU, so the mutex ensures we have sole
         * access to the buffer.
         */
        mutex_lock(&ucb->mutex);

        return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
        mutex_unlock(&ucb->mutex);
}

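/*
 * The get/put pair brackets store_trace_args() in the dispatchers below.
 * Uprobe handlers run in the context of the probed task, so sleeping on
 * the per-cpu mutex here is safe.
 */
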
static void __uprobe_trace_func(struct trace_uprobe *tu,
                                unsigned long func, struct pt_regs *regs,
                                struct uprobe_cpu_buffer *ucb, int dsize,
                                struct trace_event_file *trace_file)
{
        struct uprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        void *data;
        int size, esize;
        struct trace_event_call *call = &tu->tp.call;

        WARN_ON(call != trace_file->event_call);

        if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
                return;

        if (trace_trigger_soft_disabled(trace_file))
                return;

        esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
        size = esize + tu->tp.size + dsize;
        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type, size, 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        if (is_ret_probe(tu)) {
                entry->vaddr[0] = func;
                entry->vaddr[1] = instruction_pointer(regs);
                data = DATAOF_TRACE_ENTRY(entry, true);
        } else {
                entry->vaddr[0] = instruction_pointer(regs);
                data = DATAOF_TRACE_ENTRY(entry, false);
        }

        memcpy(data, ucb->buf, tu->tp.size + dsize);

        event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
                             struct uprobe_cpu_buffer *ucb, int dsize)
{
        struct event_file_link *link;

        if (is_ret_probe(tu))
                return 0;

        rcu_read_lock();
        list_for_each_entry_rcu(link, &tu->tp.files, list)
                __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
        rcu_read_unlock();

        return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
                                 struct pt_regs *regs,
                                 struct uprobe_cpu_buffer *ucb, int dsize)
{
        struct event_file_link *link;

        rcu_read_lock();
        list_for_each_entry_rcu(link, &tu->tp.files, list)
                __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
        rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
        struct uprobe_trace_entry_head *entry;
        struct trace_seq *s = &iter->seq;
        struct trace_uprobe *tu;
        u8 *data;

        entry = (struct uprobe_trace_entry_head *)iter->ent;
        tu = container_of(event, struct trace_uprobe, tp.call.event);

        if (is_ret_probe(tu)) {
                trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
                                 trace_event_name(&tu->tp.call),
                                 entry->vaddr[1], entry->vaddr[0]);
                data = DATAOF_TRACE_ENTRY(entry, true);
        } else {
                trace_seq_printf(s, "%s: (0x%lx)",
                                 trace_event_name(&tu->tp.call),
                                 entry->vaddr[0]);
                data = DATAOF_TRACE_ENTRY(entry, false);
        }

        if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
                goto out;

        trace_seq_putc(s, '\n');

 out:
        return trace_handle_return(s);
}

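/*
 * Rendered trace lines thus look like (addresses illustrative):
 *   myprobe: (0x4245c0) arg1=1
 *   myretprobe: (0x423f00 <- 0x4245c0)    - return address <- function entry
 */
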
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
                                enum uprobe_filter_ctx ctx,
                                struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
                   filter_func_t filter)
{
        bool enabled = trace_probe_is_enabled(&tu->tp);
        struct event_file_link *link = NULL;
        int ret;

        if (file) {
                if (tu->tp.flags & TP_FLAG_PROFILE)
                        return -EINTR;

                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link)
                        return -ENOMEM;

                link->file = file;
                list_add_tail_rcu(&link->list, &tu->tp.files);

                tu->tp.flags |= TP_FLAG_TRACE;
        } else {
                if (tu->tp.flags & TP_FLAG_TRACE)
                        return -EINTR;

                tu->tp.flags |= TP_FLAG_PROFILE;
        }

        WARN_ON(!uprobe_filter_is_empty(&tu->filter));

        if (enabled)
                return 0;

        ret = uprobe_buffer_enable();
        if (ret)
                goto err_flags;

        tu->consumer.filter = filter;
        tu->inode = d_real_inode(tu->path.dentry);
        if (tu->ref_ctr_offset) {
                ret = uprobe_register_refctr(tu->inode, tu->offset,
                                tu->ref_ctr_offset, &tu->consumer);
        } else {
                ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
        }

        if (ret)
                goto err_buffer;

        return 0;

 err_buffer:
        uprobe_buffer_disable();

 err_flags:
        if (file) {
                list_del(&link->list);
                kfree(link);
                tu->tp.flags &= ~TP_FLAG_TRACE;
        } else {
                tu->tp.flags &= ~TP_FLAG_PROFILE;
        }
        return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
        if (!trace_probe_is_enabled(&tu->tp))
                return;

        if (file) {
                struct event_file_link *link;

                link = find_event_file_link(&tu->tp, file);
                if (!link)
                        return;

                list_del_rcu(&link->list);
                /* synchronize with u{,ret}probe_trace_func */
                synchronize_rcu();
                kfree(link);

                if (!list_empty(&tu->tp.files))
                        return;
        }

        WARN_ON(!uprobe_filter_is_empty(&tu->filter));

        uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
        tu->inode = NULL;
        tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

        uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret, size;
        struct uprobe_trace_entry_head field;
        struct trace_uprobe *tu = event_call->data;

        if (is_ret_probe(tu)) {
                DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
                DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
                size = SIZEOF_TRACE_ENTRY(true);
        } else {
                DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
                size = SIZEOF_TRACE_ENTRY(false);
        }

        return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
        struct perf_event *event;

        if (filter->nr_systemwide)
                return true;

        list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
                if (event->hw.target->mm == mm)
                        return true;
        }

        return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
        return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
        bool done;

        write_lock(&tu->filter.rwlock);
        if (event->hw.target) {
                list_del(&event->hw.tp_list);
                done = tu->filter.nr_systemwide ||
                        (event->hw.target->flags & PF_EXITING) ||
                        uprobe_filter_event(tu, event);
        } else {
                tu->filter.nr_systemwide--;
                done = tu->filter.nr_systemwide;
        }
        write_unlock(&tu->filter.rwlock);

        if (!done)
                return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

        return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
        bool done;
        int err;

        write_lock(&tu->filter.rwlock);
        if (event->hw.target) {
                /*
                 * event->parent != NULL means copy_process(), we can avoid
                 * uprobe_apply(). current->mm must be probed and we can rely
                 * on dup_mmap() which preserves the already installed bp's.
                 *
                 * attr.enable_on_exec means that exec/mmap will install the
                 * breakpoints we need.
                 */
                done = tu->filter.nr_systemwide ||
                        event->parent || event->attr.enable_on_exec ||
                        uprobe_filter_event(tu, event);
                list_add(&event->hw.tp_list, &tu->filter.perf_events);
        } else {
                done = tu->filter.nr_systemwide;
                tu->filter.nr_systemwide++;
        }
        write_unlock(&tu->filter.rwlock);

        err = 0;
        if (!done) {
                err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
                if (err)
                        uprobe_perf_close(tu, event);
        }
        return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
                                enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
        struct trace_uprobe *tu;
        int ret;

        tu = container_of(uc, struct trace_uprobe, consumer);
        read_lock(&tu->filter.rwlock);
        ret = __uprobe_perf_filter(&tu->filter, mm);
        read_unlock(&tu->filter.rwlock);

        return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
                               unsigned long func, struct pt_regs *regs,
                               struct uprobe_cpu_buffer *ucb, int dsize)
{
        struct trace_event_call *call = &tu->tp.call;
        struct uprobe_trace_entry_head *entry;
        struct hlist_head *head;
        void *data;
        int size, esize;
        int rctx;

        if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
                return;

        esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

        size = esize + tu->tp.size + dsize;
        size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
                return;

        preempt_disable();
        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                goto out;

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                goto out;

        if (is_ret_probe(tu)) {
                entry->vaddr[0] = func;
                entry->vaddr[1] = instruction_pointer(regs);
                data = DATAOF_TRACE_ENTRY(entry, true);
        } else {
                entry->vaddr[0] = instruction_pointer(regs);
                data = DATAOF_TRACE_ENTRY(entry, false);
        }

        memcpy(data, ucb->buf, tu->tp.size + dsize);

        if (size - esize > tu->tp.size + dsize) {
                int len = tu->tp.size + dsize;

                memset(data + len, 0, size - esize - len);
        }

        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
 out:
        preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
                            struct uprobe_cpu_buffer *ucb, int dsize)
{
        if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
                return UPROBE_HANDLER_REMOVE;

        if (!is_ret_probe(tu))
                __uprobe_perf_func(tu, 0, regs, ucb, dsize);
        return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
                                struct pt_regs *regs,
                                struct uprobe_cpu_buffer *ucb, int dsize)
{
        __uprobe_perf_func(tu, func, regs, ucb, dsize);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
                        const char **filename, u64 *probe_offset,
                        bool perf_type_tracepoint)
{
        const char *pevent = trace_event_name(event->tp_event);
        const char *group = event->tp_event->class->system;
        struct trace_uprobe *tu;

        if (perf_type_tracepoint)
                tu = find_probe_event(pevent, group);
        else
                tu = event->tp_event->data;
        if (!tu)
                return -EINVAL;

        *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
                                    : BPF_FD_TYPE_UPROBE;
        *filename = tu->filename;
        *probe_offset = tu->offset;
        return 0;
}
#endif  /* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
                      void *data)
{
        struct trace_uprobe *tu = event->data;
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return probe_event_enable(tu, file, NULL);

        case TRACE_REG_UNREGISTER:
                probe_event_disable(tu, file);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return probe_event_enable(tu, NULL, uprobe_perf_filter);

        case TRACE_REG_PERF_UNREGISTER:
                probe_event_disable(tu, NULL);
                return 0;

        case TRACE_REG_PERF_OPEN:
                return uprobe_perf_open(tu, data);

        case TRACE_REG_PERF_CLOSE:
                return uprobe_perf_close(tu, data);

#endif
        default:
                return 0;
        }
        return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
        struct trace_uprobe *tu;
        struct uprobe_dispatch_data udd;
        struct uprobe_cpu_buffer *ucb;
        int dsize, esize;
        int ret = 0;

        tu = container_of(con, struct trace_uprobe, consumer);
        tu->nhit++;

        udd.tu = tu;
        udd.bp_addr = instruction_pointer(regs);

        current->utask->vaddr = (unsigned long) &udd;

        if (WARN_ON_ONCE(!uprobe_cpu_buffer))
                return 0;

        dsize = __get_data_size(&tu->tp, regs);
        esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

        ucb = uprobe_buffer_get();
        store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

        if (tu->tp.flags & TP_FLAG_TRACE)
                ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
        if (tu->tp.flags & TP_FLAG_PROFILE)
                ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
        uprobe_buffer_put(ucb);
        return ret;
}

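/*
 * The uprobe_dispatch_data stashed in current->utask->vaddr above is what
 * translate_user_vaddr() reads while arguments are being fetched; it is
 * only valid for the duration of this dispatch.
 */
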
static int uretprobe_dispatcher(struct uprobe_consumer *con,
                                unsigned long func, struct pt_regs *regs)
{
        struct trace_uprobe *tu;
        struct uprobe_dispatch_data udd;
        struct uprobe_cpu_buffer *ucb;
        int dsize, esize;

        tu = container_of(con, struct trace_uprobe, consumer);

        udd.tu = tu;
        udd.bp_addr = func;

        current->utask->vaddr = (unsigned long) &udd;

        if (WARN_ON_ONCE(!uprobe_cpu_buffer))
                return 0;

        dsize = __get_data_size(&tu->tp, regs);
        esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

        ucb = uprobe_buffer_get();
        store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

        if (tu->tp.flags & TP_FLAG_TRACE)
                uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
        if (tu->tp.flags & TP_FLAG_PROFILE)
                uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
        uprobe_buffer_put(ucb);
        return 0;
}

static struct trace_event_functions uprobe_funcs = {
        .trace          = print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu,
                                         struct trace_event_call *call)
{
        INIT_LIST_HEAD(&call->class->fields);
        call->event.funcs = &uprobe_funcs;
        call->class->define_fields = uprobe_event_define_fields;

        call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
        call->class->reg = trace_uprobe_register;
        call->data = tu;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
        struct trace_event_call *call = &tu->tp.call;
        int ret = 0;

        init_trace_event_call(tu, call);

        if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
                return -ENOMEM;

        ret = register_trace_event(&call->event);
        if (!ret) {
                kfree(call->print_fmt);
                return -ENODEV;
        }

        ret = trace_add_event_call(call);

        if (ret) {
                pr_info("Failed to register uprobe event: %s\n",
                        trace_event_name(call));
                kfree(call->print_fmt);
                unregister_trace_event(&call->event);
        }

        return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
        int ret;

        /* tu->event is unregistered in trace_remove_event_call() */
        ret = trace_remove_event_call(&tu->tp.call);
        if (ret)
                return ret;
        kfree(tu->tp.call.print_fmt);
        tu->tp.call.print_fmt = NULL;
        return 0;
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
                          unsigned long ref_ctr_offset, bool is_return)
{
        struct trace_uprobe *tu;
        struct path path;
        int ret;

        ret = kern_path(name, LOOKUP_FOLLOW, &path);
        if (ret)
                return ERR_PTR(ret);

        if (!d_is_reg(path.dentry)) {
                path_put(&path);
                return ERR_PTR(-EINVAL);
        }

        /*
         * local trace_uprobes are not added to dyn_event, so they are never
         * searched in find_probe_event(). Therefore, there is no concern
         * about the duplicated name "DUMMY_EVENT" here.
         */
        tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
                                is_return);

        if (IS_ERR(tu)) {
                pr_info("Failed to allocate trace_uprobe.(%d)\n",
                        (int)PTR_ERR(tu));
                path_put(&path);
                return ERR_CAST(tu);
        }

        tu->offset = offs;
        tu->path = path;
        tu->ref_ctr_offset = ref_ctr_offset;
        tu->filename = kstrdup(name, GFP_KERNEL);
        init_trace_event_call(tu, &tu->tp.call);

        if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        return &tu->tp.call;
error:
        free_trace_uprobe(tu);
        return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
        struct trace_uprobe *tu;

        tu = container_of(event_call, struct trace_uprobe, tp.call);

        kfree(tu->tp.call.print_fmt);
        tu->tp.call.print_fmt = NULL;

        free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
        struct dentry *d_tracer;
        int ret;

        ret = dyn_event_register(&trace_uprobe_ops);
        if (ret)
                return ret;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("uprobe_events", 0644, d_tracer,
                                    NULL, &uprobe_events_ops);
        /* Profile interface */
        trace_create_file("uprobe_profile", 0444, d_tracer,
                                    NULL, &uprobe_profile_ops);
        return 0;
}

fs_initcall(init_uprobe_trace);