Source: linux.git — kernel/trace/trace_uprobe.c
Commit subject: tracing/probe: Split trace_event related data from trace_probe
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * uprobes-based tracing events
4  *
5  * Copyright (C) IBM Corporation, 2010-2012
6  * Author:      Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7  */
8 #define pr_fmt(fmt)     "trace_uprobe: " fmt
9
10 #include <linux/ctype.h>
11 #include <linux/module.h>
12 #include <linux/uaccess.h>
13 #include <linux/uprobes.h>
14 #include <linux/namei.h>
15 #include <linux/string.h>
16 #include <linux/rculist.h>
17
18 #include "trace_dynevent.h"
19 #include "trace_probe.h"
20 #include "trace_probe_tmpl.h"
21
22 #define UPROBE_EVENT_SYSTEM     "uprobes"
23
/*
 * Ring-buffer record header for a uprobe event: the common trace entry
 * followed by one vaddr (probe: instruction pointer) or two vaddrs
 * (return probe: function address and return address) — see
 * SIZEOF_TRACE_ENTRY() and __uprobe_trace_func().
 */
struct uprobe_trace_entry_head {
        struct trace_entry      ent;
        unsigned long           vaddr[];        /* 1 or 2 entries, see above */
};
28
/* Size of the fixed part of an entry: header + 2 vaddrs for a return probe,
 * header + 1 vaddr otherwise. */
#define SIZEOF_TRACE_ENTRY(is_return)                   \
        (sizeof(struct uprobe_trace_entry_head) +       \
         sizeof(unsigned long) * (is_return ? 2 : 1))

/* Start of the variable-size argument data that follows the fixed part. */
#define DATAOF_TRACE_ENTRY(entry, is_return)            \
        ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
35
/*
 * Per-probe perf filter state: which perf_events (and how many
 * system-wide ones) are attached. Guarded by @rwlock.
 */
struct trace_uprobe_filter {
        rwlock_t                rwlock;
        int                     nr_systemwide;  /* count of CPU-wide perf events */
        struct list_head        perf_events;    /* task-bound perf events */
};
41
42 static int trace_uprobe_create(int argc, const char **argv);
43 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
44 static int trace_uprobe_release(struct dyn_event *ev);
45 static bool trace_uprobe_is_busy(struct dyn_event *ev);
46 static bool trace_uprobe_match(const char *system, const char *event,
47                                struct dyn_event *ev);
48
/* dyn_event backend for uprobe events; also used by is_trace_uprobe()
 * to recognize our events on the shared dyn_event list. */
static struct dyn_event_operations trace_uprobe_ops = {
        .create = trace_uprobe_create,
        .show = trace_uprobe_show,
        .is_busy = trace_uprobe_is_busy,
        .free = trace_uprobe_release,
        .match = trace_uprobe_match,
};
56
57 /*
58  * uprobe event core functions
59  */
struct trace_uprobe {
        struct dyn_event                devent;         /* link on the dyn_event list */
        struct trace_uprobe_filter      filter;         /* perf event filter state */
        struct uprobe_consumer          consumer;       /* handler/ret_handler hookup */
        struct path                     path;           /* resolved target file path */
        struct inode                    *inode;         /* set while registered, else NULL */
        char                            *filename;      /* user-supplied path string (owned) */
        unsigned long                   offset;         /* probe offset within the file */
        unsigned long                   ref_ctr_offset; /* SDT reference counter offset, 0 if none */
        unsigned long                   nhit;           /* hit counter for uprobe_profile */
        struct trace_probe              tp;             /* must be last: args[] is flexible */
};
72
73 static bool is_trace_uprobe(struct dyn_event *ev)
74 {
75         return ev->ops == &trace_uprobe_ops;
76 }
77
78 static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
79 {
80         return container_of(ev, struct trace_uprobe, devent);
81 }
82
83 /**
84  * for_each_trace_uprobe - iterate over the trace_uprobe list
85  * @pos:        the struct trace_uprobe * for each entry
86  * @dpos:       the struct dyn_event * to use as a loop cursor
87  */
88 #define for_each_trace_uprobe(pos, dpos)        \
89         for_each_dyn_event(dpos)                \
90                 if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
91
92 #define SIZEOF_TRACE_UPROBE(n)                          \
93         (offsetof(struct trace_uprobe, tp.args) +       \
94         (sizeof(struct probe_arg) * (n)))
95
96 static int register_uprobe_event(struct trace_uprobe *tu);
97 static int unregister_uprobe_event(struct trace_uprobe *tu);
98
/*
 * Stashed in current->utask->vaddr by the dispatchers so that
 * translate_user_vaddr() can compute file-offset based addresses.
 */
struct uprobe_dispatch_data {
        struct trace_uprobe     *tu;
        unsigned long           bp_addr;        /* address the breakpoint hit at */
};
103
104 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
105 static int uretprobe_dispatcher(struct uprobe_consumer *con,
106                                 unsigned long func, struct pt_regs *regs);
107
/*
 * Address of the n-th word on the user stack, starting from @addr.
 * The stack growth direction decides whether that word lies below
 * or above the starting address.
 */
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
        unsigned long words = n * sizeof(long);

        return addr - words;
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
        unsigned long words = n * sizeof(long);

        return addr + words;
}
#endif
119
120 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
121 {
122         unsigned long ret;
123         unsigned long addr = user_stack_pointer(regs);
124
125         addr = adjust_stack_addr(addr, n);
126
127         if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
128                 return 0;
129
130         return ret;
131 }
132
133 /*
134  * Uprobes-specific fetch functions
135  */
136 static nokprobe_inline int
137 probe_mem_read(void *dest, void *src, size_t size)
138 {
139         void __user *vaddr = (void __force __user *)src;
140
141         return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
142 }
143
/* For uprobes every address is a user address, so the "user" variant is
 * the same operation as probe_mem_read(). */
static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
        return probe_mem_read(dest, src, size);
}
149
150 /*
151  * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
152  * length and relative data location.
153  */
154 static nokprobe_inline int
155 fetch_store_string(unsigned long addr, void *dest, void *base)
156 {
157         long ret;
158         u32 loc = *(u32 *)dest;
159         int maxlen  = get_loc_len(loc);
160         u8 *dst = get_loc_data(dest, base);
161         void __user *src = (void __force __user *) addr;
162
163         if (unlikely(!maxlen))
164                 return -ENOMEM;
165
166         if (addr == FETCH_TOKEN_COMM)
167                 ret = strlcpy(dst, current->comm, maxlen);
168         else
169                 ret = strncpy_from_user(dst, src, maxlen);
170         if (ret >= 0) {
171                 if (ret == maxlen)
172                         dst[ret - 1] = '\0';
173                 else
174                         /*
175                          * Include the terminating null byte. In this case it
176                          * was copied by strncpy_from_user but not accounted
177                          * for in ret.
178                          */
179                         ret++;
180                 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
181         }
182
183         return ret;
184 }
185
/* All uprobe string fetches come from user space already. */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
        return fetch_store_string(addr, dest, base);
}
191
192 /* Return the length of string -- including null terminal byte */
193 static nokprobe_inline int
194 fetch_store_strlen(unsigned long addr)
195 {
196         int len;
197         void __user *vaddr = (void __force __user *) addr;
198
199         if (addr == FETCH_TOKEN_COMM)
200                 len = strlen(current->comm) + 1;
201         else
202                 len = strnlen_user(vaddr, MAX_STRING_SIZE);
203
204         return (len > MAX_STRING_SIZE) ? 0 : len;
205 }
206
/* User-space variant: identical to fetch_store_strlen() for uprobes. */
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
        return fetch_store_strlen(addr);
}
212
/*
 * Translate a file offset into the virtual address it is mapped at in the
 * current task, using the dispatch data the dispatchers stashed in
 * current->utask->vaddr (bp_addr maps to tu->offset in the file).
 */
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
        unsigned long base_addr;
        struct uprobe_dispatch_data *udd;

        udd = (void *) current->utask->vaddr;

        base_addr = udd->bp_addr - udd->tu->offset;
        return base_addr + file_offset;
}
223
/* Note that we don't verify it, since the code does not come from user space */
/*
 * Execute the first fetch instruction of @code to obtain the raw value,
 * then hand off to process_fetch_insn_bottom() for the common dereference/
 * store stages. Returns -EILSEQ on an opcode this probe type can't serve.
 */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
                   void *base)
{
        unsigned long val;

        /* 1st stage: get value from context */
        switch (code->op) {
        case FETCH_OP_REG:
                val = regs_get_register(regs, code->param);
                break;
        case FETCH_OP_STACK:
                val = get_user_stack_nth(regs, code->param);
                break;
        case FETCH_OP_STACKP:
                val = user_stack_pointer(regs);
                break;
        case FETCH_OP_RETVAL:
                val = regs_return_value(regs);
                break;
        case FETCH_OP_IMM:
                val = code->immediate;
                break;
        case FETCH_OP_COMM:
                /* Sentinel address; fetch_store_string() special-cases it. */
                val = FETCH_TOKEN_COMM;
                break;
        case FETCH_OP_FOFFS:
                /* File offset -> runtime virtual address in current task. */
                val = translate_user_vaddr(code->immediate);
                break;
        default:
                return -EILSEQ;
        }
        code++;

        return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
262
263 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
264 {
265         rwlock_init(&filter->rwlock);
266         filter->nr_systemwide = 0;
267         INIT_LIST_HEAD(&filter->perf_events);
268 }
269
270 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
271 {
272         return !filter->nr_systemwide && list_empty(&filter->perf_events);
273 }
274
275 static inline bool is_ret_probe(struct trace_uprobe *tu)
276 {
277         return tu->consumer.ret_handler != NULL;
278 }
279
280 static bool trace_uprobe_is_busy(struct dyn_event *ev)
281 {
282         struct trace_uprobe *tu = to_trace_uprobe(ev);
283
284         return trace_probe_is_enabled(&tu->tp);
285 }
286
287 static bool trace_uprobe_match(const char *system, const char *event,
288                                struct dyn_event *ev)
289 {
290         struct trace_uprobe *tu = to_trace_uprobe(ev);
291
292         return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
293             (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0);
294 }
295
296 static nokprobe_inline struct trace_uprobe *
297 trace_uprobe_primary_from_call(struct trace_event_call *call)
298 {
299         struct trace_probe *tp;
300
301         tp = trace_probe_primary_from_call(call);
302         if (WARN_ON_ONCE(!tp))
303                 return NULL;
304
305         return container_of(tp, struct trace_uprobe, tp);
306 }
307
308 /*
309  * Allocate new trace_uprobe and initialize it (including uprobes).
310  */
311 static struct trace_uprobe *
312 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
313 {
314         struct trace_uprobe *tu;
315         int ret;
316
317         tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
318         if (!tu)
319                 return ERR_PTR(-ENOMEM);
320
321         ret = trace_probe_init(&tu->tp, event, group);
322         if (ret < 0)
323                 goto error;
324
325         dyn_event_init(&tu->devent, &trace_uprobe_ops);
326         tu->consumer.handler = uprobe_dispatcher;
327         if (is_ret)
328                 tu->consumer.ret_handler = uretprobe_dispatcher;
329         init_trace_uprobe_filter(&tu->filter);
330         return tu;
331
332 error:
333         kfree(tu);
334
335         return ERR_PTR(ret);
336 }
337
/*
 * Release a trace_uprobe and everything it owns: the path reference,
 * the embedded trace_probe state, and the filename string.
 * NULL-safe so error paths can call it unconditionally.
 */
static void free_trace_uprobe(struct trace_uprobe *tu)
{
        if (!tu)
                return;

        path_put(&tu->path);
        trace_probe_cleanup(&tu->tp);
        kfree(tu->filename);
        kfree(tu);
}
348
349 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
350 {
351         struct dyn_event *pos;
352         struct trace_uprobe *tu;
353
354         for_each_trace_uprobe(tu, pos)
355                 if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
356                     strcmp(trace_probe_group_name(&tu->tp), group) == 0)
357                         return tu;
358
359         return NULL;
360 }
361
362 /* Unregister a trace_uprobe and probe_event */
363 static int unregister_trace_uprobe(struct trace_uprobe *tu)
364 {
365         int ret;
366
367         ret = unregister_uprobe_event(tu);
368         if (ret)
369                 return ret;
370
371         dyn_event_remove(&tu->devent);
372         free_trace_uprobe(tu);
373         return 0;
374 }
375
376 /*
377  * Uprobe with multiple reference counter is not allowed. i.e.
378  * If inode and offset matches, reference counter offset *must*
379  * match as well. Though, there is one exception: If user is
380  * replacing old trace_uprobe with new one(same group/event),
381  * then we allow same uprobe with new reference counter as far
382  * as the new one does not conflict with any other existing
383  * ones.
384  */
385 static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
386 {
387         struct dyn_event *pos;
388         struct trace_uprobe *tmp, *old = NULL;
389         struct inode *new_inode = d_real_inode(new->path.dentry);
390
391         old = find_probe_event(trace_probe_name(&new->tp),
392                                 trace_probe_group_name(&new->tp));
393
394         for_each_trace_uprobe(tmp, pos) {
395                 if ((old ? old != tmp : true) &&
396                     new_inode == d_real_inode(tmp->path.dentry) &&
397                     new->offset == tmp->offset &&
398                     new->ref_ctr_offset != tmp->ref_ctr_offset) {
399                         pr_warn("Reference counter offset mismatch.");
400                         return ERR_PTR(-EINVAL);
401                 }
402         }
403         return old;
404 }
405
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
        struct trace_uprobe *old_tu;
        int ret;

        /* event_mutex serializes against concurrent register/unregister. */
        mutex_lock(&event_mutex);

        /* register as an event */
        old_tu = find_old_trace_uprobe(tu);
        if (IS_ERR(old_tu)) {
                /* Reference counter conflict with an existing probe. */
                ret = PTR_ERR(old_tu);
                goto end;
        }

        if (old_tu) {
                /* delete old event */
                ret = unregister_trace_uprobe(old_tu);
                if (ret)
                        goto end;
        }

        ret = register_uprobe_event(tu);
        if (ret) {
                pr_warn("Failed to register probe event(%d)\n", ret);
                goto end;
        }

        dyn_event_add(&tu->devent);

end:
        mutex_unlock(&event_mutex);

        return ret;
}
441
442 /*
443  * Argument syntax:
444  *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
445  */
446 static int trace_uprobe_create(int argc, const char **argv)
447 {
448         struct trace_uprobe *tu;
449         const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
450         char *arg, *filename, *rctr, *rctr_end, *tmp;
451         char buf[MAX_EVENT_NAME_LEN];
452         struct path path;
453         unsigned long offset, ref_ctr_offset;
454         bool is_return = false;
455         int i, ret;
456
457         ret = 0;
458         ref_ctr_offset = 0;
459
460         switch (argv[0][0]) {
461         case 'r':
462                 is_return = true;
463                 break;
464         case 'p':
465                 break;
466         default:
467                 return -ECANCELED;
468         }
469
470         if (argc < 2)
471                 return -ECANCELED;
472
473         if (argv[0][1] == ':')
474                 event = &argv[0][2];
475
476         if (!strchr(argv[1], '/'))
477                 return -ECANCELED;
478
479         filename = kstrdup(argv[1], GFP_KERNEL);
480         if (!filename)
481                 return -ENOMEM;
482
483         /* Find the last occurrence, in case the path contains ':' too. */
484         arg = strrchr(filename, ':');
485         if (!arg || !isdigit(arg[1])) {
486                 kfree(filename);
487                 return -ECANCELED;
488         }
489
490         trace_probe_log_init("trace_uprobe", argc, argv);
491         trace_probe_log_set_index(1);   /* filename is the 2nd argument */
492
493         *arg++ = '\0';
494         ret = kern_path(filename, LOOKUP_FOLLOW, &path);
495         if (ret) {
496                 trace_probe_log_err(0, FILE_NOT_FOUND);
497                 kfree(filename);
498                 trace_probe_log_clear();
499                 return ret;
500         }
501         if (!d_is_reg(path.dentry)) {
502                 trace_probe_log_err(0, NO_REGULAR_FILE);
503                 ret = -EINVAL;
504                 goto fail_address_parse;
505         }
506
507         /* Parse reference counter offset if specified. */
508         rctr = strchr(arg, '(');
509         if (rctr) {
510                 rctr_end = strchr(rctr, ')');
511                 if (!rctr_end) {
512                         ret = -EINVAL;
513                         rctr_end = rctr + strlen(rctr);
514                         trace_probe_log_err(rctr_end - filename,
515                                             REFCNT_OPEN_BRACE);
516                         goto fail_address_parse;
517                 } else if (rctr_end[1] != '\0') {
518                         ret = -EINVAL;
519                         trace_probe_log_err(rctr_end + 1 - filename,
520                                             BAD_REFCNT_SUFFIX);
521                         goto fail_address_parse;
522                 }
523
524                 *rctr++ = '\0';
525                 *rctr_end = '\0';
526                 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
527                 if (ret) {
528                         trace_probe_log_err(rctr - filename, BAD_REFCNT);
529                         goto fail_address_parse;
530                 }
531         }
532
533         /* Parse uprobe offset. */
534         ret = kstrtoul(arg, 0, &offset);
535         if (ret) {
536                 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
537                 goto fail_address_parse;
538         }
539
540         /* setup a probe */
541         trace_probe_log_set_index(0);
542         if (event) {
543                 ret = traceprobe_parse_event_name(&event, &group, buf,
544                                                   event - argv[0]);
545                 if (ret)
546                         goto fail_address_parse;
547         } else {
548                 char *tail;
549                 char *ptr;
550
551                 tail = kstrdup(kbasename(filename), GFP_KERNEL);
552                 if (!tail) {
553                         ret = -ENOMEM;
554                         goto fail_address_parse;
555                 }
556
557                 ptr = strpbrk(tail, ".-_");
558                 if (ptr)
559                         *ptr = '\0';
560
561                 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
562                 event = buf;
563                 kfree(tail);
564         }
565
566         argc -= 2;
567         argv += 2;
568
569         tu = alloc_trace_uprobe(group, event, argc, is_return);
570         if (IS_ERR(tu)) {
571                 ret = PTR_ERR(tu);
572                 /* This must return -ENOMEM otherwise there is a bug */
573                 WARN_ON_ONCE(ret != -ENOMEM);
574                 goto fail_address_parse;
575         }
576         tu->offset = offset;
577         tu->ref_ctr_offset = ref_ctr_offset;
578         tu->path = path;
579         tu->filename = filename;
580
581         /* parse arguments */
582         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
583                 tmp = kstrdup(argv[i], GFP_KERNEL);
584                 if (!tmp) {
585                         ret = -ENOMEM;
586                         goto error;
587                 }
588
589                 trace_probe_log_set_index(i + 2);
590                 ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
591                                         is_return ? TPARG_FL_RETURN : 0);
592                 kfree(tmp);
593                 if (ret)
594                         goto error;
595         }
596
597         ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
598         if (ret < 0)
599                 goto error;
600
601         ret = register_trace_uprobe(tu);
602         if (!ret)
603                 goto out;
604
605 error:
606         free_trace_uprobe(tu);
607 out:
608         trace_probe_log_clear();
609         return ret;
610
611 fail_address_parse:
612         trace_probe_log_clear();
613         path_put(&path);
614         kfree(filename);
615
616         return ret;
617 }
618
619 static int create_or_delete_trace_uprobe(int argc, char **argv)
620 {
621         int ret;
622
623         if (argv[0][0] == '-')
624                 return dyn_event_release(argc, argv, &trace_uprobe_ops);
625
626         ret = trace_uprobe_create(argc, (const char **)argv);
627         return ret == -ECANCELED ? -EINVAL : ret;
628 }
629
/* dyn_event free callback. */
static int trace_uprobe_release(struct dyn_event *ev)
{
        return unregister_trace_uprobe(to_trace_uprobe(ev));
}
636
/* Probes listing interfaces */
/* Print one probe definition in the same syntax used to create it. */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
        struct trace_uprobe *tu = to_trace_uprobe(ev);
        char c = is_ret_probe(tu) ? 'r' : 'p';
        int i;

        /* "p|r:GROUP/EVENT PATH:0xOFFSET" — offset zero-padded to pointer width */
        seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
                        trace_probe_name(&tu->tp), tu->filename,
                        (int)(sizeof(void *) * 2), tu->offset);

        /* Optional "(0xREF_CTR_OFFSET)" suffix. */
        if (tu->ref_ctr_offset)
                seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

        for (i = 0; i < tu->tp.nr_args; i++)
                seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

        seq_putc(m, '\n');
        return 0;
}
657
/* seq_file show: the dyn_event list is shared, skip foreign entries. */
static int probes_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;

        return is_trace_uprobe(ev) ? trace_uprobe_show(m, ev) : 0;
}
667
/* seq_file iteration over the shared dyn_event list for uprobe_events. */
static const struct seq_operations probes_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_seq_show
};
674
675 static int probes_open(struct inode *inode, struct file *file)
676 {
677         int ret;
678
679         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
680                 ret = dyn_events_release_all(&trace_uprobe_ops);
681                 if (ret)
682                         return ret;
683         }
684
685         return seq_open(file, &probes_seq_op);
686 }
687
/* Parse written text line-by-line into create/delete commands. */
static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                        create_or_delete_trace_uprobe);
}
694
/* File operations for the tracefs "uprobe_events" control file. */
static const struct file_operations uprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};
703
/* Probes profiling interfaces */
/* One line per probe: "  FILENAME EVENT NHIT". */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;
        struct trace_uprobe *tu;

        /* Shared dyn_event list; skip entries from other probe types. */
        if (!is_trace_uprobe(ev))
                return 0;

        tu = to_trace_uprobe(ev);
        seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
                        trace_probe_name(&tu->tp), tu->nhit);
        return 0;
}
718
/* seq_file iteration for the "uprobe_profile" hit-count file. */
static const struct seq_operations profile_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_profile_seq_show
};
725
/* Open the read-only uprobe_profile file. */
static int profile_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &profile_seq_op);
}
730
/* File operations for the tracefs "uprobe_profile" statistics file. */
static const struct file_operations uprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
738
/*
 * Per-cpu scratch buffer (one page) used to assemble fetched argument
 * data before copying it into the ring buffer. The mutex protects
 * against the task migrating CPUs mid-use; see uprobe_buffer_get().
 */
struct uprobe_cpu_buffer {
        struct mutex mutex;
        void *buf;
};
/* Allocated on first enable, freed on last disable; see refcnt below. */
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
745
/*
 * Allocate one page per possible CPU for argument assembly.
 * On partial failure, free only the pages allocated so far.
 */
static int uprobe_buffer_init(void)
{
        int cpu, err_cpu;

        uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
        if (uprobe_cpu_buffer == NULL)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                /* Allocate on the CPU's own node for locality. */
                struct page *p = alloc_pages_node(cpu_to_node(cpu),
                                                  GFP_KERNEL, 0);
                if (p == NULL) {
                        err_cpu = cpu;
                        goto err;
                }
                per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
                mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
        }

        return 0;

err:
        /* Unwind: free pages for CPUs before the one that failed. */
        for_each_possible_cpu(cpu) {
                if (cpu == err_cpu)
                        break;
                free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
        }

        free_percpu(uprobe_cpu_buffer);
        return -ENOMEM;
}
777
778 static int uprobe_buffer_enable(void)
779 {
780         int ret = 0;
781
782         BUG_ON(!mutex_is_locked(&event_mutex));
783
784         if (uprobe_buffer_refcnt++ == 0) {
785                 ret = uprobe_buffer_init();
786                 if (ret < 0)
787                         uprobe_buffer_refcnt--;
788         }
789
790         return ret;
791 }
792
/* Drop a reference on the per-cpu buffers; free them at refcount zero.
 * Caller must hold event_mutex (pairs with uprobe_buffer_enable()). */
static void uprobe_buffer_disable(void)
{
        int cpu;

        BUG_ON(!mutex_is_locked(&event_mutex));

        if (--uprobe_buffer_refcnt == 0) {
                for_each_possible_cpu(cpu)
                        free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
                                                             cpu)->buf);

                free_percpu(uprobe_cpu_buffer);
                uprobe_cpu_buffer = NULL;
        }
}
808
809 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
810 {
811         struct uprobe_cpu_buffer *ucb;
812         int cpu;
813
814         cpu = raw_smp_processor_id();
815         ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
816
817         /*
818          * Use per-cpu buffers for fastest access, but we might migrate
819          * so the mutex makes sure we have sole access to it.
820          */
821         mutex_lock(&ucb->mutex);
822
823         return ucb;
824 }
825
/* Release a buffer acquired by uprobe_buffer_get(). */
static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
        mutex_unlock(&ucb->mutex);
}
830
/*
 * Emit one event record into @trace_file's ring buffer: reserve space,
 * fill the vaddr header (IP, or func + IP for return probes), copy the
 * pre-fetched argument data from @ucb, then commit.
 */
static void __uprobe_trace_func(struct trace_uprobe *tu,
                                unsigned long func, struct pt_regs *regs,
                                struct uprobe_cpu_buffer *ucb, int dsize,
                                struct trace_event_file *trace_file)
{
        struct uprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        void *data;
        int size, esize;
        struct trace_event_call *call = trace_probe_event_call(&tu->tp);

        WARN_ON(call != trace_file->event_call);

        /* Scratch buffer is one page; refuse anything that can't fit. */
        if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
                return;

        if (trace_trigger_soft_disabled(trace_file))
                return;

        esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
        size = esize + tu->tp.size + dsize;
        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
                                                call->event.type, size, 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        if (is_ret_probe(tu)) {
                /* Return probe: record both function entry and return IP. */
                entry->vaddr[0] = func;
                entry->vaddr[1] = instruction_pointer(regs);
                data = DATAOF_TRACE_ENTRY(entry, true);
        } else {
                entry->vaddr[0] = instruction_pointer(regs);
                data = DATAOF_TRACE_ENTRY(entry, false);
        }

        memcpy(data, ucb->buf, tu->tp.size + dsize);

        event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
872
/* uprobe handler */
/* Emit the event to every trace_event_file linked to this probe.
 * Return probes are handled by uretprobe_trace_func() instead. */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
                             struct uprobe_cpu_buffer *ucb, int dsize)
{
        struct event_file_link *link;

        if (is_ret_probe(tu))
                return 0;

        rcu_read_lock();
        trace_probe_for_each_link_rcu(link, &tu->tp)
                __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
        rcu_read_unlock();

        return 0;
}
889
/* Return-probe counterpart of uprobe_trace_func(); @func is the probed
 * function's entry address. */
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
                                 struct pt_regs *regs,
                                 struct uprobe_cpu_buffer *ucb, int dsize)
{
        struct event_file_link *link;

        rcu_read_lock();
        trace_probe_for_each_link_rcu(link, &tu->tp)
                __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
        rcu_read_unlock();
}
901
/* Event entry printers */
/*
 * Format one recorded entry for the "trace" file:
 *   "EVENT: (RETIP <- FUNC)" for return probes, "EVENT: (IP)" otherwise,
 * followed by the decoded probe args.
 */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
        struct uprobe_trace_entry_head *entry;
        struct trace_seq *s = &iter->seq;
        struct trace_uprobe *tu;
        u8 *data;

        entry = (struct uprobe_trace_entry_head *)iter->ent;
        tu = trace_uprobe_primary_from_call(
                container_of(event, struct trace_event_call, event));
        if (unlikely(!tu))
                goto out;

        if (is_ret_probe(tu)) {
                trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
                                 trace_probe_name(&tu->tp),
                                 entry->vaddr[1], entry->vaddr[0]);
                data = DATAOF_TRACE_ENTRY(entry, true);
        } else {
                trace_seq_printf(s, "%s: (0x%lx)",
                                 trace_probe_name(&tu->tp),
                                 entry->vaddr[0]);
                data = DATAOF_TRACE_ENTRY(entry, false);
        }

        if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
                goto out;

        trace_seq_putc(s, '\n');

 out:
        return trace_handle_return(s);
}
937
/* Per-mm filter callback installed into uprobe_consumer.filter. */
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
                                enum uprobe_filter_ctx ctx,
                                struct mm_struct *mm);
941
/*
 * Arm one trace_uprobe: install @filter on the consumer and register the
 * uprobe (with the reference counter variant when ref_ctr_offset is set).
 * tu->inode is non-NULL only while the uprobe is successfully registered.
 */
static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
        int ret;

        tu->consumer.filter = filter;
        tu->inode = d_real_inode(tu->path.dentry);

        if (tu->ref_ctr_offset)
                ret = uprobe_register_refctr(tu->inode, tu->offset,
                                tu->ref_ctr_offset, &tu->consumer);
        else
                ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

        if (ret)
                tu->inode = NULL;       /* not registered; keep state consistent */

        return ret;
}
960
961 static void __probe_event_disable(struct trace_probe *tp)
962 {
963         struct trace_probe *pos;
964         struct trace_uprobe *tu;
965
966         list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
967                 tu = container_of(pos, struct trace_uprobe, tp);
968                 if (!tu->inode)
969                         continue;
970
971                 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
972
973                 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
974                 tu->inode = NULL;
975         }
976 }
977
/*
 * Enable all probes attached to @call, either for an ftrace instance
 * (@file != NULL) or for perf (@file == NULL, installing @filter as the
 * uprobe consumer filter).
 *
 * ftrace and perf use of one event are mutually exclusive; mixing them
 * returns -EINTR.  On registration failure all already-registered probes
 * of this event are rolled back.
 */
static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		/* Already claimed by perf: refuse the ftrace user. */
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		/* Already claimed by ftrace: refuse the perf user. */
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	/* Uprobes are already registered; only the flags/files changed. */
	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	/* Register every probe sharing this event; roll back all on error. */
	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}
1038
/*
 * Disable the event for one ftrace file (@file != NULL) or for perf
 * (@file == NULL).  The uprobes themselves are only unregistered once the
 * last user is gone.
 */
static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		/* Other trace files still reference this event. */
		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}
1063
/*
 * Describe the fixed entry fields to the tracing core: function address
 * and return address for return probes, instruction pointer otherwise,
 * followed by the dynamic probe arguments.
 *
 * NOTE: 'ret' and 'field' look unused but are consumed by the
 * DEFINE_FIELD() macro (see trace_probe.h), which presumably returns
 * from this function on error.
 */
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	/* Argument fields start right after the fixed entry header. */
	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
1085
1086 #ifdef CONFIG_PERF_EVENTS
1087 static bool
1088 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1089 {
1090         struct perf_event *event;
1091
1092         if (filter->nr_systemwide)
1093                 return true;
1094
1095         list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1096                 if (event->hw.target->mm == mm)
1097                         return true;
1098         }
1099
1100         return false;
1101 }
1102
1103 static inline bool
1104 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1105 {
1106         return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1107 }
1108
/*
 * Detach a perf event from the probe's filter.  When no remaining event
 * needs the breakpoints (and the task is not already exiting), physically
 * remove them via uprobe_apply().
 */
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/* Per-task event: drop it from the filter list first. */
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		/* System-wide event; done if others remain after decrement. */
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}
1130
/*
 * Attach a perf event to the probe's filter and, when this is the first
 * event interested in the target mm, install the breakpoints via
 * uprobe_apply().  On apply failure the attachment is rolled back.
 */
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		/* Done if a system-wide event was registered before this one. */
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			/* Roll back the registration performed above. */
			uprobe_perf_close(tu, event);
	}
	return err;
}
1164
1165 static int uprobe_perf_multi_call(struct trace_event_call *call,
1166                                   struct perf_event *event,
1167                 int (*op)(struct trace_uprobe *tu, struct perf_event *event))
1168 {
1169         struct trace_probe *pos, *tp;
1170         struct trace_uprobe *tu;
1171         int ret = 0;
1172
1173         tp = trace_probe_primary_from_call(call);
1174         if (WARN_ON_ONCE(!tp))
1175                 return -ENODEV;
1176
1177         list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1178                 tu = container_of(pos, struct trace_uprobe, tp);
1179                 ret = op(tu, event);
1180                 if (ret)
1181                         break;
1182         }
1183
1184         return ret;
1185 }
1186 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1187                                 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1188 {
1189         struct trace_uprobe *tu;
1190         int ret;
1191
1192         tu = container_of(uc, struct trace_uprobe, consumer);
1193         read_lock(&tu->filter.rwlock);
1194         ret = __uprobe_perf_filter(&tu->filter, mm);
1195         read_unlock(&tu->filter.rwlock);
1196
1197         return ret;
1198 }
1199
/*
 * Emit one perf sample for a probe hit.  @func is the probed function
 * address for return probes (callers pass 0 for entry probes); @ucb holds
 * the already-fetched argument data of @dsize bytes.
 */
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	/* Give an attached BPF program first say; it may drop the event. */
	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	/* Round up so the record plus the u32 perf header stays u64-aligned. */
	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	/* Zero the alignment padding so no kernel memory leaks to userspace. */
	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
1252
1253 /* uprobe profile handler */
1254 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1255                             struct uprobe_cpu_buffer *ucb, int dsize)
1256 {
1257         if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1258                 return UPROBE_HANDLER_REMOVE;
1259
1260         if (!is_ret_probe(tu))
1261                 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1262         return 0;
1263 }
1264
/* uretprobe profile handler: record the return hit of @func for perf. */
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
1271
1272 int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1273                         const char **filename, u64 *probe_offset,
1274                         bool perf_type_tracepoint)
1275 {
1276         const char *pevent = trace_event_name(event->tp_event);
1277         const char *group = event->tp_event->class->system;
1278         struct trace_uprobe *tu;
1279
1280         if (perf_type_tracepoint)
1281                 tu = find_probe_event(pevent, group);
1282         else
1283                 tu = event->tp_event->data;
1284         if (!tu)
1285                 return -EINVAL;
1286
1287         *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1288                                     : BPF_FD_TYPE_UPROBE;
1289         *filename = tu->filename;
1290         *probe_offset = tu->offset;
1291         return 0;
1292 }
1293 #endif  /* CONFIG_PERF_EVENTS */
1294
/*
 * trace_event_call->class->reg() callback: multiplexes enable/disable
 * requests from ftrace and perf.
 *
 * NOTE: @data is a trace_event_file for TRACE_REG_(UN)REGISTER, but the
 * perf_event itself for TRACE_REG_PERF_OPEN/CLOSE — 'file' is only valid
 * in the first two cases.
 */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_multi_call(event, data, uprobe_perf_open);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_multi_call(event, data, uprobe_perf_close);

#endif
	default:
		return 0;
	}
	return 0;
}
1329
/*
 * uprobe_consumer breakpoint handler: fetch the probe arguments once into
 * a per-cpu buffer and hand them to the ftrace and/or perf paths,
 * depending on which users are enabled.  The return value may include
 * UPROBE_HANDLER_REMOVE from the perf path, asking for breakpoint removal.
 */
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;


	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	/* Stash dispatch data for the argument-fetch code (via current->utask). */
	current->utask->vaddr = (unsigned long) &udd;

	/* Buffers are allocated on enable; hitting this would be a logic bug. */
	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
1366
/*
 * uprobe_consumer return-probe handler: like uprobe_dispatcher() but for
 * function returns; @func is the address of the probed function.  Always
 * returns 0 (return breakpoints are not removed from here).
 */
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	/* Stash dispatch data for the argument-fetch code (via current->utask). */
	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
1401
/* Output callbacks for this event type; uprobes only need .trace. */
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};
1405
1406 static inline void init_trace_event_call(struct trace_uprobe *tu)
1407 {
1408         struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1409
1410         call->event.funcs = &uprobe_funcs;
1411         call->class->define_fields = uprobe_event_define_fields;
1412
1413         call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1414         call->class->reg = trace_uprobe_register;
1415 }
1416
1417 static int register_uprobe_event(struct trace_uprobe *tu)
1418 {
1419         init_trace_event_call(tu);
1420
1421         return trace_probe_register_event_call(&tu->tp);
1422 }
1423
/* Counterpart of register_uprobe_event(): remove the event call. */
static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}
1428
1429 #ifdef CONFIG_PERF_EVENTS
1430 struct trace_event_call *
1431 create_local_trace_uprobe(char *name, unsigned long offs,
1432                           unsigned long ref_ctr_offset, bool is_return)
1433 {
1434         struct trace_uprobe *tu;
1435         struct path path;
1436         int ret;
1437
1438         ret = kern_path(name, LOOKUP_FOLLOW, &path);
1439         if (ret)
1440                 return ERR_PTR(ret);
1441
1442         if (!d_is_reg(path.dentry)) {
1443                 path_put(&path);
1444                 return ERR_PTR(-EINVAL);
1445         }
1446
1447         /*
1448          * local trace_kprobes are not added to dyn_event, so they are never
1449          * searched in find_trace_kprobe(). Therefore, there is no concern of
1450          * duplicated name "DUMMY_EVENT" here.
1451          */
1452         tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1453                                 is_return);
1454
1455         if (IS_ERR(tu)) {
1456                 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1457                         (int)PTR_ERR(tu));
1458                 path_put(&path);
1459                 return ERR_CAST(tu);
1460         }
1461
1462         tu->offset = offs;
1463         tu->path = path;
1464         tu->ref_ctr_offset = ref_ctr_offset;
1465         tu->filename = kstrdup(name, GFP_KERNEL);
1466         init_trace_event_call(tu);
1467
1468         if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1469                 ret = -ENOMEM;
1470                 goto error;
1471         }
1472
1473         return trace_probe_event_call(&tu->tp);
1474 error:
1475         free_trace_uprobe(tu);
1476         return ERR_PTR(ret);
1477 }
1478
/* Tear down an event created by create_local_trace_uprobe(). */
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	free_trace_uprobe(trace_uprobe_primary_from_call(event_call));
}
1487 #endif /* CONFIG_PERF_EVENTS */
1488
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	/* Without a tracefs dir there is nothing more to expose; not fatal. */
	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);