1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/filter.h>
11 #include <linux/uaccess.h>
12 #include <linux/ctype.h>
13 #include <linux/kprobes.h>
14 #include <linux/syscalls.h>
15 #include <linux/error-injection.h>
19 #include "trace_probe.h"
22 #define bpf_event_rcu_dereference(p) \
23 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
26 struct bpf_trace_module {
27 struct module *module;
28 struct list_head list;
31 static LIST_HEAD(bpf_trace_modules);
32 static DEFINE_MUTEX(bpf_module_mutex);
34 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
36 struct bpf_raw_event_map *btp, *ret = NULL;
37 struct bpf_trace_module *btm;
40 mutex_lock(&bpf_module_mutex);
41 list_for_each_entry(btm, &bpf_trace_modules, list) {
42 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
43 btp = &btm->module->bpf_raw_events[i];
44 if (!strcmp(btp->tp->name, name)) {
45 if (try_module_get(btm->module))
52 mutex_unlock(&bpf_module_mutex);
56 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
60 #endif /* CONFIG_MODULES */
62 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
63 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
66 * trace_call_bpf - invoke BPF program
67 * @call: tracepoint event
68 * @ctx: opaque context pointer
70 * kprobe handlers execute BPF programs via this helper.
71 * Can be used from static tracepoints in the future.
73 * Return: BPF programs always return an integer which is interpreted by
74 * kprobe handler as:
75 * 0 - return from kprobe (event is filtered out)
76 * 1 - store kprobe event into ring buffer
77 * Other values are reserved and currently alias to 1
79 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
83 if (in_nmi()) /* not supported yet */
88 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
90 * since some bpf program is already running on this cpu,
91 * don't call into another bpf program (same or different)
92 * and don't send kprobe event into ring-buffer,
100 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
101 * to all call sites, we did a bpf_prog_array_valid() there to check
102 * whether call->prog_array is empty or not, which is
103 * a heuristic to speed up execution.
105 * If bpf_prog_array_valid() fetched prog_array was
106 * non-NULL, we go into trace_call_bpf() and do the actual
107 * proper rcu_dereference() under RCU lock.
108 * If it turns out that prog_array is NULL then, we bail out.
109 * In the opposite case, if the pointer fetched by bpf_prog_array_valid()
110 * was NULL, we skip the prog_array and risk missing events that were
111 * added between that check and the rcu_dereference(), which is an
112 * accepted risk.
114 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
117 __this_cpu_dec(bpf_prog_active);
122 EXPORT_SYMBOL_GPL(trace_call_bpf);
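/*
 * Illustrative sketch (not part of this file): how a kprobe handler is
 * expected to consume the return value documented above. Loosely modeled
 * on kprobe_perf_func() in kernel/trace/trace_kprobe.c; the function name
 * and surrounding details below are assumptions for illustration only.
 */
#if 0
static void example_kprobe_perf_func(struct trace_event_call *call,
				     struct pt_regs *regs)
{
	/* a return of 0 means the attached BPF program filtered the event out */
	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	/* ... otherwise fall through and emit the kprobe event as usual ... */
}
#endif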
124 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
125 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
127 regs_set_return_value(regs, rc);
128 override_function_with_return(regs);
132 static const struct bpf_func_proto bpf_override_return_proto = {
133 .func = bpf_override_return,
135 .ret_type = RET_INTEGER,
136 .arg1_type = ARG_PTR_TO_CTX,
137 .arg2_type = ARG_ANYTHING,
141 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
145 ret = probe_kernel_read(dst, unsafe_ptr, size);
146 if (unlikely(ret < 0))
147 memset(dst, 0, size);
152 static const struct bpf_func_proto bpf_probe_read_proto = {
153 .func = bpf_probe_read,
155 .ret_type = RET_INTEGER,
156 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
157 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
158 .arg3_type = ARG_ANYTHING,
161 BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
165 * Ensure we're in user context which is safe for the helper to
166 * run. This helper has no business in a kthread.
168 * access_ok() should prevent writing to non-user memory, but in
169 * some situations (nommu, temporary switch, etc) access_ok() does
170 * not provide enough validation, hence the check on KERNEL_DS.
172 * nmi_uaccess_okay() ensures the probe is not run in an interim
173 * state, when the task or mm are switched. This is specifically
174 * required to prevent the use of temporary mm.
177 if (unlikely(in_interrupt() ||
178 current->flags & (PF_KTHREAD | PF_EXITING)))
180 if (unlikely(uaccess_kernel()))
182 if (unlikely(!nmi_uaccess_okay()))
184 if (!access_ok(unsafe_ptr, size))
187 return probe_kernel_write(unsafe_ptr, src, size);
190 static const struct bpf_func_proto bpf_probe_write_user_proto = {
191 .func = bpf_probe_write_user,
193 .ret_type = RET_INTEGER,
194 .arg1_type = ARG_ANYTHING,
195 .arg2_type = ARG_PTR_TO_MEM,
196 .arg3_type = ARG_CONST_SIZE,
199 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
201 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
202 current->comm, task_pid_nr(current));
204 return &bpf_probe_write_user_proto;
208 * Only limited trace_printk() conversion specifiers allowed:
209 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
211 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
212 u64, arg2, u64, arg3)
214 bool str_seen = false;
222 * bpf_check()->check_func_arg()->check_stack_boundary()
223 * guarantees that fmt points to bpf program stack,
224 * fmt_size bytes of it were initialized and fmt_size > 0
226 if (fmt[--fmt_size] != 0)
229 /* check format string for allowed specifiers */
230 for (i = 0; i < fmt_size; i++) {
231 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
240 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
245 } else if (fmt[i] == 'p' || fmt[i] == 's') {
247 /* disallow any further format extensions */
248 if (fmt[i + 1] != 0 &&
249 !isspace(fmt[i + 1]) &&
250 !ispunct(fmt[i + 1]))
255 /* allow only one '%s' per fmt string */
274 strncpy_from_unsafe(buf,
275 (void *) (long) unsafe_addr,
286 if (fmt[i] != 'i' && fmt[i] != 'd' &&
287 fmt[i] != 'u' && fmt[i] != 'x')
292 /* Horrid workaround for getting va_list handling working with different
293 * argument type combinations generically for 32 and 64 bit archs.
295 #define __BPF_TP_EMIT() __BPF_ARG3_TP()
296 #define __BPF_TP(...) \
297 __trace_printk(0 /* Fake ip */, \
300 #define __BPF_ARG1_TP(...) \
301 ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
302 ? __BPF_TP(arg1, ##__VA_ARGS__) \
303 : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
304 ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
305 : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
307 #define __BPF_ARG2_TP(...) \
308 ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
309 ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
310 : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
311 ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
312 : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
314 #define __BPF_ARG3_TP(...) \
315 ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
316 ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
317 : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
318 ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
319 : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
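/*
 * Descriptive note (assumption based on the mainline version of the fmt
 * scan, most of which is elided here): mod[n] records the width class of
 * the n-th conversion - 0 for int-sized, 1 for long-sized (a single 'l',
 * or a %p/%s pointer), 2 for long long - so the macros above pick a
 * matching cast for each argument on both 32-bit and 64-bit architectures
 * before handing off to __trace_printk().
 */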
321 return __BPF_TP_EMIT();
324 static const struct bpf_func_proto bpf_trace_printk_proto = {
325 .func = bpf_trace_printk,
327 .ret_type = RET_INTEGER,
328 .arg1_type = ARG_PTR_TO_MEM,
329 .arg2_type = ARG_CONST_SIZE,
332 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
335 * this program might be calling bpf_trace_printk,
336 * so allocate per-cpu printk buffers
338 trace_printk_init_buffers();
340 return &bpf_trace_printk_proto;
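/*
 * Illustrative BPF-program-side fragment (not part of this file): a call
 * that stays within the restrictions enforced above - at most three u64
 * arguments, only the listed conversion specifiers, and a single %s.
 * The "filename" pointer is an assumption declared elsewhere.
 */
#if 0
	char fmt[] = "pid %d opened %s\n";	/* one %d, one %s */

	bpf_trace_printk(fmt, sizeof(fmt),
			 bpf_get_current_pid_tgid() >> 32, filename);
#endif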
343 static __always_inline int
344 get_map_perf_counter(struct bpf_map *map, u64 flags,
345 u64 *value, u64 *enabled, u64 *running)
347 struct bpf_array *array = container_of(map, struct bpf_array, map);
348 unsigned int cpu = smp_processor_id();
349 u64 index = flags & BPF_F_INDEX_MASK;
350 struct bpf_event_entry *ee;
352 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
354 if (index == BPF_F_CURRENT_CPU)
356 if (unlikely(index >= array->map.max_entries))
359 ee = READ_ONCE(array->ptrs[index]);
363 return perf_event_read_local(ee->event, value, enabled, running);
366 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
371 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
373 * this api is ugly since we miss [-22..-2] range of valid
374 * counter values, but that's uapi
381 static const struct bpf_func_proto bpf_perf_event_read_proto = {
382 .func = bpf_perf_event_read,
384 .ret_type = RET_INTEGER,
385 .arg1_type = ARG_CONST_MAP_PTR,
386 .arg2_type = ARG_ANYTHING,
389 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
390 struct bpf_perf_event_value *, buf, u32, size)
394 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
396 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
402 memset(buf, 0, size);
406 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
407 .func = bpf_perf_event_read_value,
409 .ret_type = RET_INTEGER,
410 .arg1_type = ARG_CONST_MAP_PTR,
411 .arg2_type = ARG_ANYTHING,
412 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
413 .arg4_type = ARG_CONST_SIZE,
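/*
 * Illustrative BPF-program-side fragment (not part of this file): reading
 * the counter bound to the current CPU out of a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * via the helper above. The "counters" map is an assumption declared
 * elsewhere in the program.
 */
#if 0
	struct bpf_perf_event_value v = {};

	if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
				      &v, sizeof(v)) == 0) {
		/* v.counter, v.enabled and v.running are now valid */
	}
#endif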
416 static __always_inline u64
417 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
418 u64 flags, struct perf_sample_data *sd)
420 struct bpf_array *array = container_of(map, struct bpf_array, map);
421 unsigned int cpu = smp_processor_id();
422 u64 index = flags & BPF_F_INDEX_MASK;
423 struct bpf_event_entry *ee;
424 struct perf_event *event;
426 if (index == BPF_F_CURRENT_CPU)
428 if (unlikely(index >= array->map.max_entries))
431 ee = READ_ONCE(array->ptrs[index]);
436 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
437 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
440 if (unlikely(event->oncpu != cpu))
443 return perf_event_output(event, sd, regs);
447 * Support executing tracepoints in normal, irq, and nmi context that each call
448 * bpf_perf_event_output
450 struct bpf_trace_sample_data {
451 struct perf_sample_data sds[3];
454 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
455 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
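/*
 * Descriptive note: bpf_trace_nest_level counts how many
 * bpf_perf_event_output() calls are in flight on this CPU (task, then irq,
 * then nmi context), and each level gets its own perf_sample_data slot in
 * sds[] so a nested call cannot clobber the one it interrupted. More than
 * three levels is treated as a bug (WARN_ON_ONCE) and the event is dropped
 * below.
 */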
456 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
457 u64, flags, void *, data, u64, size)
459 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
460 int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
461 struct perf_raw_record raw = {
467 struct perf_sample_data *sd;
470 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
475 sd = &sds->sds[nest_level - 1];
477 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
482 perf_sample_data_init(sd, 0, 0);
485 err = __bpf_perf_event_output(regs, map, flags, sd);
488 this_cpu_dec(bpf_trace_nest_level);
492 static const struct bpf_func_proto bpf_perf_event_output_proto = {
493 .func = bpf_perf_event_output,
495 .ret_type = RET_INTEGER,
496 .arg1_type = ARG_PTR_TO_CTX,
497 .arg2_type = ARG_CONST_MAP_PTR,
498 .arg3_type = ARG_ANYTHING,
499 .arg4_type = ARG_PTR_TO_MEM,
500 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
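/*
 * Illustrative BPF-program-side fragment (not part of this file): emitting
 * a fixed-size record to the per-CPU perf buffer through the helper above.
 * The "events" map, struct data_t and the program's "ctx" pointer are
 * assumptions declared elsewhere.
 */
#if 0
	struct data_t data = { .pid = bpf_get_current_pid_tgid() >> 32 };

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &data, sizeof(data));
#endif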
503 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
504 static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
506 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
507 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
509 struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
510 struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
511 struct perf_raw_frag frag = {
516 struct perf_raw_record raw = {
519 .next = ctx_size ? &frag : NULL,
526 perf_fetch_caller_regs(regs);
527 perf_sample_data_init(sd, 0, 0);
530 return __bpf_perf_event_output(regs, map, flags, sd);
533 BPF_CALL_0(bpf_get_current_task)
535 return (long) current;
538 static const struct bpf_func_proto bpf_get_current_task_proto = {
539 .func = bpf_get_current_task,
541 .ret_type = RET_INTEGER,
544 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
546 struct bpf_array *array = container_of(map, struct bpf_array, map);
549 if (unlikely(idx >= array->map.max_entries))
552 cgrp = READ_ONCE(array->ptrs[idx]);
556 return task_under_cgroup_hierarchy(current, cgrp);
559 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
560 .func = bpf_current_task_under_cgroup,
562 .ret_type = RET_INTEGER,
563 .arg1_type = ARG_CONST_MAP_PTR,
564 .arg2_type = ARG_ANYTHING,
567 BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
568 const void *, unsafe_ptr)
573 * The strncpy_from_unsafe() call will likely not fill the entire
574 * buffer, but that's okay in this circumstance as we're probing
575 * arbitrary memory anyway similar to bpf_probe_read() and might
576 * as well probe the stack. Thus, memory is explicitly cleared
577 * only in error case, so that improper users ignoring return
578 * code altogether don't copy garbage; otherwise length of string
579 * is returned that can be used for bpf_perf_event_output() et al.
581 ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
582 if (unlikely(ret < 0))
583 memset(dst, 0, size);
588 static const struct bpf_func_proto bpf_probe_read_str_proto = {
589 .func = bpf_probe_read_str,
591 .ret_type = RET_INTEGER,
592 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
593 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
594 .arg3_type = ARG_ANYTHING,
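/*
 * Illustrative BPF-program-side fragment (not part of this file): using the
 * returned length (which includes the NUL byte) so that only the string
 * bytes are forwarded, as the comment above suggests. "unsafe_filename",
 * "events" and "ctx" are assumptions declared elsewhere.
 */
#if 0
	char buf[128];
	int len;

	len = bpf_probe_read_str(buf, sizeof(buf), unsafe_filename);
	if (len > 0)
		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
				      buf, len);
#endif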
597 struct send_signal_irq_work {
598 struct irq_work irq_work;
599 struct task_struct *task;
603 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
605 static void do_bpf_send_signal(struct irq_work *entry)
607 struct send_signal_irq_work *work;
609 work = container_of(entry, struct send_signal_irq_work, irq_work);
610 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
613 BPF_CALL_1(bpf_send_signal, u32, sig)
615 struct send_signal_irq_work *work = NULL;
617 /* Similar to bpf_probe_write_user, task needs to be
618 * in a sound condition and kernel memory access be
619 * permitted in order to send signal to the current
620 * task.
622 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
624 if (unlikely(uaccess_kernel()))
626 if (unlikely(!nmi_uaccess_okay()))
630 /* Do an early check on signal validity. Otherwise,
631 * the error is lost in deferred irq_work.
633 if (unlikely(!valid_signal(sig)))
636 work = this_cpu_ptr(&send_signal_work);
637 if (work->irq_work.flags & IRQ_WORK_BUSY)
640 /* Add the current task, which is the target of the signal,
641 * to the irq_work. The current task may change when queued
642 * irq works get executed.
644 work->task = current;
646 irq_work_queue(&work->irq_work);
650 return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
653 static const struct bpf_func_proto bpf_send_signal_proto = {
654 .func = bpf_send_signal,
656 .ret_type = RET_INTEGER,
657 .arg1_type = ARG_ANYTHING,
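/*
 * Illustrative BPF-program-side fragment (not part of this file): asking
 * the kernel to deliver SIGUSR1 to the current task from a kprobe program.
 */
#if 0
	if (bpf_send_signal(SIGUSR1))
		; /* signal could not be queued, e.g. the irq_work slot was busy */
#endif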
660 static const struct bpf_func_proto *
661 tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
664 case BPF_FUNC_map_lookup_elem:
665 return &bpf_map_lookup_elem_proto;
666 case BPF_FUNC_map_update_elem:
667 return &bpf_map_update_elem_proto;
668 case BPF_FUNC_map_delete_elem:
669 return &bpf_map_delete_elem_proto;
670 case BPF_FUNC_map_push_elem:
671 return &bpf_map_push_elem_proto;
672 case BPF_FUNC_map_pop_elem:
673 return &bpf_map_pop_elem_proto;
674 case BPF_FUNC_map_peek_elem:
675 return &bpf_map_peek_elem_proto;
676 case BPF_FUNC_probe_read:
677 return &bpf_probe_read_proto;
678 case BPF_FUNC_ktime_get_ns:
679 return &bpf_ktime_get_ns_proto;
680 case BPF_FUNC_tail_call:
681 return &bpf_tail_call_proto;
682 case BPF_FUNC_get_current_pid_tgid:
683 return &bpf_get_current_pid_tgid_proto;
684 case BPF_FUNC_get_current_task:
685 return &bpf_get_current_task_proto;
686 case BPF_FUNC_get_current_uid_gid:
687 return &bpf_get_current_uid_gid_proto;
688 case BPF_FUNC_get_current_comm:
689 return &bpf_get_current_comm_proto;
690 case BPF_FUNC_trace_printk:
691 return bpf_get_trace_printk_proto();
692 case BPF_FUNC_get_smp_processor_id:
693 return &bpf_get_smp_processor_id_proto;
694 case BPF_FUNC_get_numa_node_id:
695 return &bpf_get_numa_node_id_proto;
696 case BPF_FUNC_perf_event_read:
697 return &bpf_perf_event_read_proto;
698 case BPF_FUNC_probe_write_user:
699 return bpf_get_probe_write_proto();
700 case BPF_FUNC_current_task_under_cgroup:
701 return &bpf_current_task_under_cgroup_proto;
702 case BPF_FUNC_get_prandom_u32:
703 return &bpf_get_prandom_u32_proto;
704 case BPF_FUNC_probe_read_str:
705 return &bpf_probe_read_str_proto;
706 #ifdef CONFIG_CGROUPS
707 case BPF_FUNC_get_current_cgroup_id:
708 return &bpf_get_current_cgroup_id_proto;
710 case BPF_FUNC_send_signal:
711 return &bpf_send_signal_proto;
717 static const struct bpf_func_proto *
718 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
721 case BPF_FUNC_perf_event_output:
722 return &bpf_perf_event_output_proto;
723 case BPF_FUNC_get_stackid:
724 return &bpf_get_stackid_proto;
725 case BPF_FUNC_get_stack:
726 return &bpf_get_stack_proto;
727 case BPF_FUNC_perf_event_read_value:
728 return &bpf_perf_event_read_value_proto;
729 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
730 case BPF_FUNC_override_return:
731 return &bpf_override_return_proto;
734 return tracing_func_proto(func_id, prog);
738 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
739 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
740 const struct bpf_prog *prog,
741 struct bpf_insn_access_aux *info)
743 if (off < 0 || off >= sizeof(struct pt_regs))
745 if (type != BPF_READ)
750 * Assertion for 32 bit to make sure last 8 byte access
751 * (BPF_DW) to the last 4 byte member is disallowed.
753 if (off + size > sizeof(struct pt_regs))
759 const struct bpf_verifier_ops kprobe_verifier_ops = {
760 .get_func_proto = kprobe_prog_func_proto,
761 .is_valid_access = kprobe_prog_is_valid_access,
764 const struct bpf_prog_ops kprobe_prog_ops = {
767 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
768 u64, flags, void *, data, u64, size)
770 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
773 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
774 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
775 * from there and call the same bpf_perf_event_output() helper inline.
777 return ____bpf_perf_event_output(regs, map, flags, data, size);
780 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
781 .func = bpf_perf_event_output_tp,
783 .ret_type = RET_INTEGER,
784 .arg1_type = ARG_PTR_TO_CTX,
785 .arg2_type = ARG_CONST_MAP_PTR,
786 .arg3_type = ARG_ANYTHING,
787 .arg4_type = ARG_PTR_TO_MEM,
788 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
791 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
794 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
797 * Same comment as in bpf_perf_event_output_tp(), only that this time
798 * the other helper's function body cannot be inlined due to being
799 * external, thus we need to call raw helper function.
801 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
805 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
806 .func = bpf_get_stackid_tp,
808 .ret_type = RET_INTEGER,
809 .arg1_type = ARG_PTR_TO_CTX,
810 .arg2_type = ARG_CONST_MAP_PTR,
811 .arg3_type = ARG_ANYTHING,
814 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
817 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
819 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
820 (unsigned long) size, flags, 0);
823 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
824 .func = bpf_get_stack_tp,
826 .ret_type = RET_INTEGER,
827 .arg1_type = ARG_PTR_TO_CTX,
828 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
829 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
830 .arg4_type = ARG_ANYTHING,
833 static const struct bpf_func_proto *
834 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
837 case BPF_FUNC_perf_event_output:
838 return &bpf_perf_event_output_proto_tp;
839 case BPF_FUNC_get_stackid:
840 return &bpf_get_stackid_proto_tp;
841 case BPF_FUNC_get_stack:
842 return &bpf_get_stack_proto_tp;
844 return tracing_func_proto(func_id, prog);
848 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
849 const struct bpf_prog *prog,
850 struct bpf_insn_access_aux *info)
852 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
854 if (type != BPF_READ)
859 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
863 const struct bpf_verifier_ops tracepoint_verifier_ops = {
864 .get_func_proto = tp_prog_func_proto,
865 .is_valid_access = tp_prog_is_valid_access,
868 const struct bpf_prog_ops tracepoint_prog_ops = {
871 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
872 struct bpf_perf_event_value *, buf, u32, size)
876 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
878 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
884 memset(buf, 0, size);
888 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
889 .func = bpf_perf_prog_read_value,
891 .ret_type = RET_INTEGER,
892 .arg1_type = ARG_PTR_TO_CTX,
893 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
894 .arg3_type = ARG_CONST_SIZE,
897 static const struct bpf_func_proto *
898 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
901 case BPF_FUNC_perf_event_output:
902 return &bpf_perf_event_output_proto_tp;
903 case BPF_FUNC_get_stackid:
904 return &bpf_get_stackid_proto_tp;
905 case BPF_FUNC_get_stack:
906 return &bpf_get_stack_proto_tp;
907 case BPF_FUNC_perf_prog_read_value:
908 return &bpf_perf_prog_read_value_proto;
910 return tracing_func_proto(func_id, prog);
915 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
916 * to avoid potential recursive reuse issue when/if tracepoints are added
917 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
919 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
920 * in normal, irq, and nmi context.
922 struct bpf_raw_tp_regs {
923 struct pt_regs regs[3];
925 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
926 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
927 static struct pt_regs *get_bpf_raw_tp_regs(void)
929 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
930 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
932 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
933 this_cpu_dec(bpf_raw_tp_nest_level);
934 return ERR_PTR(-EBUSY);
937 return &tp_regs->regs[nest_level - 1];
940 static void put_bpf_raw_tp_regs(void)
942 this_cpu_dec(bpf_raw_tp_nest_level);
945 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
946 struct bpf_map *, map, u64, flags, void *, data, u64, size)
948 struct pt_regs *regs = get_bpf_raw_tp_regs();
952 return PTR_ERR(regs);
954 perf_fetch_caller_regs(regs);
955 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
957 put_bpf_raw_tp_regs();
961 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
962 .func = bpf_perf_event_output_raw_tp,
964 .ret_type = RET_INTEGER,
965 .arg1_type = ARG_PTR_TO_CTX,
966 .arg2_type = ARG_CONST_MAP_PTR,
967 .arg3_type = ARG_ANYTHING,
968 .arg4_type = ARG_PTR_TO_MEM,
969 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
972 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
973 struct bpf_map *, map, u64, flags)
975 struct pt_regs *regs = get_bpf_raw_tp_regs();
979 return PTR_ERR(regs);
981 perf_fetch_caller_regs(regs);
982 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
983 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
985 put_bpf_raw_tp_regs();
989 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
990 .func = bpf_get_stackid_raw_tp,
992 .ret_type = RET_INTEGER,
993 .arg1_type = ARG_PTR_TO_CTX,
994 .arg2_type = ARG_CONST_MAP_PTR,
995 .arg3_type = ARG_ANYTHING,
998 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
999 void *, buf, u32, size, u64, flags)
1001 struct pt_regs *regs = get_bpf_raw_tp_regs();
1005 return PTR_ERR(regs);
1007 perf_fetch_caller_regs(regs);
1008 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1009 (unsigned long) size, flags, 0);
1010 put_bpf_raw_tp_regs();
1014 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1015 .func = bpf_get_stack_raw_tp,
1017 .ret_type = RET_INTEGER,
1018 .arg1_type = ARG_PTR_TO_CTX,
1019 .arg2_type = ARG_PTR_TO_MEM,
1020 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1021 .arg4_type = ARG_ANYTHING,
1024 static const struct bpf_func_proto *
1025 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1028 case BPF_FUNC_perf_event_output:
1029 return &bpf_perf_event_output_proto_raw_tp;
1030 case BPF_FUNC_get_stackid:
1031 return &bpf_get_stackid_proto_raw_tp;
1032 case BPF_FUNC_get_stack:
1033 return &bpf_get_stack_proto_raw_tp;
1035 return tracing_func_proto(func_id, prog);
1039 static bool raw_tp_prog_is_valid_access(int off, int size,
1040 enum bpf_access_type type,
1041 const struct bpf_prog *prog,
1042 struct bpf_insn_access_aux *info)
1044 /* largest tracepoint in the kernel has 12 args */
1045 if (off < 0 || off >= sizeof(__u64) * 12)
1047 if (type != BPF_READ)
1049 if (off % size != 0)
1054 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1055 .get_func_proto = raw_tp_prog_func_proto,
1056 .is_valid_access = raw_tp_prog_is_valid_access,
1059 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1062 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1063 enum bpf_access_type type,
1064 const struct bpf_prog *prog,
1065 struct bpf_insn_access_aux *info)
1068 if (size != sizeof(u64) || type != BPF_READ)
1070 info->reg_type = PTR_TO_TP_BUFFER;
1072 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1075 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1076 .get_func_proto = raw_tp_prog_func_proto,
1077 .is_valid_access = raw_tp_writable_prog_is_valid_access,
1080 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1083 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1084 const struct bpf_prog *prog,
1085 struct bpf_insn_access_aux *info)
1087 const int size_u64 = sizeof(u64);
1089 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1091 if (type != BPF_READ)
1093 if (off % size != 0) {
1094 if (sizeof(unsigned long) != 4)
1098 if (off % size != 4)
1103 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1104 bpf_ctx_record_field_size(info, size_u64);
1105 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1108 case bpf_ctx_range(struct bpf_perf_event_data, addr):
1109 bpf_ctx_record_field_size(info, size_u64);
1110 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1114 if (size != sizeof(long))
1121 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1122 const struct bpf_insn *si,
1123 struct bpf_insn *insn_buf,
1124 struct bpf_prog *prog, u32 *target_size)
1126 struct bpf_insn *insn = insn_buf;
1129 case offsetof(struct bpf_perf_event_data, sample_period):
1130 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1131 data), si->dst_reg, si->src_reg,
1132 offsetof(struct bpf_perf_event_data_kern, data));
1133 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1134 bpf_target_off(struct perf_sample_data, period, 8,
1137 case offsetof(struct bpf_perf_event_data, addr):
1138 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1139 data), si->dst_reg, si->src_reg,
1140 offsetof(struct bpf_perf_event_data_kern, data));
1141 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1142 bpf_target_off(struct perf_sample_data, addr, 8,
1146 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1147 regs), si->dst_reg, si->src_reg,
1148 offsetof(struct bpf_perf_event_data_kern, regs));
1149 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1154 return insn - insn_buf;
1157 const struct bpf_verifier_ops perf_event_verifier_ops = {
1158 .get_func_proto = pe_prog_func_proto,
1159 .is_valid_access = pe_prog_is_valid_access,
1160 .convert_ctx_access = pe_prog_convert_ctx_access,
1163 const struct bpf_prog_ops perf_event_prog_ops = {
1166 static DEFINE_MUTEX(bpf_event_mutex);
1168 #define BPF_TRACE_MAX_PROGS 64
1170 int perf_event_attach_bpf_prog(struct perf_event *event,
1171 struct bpf_prog *prog)
1173 struct bpf_prog_array *old_array;
1174 struct bpf_prog_array *new_array;
1178 * Kprobe override only works if they are on the function entry,
1179 * and only if they are on the opt-in list.
1181 if (prog->kprobe_override &&
1182 (!trace_kprobe_on_func_entry(event->tp_event) ||
1183 !trace_kprobe_error_injectable(event->tp_event)))
1186 mutex_lock(&bpf_event_mutex);
1191 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1193 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1198 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1202 /* set the new array to event->tp_event and set event->prog */
1204 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1205 bpf_prog_array_free(old_array);
1208 mutex_unlock(&bpf_event_mutex);
1212 void perf_event_detach_bpf_prog(struct perf_event *event)
1214 struct bpf_prog_array *old_array;
1215 struct bpf_prog_array *new_array;
1218 mutex_lock(&bpf_event_mutex);
1223 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1224 ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1228 bpf_prog_array_delete_safe(old_array, event->prog);
1230 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1231 bpf_prog_array_free(old_array);
1234 bpf_prog_put(event->prog);
1238 mutex_unlock(&bpf_event_mutex);
1241 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1243 struct perf_event_query_bpf __user *uquery = info;
1244 struct perf_event_query_bpf query = {};
1245 struct bpf_prog_array *progs;
1246 u32 *ids, prog_cnt, ids_len;
1249 if (!capable(CAP_SYS_ADMIN))
1251 if (event->attr.type != PERF_TYPE_TRACEPOINT)
1253 if (copy_from_user(&query, uquery, sizeof(query)))
1256 ids_len = query.ids_len;
1257 if (ids_len > BPF_TRACE_MAX_PROGS)
1259 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1263 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1264 * is required when user only wants to check for uquery->prog_cnt.
1265 * There is no need to check for it since the case is handled
1266 * gracefully in bpf_prog_array_copy_info.
1269 mutex_lock(&bpf_event_mutex);
1270 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1271 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1272 mutex_unlock(&bpf_event_mutex);
1274 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1275 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
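/*
 * Illustrative user-space sketch (not part of this file): the ioctl served
 * by the function above. Requires CAP_SYS_ADMIN and a tracepoint perf event
 * fd; "perf_fd" and the on-stack buffer layout are assumptions.
 */
#if 0
	struct {
		struct perf_event_query_bpf hdr;
		__u32 ids[64];	/* mirrors BPF_TRACE_MAX_PROGS, not exported to uapi */
	} q = { .hdr = { .ids_len = 64 } };

	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, &q.hdr) == 0)
		printf("%u BPF programs attached\n", q.hdr.prog_cnt);
#endif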
1282 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1283 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1285 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1287 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1289 for (; btp < __stop__bpf_raw_tp; btp++) {
1290 if (!strcmp(btp->tp->name, name))
1294 return bpf_get_raw_tracepoint_module(name);
1297 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1299 struct module *mod = __module_address((unsigned long)btp);
1305 static __always_inline
1306 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1310 (void) BPF_PROG_RUN(prog, args);
1315 #define UNPACK(...) __VA_ARGS__
1316 #define REPEAT_1(FN, DL, X, ...) FN(X)
1317 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1318 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1319 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1320 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1321 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1322 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1323 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1324 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1325 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1326 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1327 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1328 #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
1330 #define SARG(X) u64 arg##X
1331 #define COPY(X) args[X] = arg##X
1333 #define __DL_COM (,)
1334 #define __DL_SEM (;)
1336 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1338 #define BPF_TRACE_DEFN_x(x) \
1339 void bpf_trace_run##x(struct bpf_prog *prog, \
1340 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
1343 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
1344 __bpf_trace_run(prog, args); \
1346 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1347 BPF_TRACE_DEFN_x(1);
1348 BPF_TRACE_DEFN_x(2);
1349 BPF_TRACE_DEFN_x(3);
1350 BPF_TRACE_DEFN_x(4);
1351 BPF_TRACE_DEFN_x(5);
1352 BPF_TRACE_DEFN_x(6);
1353 BPF_TRACE_DEFN_x(7);
1354 BPF_TRACE_DEFN_x(8);
1355 BPF_TRACE_DEFN_x(9);
1356 BPF_TRACE_DEFN_x(10);
1357 BPF_TRACE_DEFN_x(11);
1358 BPF_TRACE_DEFN_x(12);
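/*
 * For readability, BPF_TRACE_DEFN_x(2) above expands roughly to the
 * following; the u64 args[] declaration comes from the part of the macro
 * body elided here.
 */
#if 0
void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
{
	u64 args[2];

	args[0] = arg0;
	args[1] = arg1;
	__bpf_trace_run(prog, args);
}
EXPORT_SYMBOL_GPL(bpf_trace_run2);
#endif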
1360 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1362 struct tracepoint *tp = btp->tp;
1365 * check that program doesn't access arguments beyond what's
1366 * available in this tracepoint
1368 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1371 if (prog->aux->max_tp_access > btp->writable_size)
1374 return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1377 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1379 return __bpf_probe_register(btp, prog);
1382 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1384 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1387 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1388 u32 *fd_type, const char **buf,
1389 u64 *probe_offset, u64 *probe_addr)
1391 bool is_tracepoint, is_syscall_tp;
1392 struct bpf_prog *prog;
1399 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1400 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1403 *prog_id = prog->aux->id;
1404 flags = event->tp_event->flags;
1405 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1406 is_syscall_tp = is_syscall_trace_event(event->tp_event);
1408 if (is_tracepoint || is_syscall_tp) {
1409 *buf = is_tracepoint ? event->tp_event->tp->name
1410 : event->tp_event->name;
1411 *fd_type = BPF_FD_TYPE_TRACEPOINT;
1412 *probe_offset = 0x0;
1417 #ifdef CONFIG_KPROBE_EVENTS
1418 if (flags & TRACE_EVENT_FL_KPROBE)
1419 err = bpf_get_kprobe_info(event, fd_type, buf,
1420 probe_offset, probe_addr,
1421 event->attr.type == PERF_TYPE_TRACEPOINT);
1423 #ifdef CONFIG_UPROBE_EVENTS
1424 if (flags & TRACE_EVENT_FL_UPROBE)
1425 err = bpf_get_uprobe_info(event, fd_type, buf,
1427 event->attr.type == PERF_TYPE_TRACEPOINT);
1434 static int __init send_signal_irq_work_init(void)
1437 struct send_signal_irq_work *work;
1439 for_each_possible_cpu(cpu) {
1440 work = per_cpu_ptr(&send_signal_work, cpu);
1441 init_irq_work(&work->irq_work, do_bpf_send_signal);
1446 subsys_initcall(send_signal_irq_work_init);
1448 #ifdef CONFIG_MODULES
1449 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1452 struct bpf_trace_module *btm, *tmp;
1453 struct module *mod = module;
1455 if (mod->num_bpf_raw_events == 0 ||
1456 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1459 mutex_lock(&bpf_module_mutex);
1462 case MODULE_STATE_COMING:
1463 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1465 btm->module = module;
1466 list_add(&btm->list, &bpf_trace_modules);
1469 case MODULE_STATE_GOING:
1470 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1471 if (btm->module == module) {
1472 list_del(&btm->list);
1480 mutex_unlock(&bpf_module_mutex);
1485 static struct notifier_block bpf_module_nb = {
1486 .notifier_call = bpf_event_notify,
1489 static int __init bpf_event_init(void)
1491 register_module_notifier(&bpf_module_nb);
1495 fs_initcall(bpf_event_init);
1496 #endif /* CONFIG_MODULES */