2 * Kernel Probes (KProbes)
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * Copyright (C) IBM Corporation, 2002, 2004
21 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
22 * Probes initial implementation (includes suggestions from Rusty Russell)
24 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
25 * hlists and exceptions notifier as suggested by Andi Kleen.
26 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
27 * interface to access function arguments.
28 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
29 * exceptions notifier to be first on the priority list.
30 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
31 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
32 * <prasanna@in.ibm.com> added function-return probes.
34 #include <linux/kprobes.h>
35 #include <linux/hash.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/stddef.h>
39 #include <linux/export.h>
40 #include <linux/moduleloader.h>
41 #include <linux/kallsyms.h>
42 #include <linux/freezer.h>
43 #include <linux/seq_file.h>
44 #include <linux/debugfs.h>
45 #include <linux/sysctl.h>
46 #include <linux/kdebug.h>
47 #include <linux/memory.h>
48 #include <linux/ftrace.h>
49 #include <linux/cpu.h>
50 #include <linux/jump_label.h>
52 #include <asm/sections.h>
53 #include <asm/cacheflush.h>
54 #include <asm/errno.h>
55 #include <linux/uaccess.h>
57 #define KPROBE_HASH_BITS 6
58 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
61 static int kprobes_initialized;
62 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
63 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
65 /* NOTE: change this value only with kprobe_mutex held */
66 static bool kprobes_all_disarmed;
68 /* This protects kprobe_table and optimizing_list */
69 static DEFINE_MUTEX(kprobe_mutex);
70 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
72 raw_spinlock_t lock ____cacheline_aligned_in_smp;
73 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
75 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
76 unsigned int __unused)
78 return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
81 static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
83 return &(kretprobe_table_locks[hash].lock);
86 /* List of symbols that can be overridden for error injection. */
87 static LIST_HEAD(kprobe_error_injection_list);
88 static DEFINE_MUTEX(kprobe_ei_mutex);
89 struct kprobe_ei_entry {
90 struct list_head list;
91 unsigned long start_addr;
92 unsigned long end_addr;
96 /* Blacklist -- list of struct kprobe_blacklist_entry */
97 static LIST_HEAD(kprobe_blacklist);
99 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
101 * kprobe->ainsn.insn points to the copy of the instruction to be
102 * single-stepped. x86_64, POWER4 and above have no-exec support and
103 * stepping on the instruction on a vmalloced/kmalloced/data page
104 * is a recipe for disaster
106 struct kprobe_insn_page {
107 struct list_head list;
108 kprobe_opcode_t *insns; /* Page of instruction slots */
109 struct kprobe_insn_cache *cache;
115 #define KPROBE_INSN_PAGE_SIZE(slots) \
116 (offsetof(struct kprobe_insn_page, slot_used) + \
117 (sizeof(char) * (slots)))
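/*
 * For illustration only: arch code is the typical consumer of the instruction
 * slot cache described above, reserving a slot for the copied instruction when
 * a probe is prepared and returning it when the probe is removed, via the
 * get_insn_slot()/free_insn_slot() wrappers from <linux/kprobes.h>.  A rough
 * sketch (loosely modeled on the x86 implementation; not part of this file):
 */
#if 0
int arch_prepare_kprobe(struct kprobe *p)
{
	/* reserve an executable slot for the copied instruction */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	/* copy and fix up the original instruction into the slot */
	return arch_copy_kprobe(p);
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		/* dirty=1: the slot is garbage-collected later */
		free_insn_slot(p->ainsn.insn, 1);
		p->ainsn.insn = NULL;
	}
}
#endif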
119 static int slots_per_page(struct kprobe_insn_cache *c)
121 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
124 enum kprobe_slot_state {
130 void __weak *alloc_insn_page(void)
132 return module_alloc(PAGE_SIZE);
135 void __weak free_insn_page(void *page)
137 module_memfree(page);
140 struct kprobe_insn_cache kprobe_insn_slots = {
141 .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
142 .alloc = alloc_insn_page,
143 .free = free_insn_page,
144 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
145 .insn_size = MAX_INSN_SIZE,
148 static int collect_garbage_slots(struct kprobe_insn_cache *c);
151 * __get_insn_slot() - Find a slot on an executable page for an instruction.
152 * We allocate an executable page if there's no room on existing ones.
154 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
156 struct kprobe_insn_page *kip;
157 kprobe_opcode_t *slot = NULL;
159 /* Since the slot array is not protected by rcu, we need a mutex */
160 mutex_lock(&c->mutex);
163 list_for_each_entry_rcu(kip, &c->pages, list) {
164 if (kip->nused < slots_per_page(c)) {
166 for (i = 0; i < slots_per_page(c); i++) {
167 if (kip->slot_used[i] == SLOT_CLEAN) {
168 kip->slot_used[i] = SLOT_USED;
170 slot = kip->insns + (i * c->insn_size);
175 /* kip->nused is broken. Fix it. */
176 kip->nused = slots_per_page(c);
182 /* If there are any garbage slots, collect them and try again. */
183 if (c->nr_garbage && collect_garbage_slots(c) == 0)
186 /* All out of space. Need to allocate a new page. */
187 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
192 * Use module_alloc so this page is within +/- 2GB of where the
193 * kernel image and loaded module images reside. This is required
194 * so x86_64 can correctly handle the %rip-relative fixups.
196 kip->insns = c->alloc();
201 INIT_LIST_HEAD(&kip->list);
202 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
203 kip->slot_used[0] = SLOT_USED;
207 list_add_rcu(&kip->list, &c->pages);
210 mutex_unlock(&c->mutex);
214 /* Return 1 if all garbage slots on this page were collected, otherwise 0. */
215 static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
217 kip->slot_used[idx] = SLOT_CLEAN;
219 if (kip->nused == 0) {
221 * Page is no longer in use. Free it unless
222 * it's the last one. We keep the last one
223 * so as not to have to set it up again the
224 * next time somebody inserts a probe.
226 if (!list_is_singular(&kip->list)) {
227 list_del_rcu(&kip->list);
229 kip->cache->free(kip->insns);
237 static int collect_garbage_slots(struct kprobe_insn_cache *c)
239 struct kprobe_insn_page *kip, *next;
241 /* Ensure no task is interrupted while on the garbage slots */
244 list_for_each_entry_safe(kip, next, &c->pages, list) {
246 if (kip->ngarbage == 0)
248 kip->ngarbage = 0; /* we will collect all garbage slots */
249 for (i = 0; i < slots_per_page(c); i++) {
250 if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
258 void __free_insn_slot(struct kprobe_insn_cache *c,
259 kprobe_opcode_t *slot, int dirty)
261 struct kprobe_insn_page *kip;
264 mutex_lock(&c->mutex);
266 list_for_each_entry_rcu(kip, &c->pages, list) {
267 idx = ((long)slot - (long)kip->insns) /
268 (c->insn_size * sizeof(kprobe_opcode_t));
269 if (idx >= 0 && idx < slots_per_page(c))
272 /* Could not find this slot. */
277 /* Mark and sweep: this may sleep */
279 /* Check double free */
280 WARN_ON(kip->slot_used[idx] != SLOT_USED);
282 kip->slot_used[idx] = SLOT_DIRTY;
284 if (++c->nr_garbage > slots_per_page(c))
285 collect_garbage_slots(c);
287 collect_one_slot(kip, idx);
290 mutex_unlock(&c->mutex);
294 * Check whether the given address is on a page of kprobe instruction slots.
295 * This will be used to check whether an address found on a stack
296 * is in a text area or not.
298 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
300 struct kprobe_insn_page *kip;
304 list_for_each_entry_rcu(kip, &c->pages, list) {
305 if (addr >= (unsigned long)kip->insns &&
306 addr < (unsigned long)kip->insns + PAGE_SIZE) {
316 #ifdef CONFIG_OPTPROBES
317 /* For optimized_kprobe buffer */
318 struct kprobe_insn_cache kprobe_optinsn_slots = {
319 .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
320 .alloc = alloc_insn_page,
321 .free = free_insn_page,
322 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
323 /* .insn_size is initialized later */
329 /* We have preemption disabled, so it is safe to use __ versions */
330 static inline void set_kprobe_instance(struct kprobe *kp)
332 __this_cpu_write(kprobe_instance, kp);
335 static inline void reset_kprobe_instance(void)
337 __this_cpu_write(kprobe_instance, NULL);
341 * This routine is called either:
342 * - under the kprobe_mutex - during kprobe_[un]register()
344 * - with preemption disabled - from arch/xxx/kernel/kprobes.c
346 struct kprobe *get_kprobe(void *addr)
348 struct hlist_head *head;
351 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
352 hlist_for_each_entry_rcu(p, head, hlist) {
359 NOKPROBE_SYMBOL(get_kprobe);
361 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
363 /* Return true if the kprobe is an aggregator */
364 static inline int kprobe_aggrprobe(struct kprobe *p)
366 return p->pre_handler == aggr_pre_handler;
369 /* Return true(!0) if the kprobe is unused */
370 static inline int kprobe_unused(struct kprobe *p)
372 return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
373 list_empty(&p->list);
377 * Keep all fields in the kprobe consistent
379 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
381 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
382 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
385 #ifdef CONFIG_OPTPROBES
386 /* NOTE: change this value only with kprobe_mutex held */
387 static bool kprobes_allow_optimization;
390 * Call all the pre_handlers on the list, but ignore their return values.
391 * This must be called from the arch-dependent optimized caller.
393 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
397 list_for_each_entry_rcu(kp, &p->list, list) {
398 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
399 set_kprobe_instance(kp);
400 kp->pre_handler(kp, regs);
402 reset_kprobe_instance();
405 NOKPROBE_SYMBOL(opt_pre_handler);
407 /* Free optimized instructions and optimized_kprobe */
408 static void free_aggr_kprobe(struct kprobe *p)
410 struct optimized_kprobe *op;
412 op = container_of(p, struct optimized_kprobe, kp);
413 arch_remove_optimized_kprobe(op);
414 arch_remove_kprobe(p);
418 /* Return true(!0) if the kprobe is ready for optimization. */
419 static inline int kprobe_optready(struct kprobe *p)
421 struct optimized_kprobe *op;
423 if (kprobe_aggrprobe(p)) {
424 op = container_of(p, struct optimized_kprobe, kp);
425 return arch_prepared_optinsn(&op->optinsn);
431 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
432 static inline int kprobe_disarmed(struct kprobe *p)
434 struct optimized_kprobe *op;
436 /* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
437 if (!kprobe_aggrprobe(p))
438 return kprobe_disabled(p);
440 op = container_of(p, struct optimized_kprobe, kp);
442 return kprobe_disabled(p) && list_empty(&op->list);
445 /* Return true(!0) if the probe is queued on (un)optimizing lists */
446 static int kprobe_queued(struct kprobe *p)
448 struct optimized_kprobe *op;
450 if (kprobe_aggrprobe(p)) {
451 op = container_of(p, struct optimized_kprobe, kp);
452 if (!list_empty(&op->list))
459 * Return an optimized kprobe whose optimizing code replaces
460 * instructions including addr (excluding the breakpoint itself).
462 static struct kprobe *get_optimized_kprobe(unsigned long addr)
465 struct kprobe *p = NULL;
466 struct optimized_kprobe *op;
468 /* Don't check i == 0, since that is a breakpoint case. */
469 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
470 p = get_kprobe((void *)(addr - i));
472 if (p && kprobe_optready(p)) {
473 op = container_of(p, struct optimized_kprobe, kp);
474 if (arch_within_optimized_kprobe(op, addr))
481 /* Optimization staging list, protected by kprobe_mutex */
482 static LIST_HEAD(optimizing_list);
483 static LIST_HEAD(unoptimizing_list);
484 static LIST_HEAD(freeing_list);
486 static void kprobe_optimizer(struct work_struct *work);
487 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
488 #define OPTIMIZE_DELAY 5
491 * Optimize (replace a breakpoint with a jump) kprobes listed on optimizing_list.
494 static void do_optimize_kprobes(void)
497 * The optimization/unoptimization refers to online_cpus via
498 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
499 * same time, text_mutex will be held both in cpu-hotplug and here.
500 * This combination can cause a deadlock (cpu-hotplug tries to lock
501 * text_mutex but stop_machine() cannot be done because online_cpus
503 * has been changed). To avoid this deadlock, the caller must have
504 * locked cpu hotplug to prevent cpu-hotplug outside of text_mutex locking.
506 lockdep_assert_cpus_held();
508 /* Optimization is never done when disarmed */
509 if (kprobes_all_disarmed || !kprobes_allow_optimization ||
510 list_empty(&optimizing_list))
513 mutex_lock(&text_mutex);
514 arch_optimize_kprobes(&optimizing_list);
515 mutex_unlock(&text_mutex);
519 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
520 * if needed) kprobes listed on unoptimizing_list.
522 static void do_unoptimize_kprobes(void)
524 struct optimized_kprobe *op, *tmp;
526 /* See comment in do_optimize_kprobes() */
527 lockdep_assert_cpus_held();
529 /* Unoptimization must be done regardless of whether kprobes are disarmed */
530 if (list_empty(&unoptimizing_list))
533 mutex_lock(&text_mutex);
534 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
535 /* Loop over freeing_list for disarming */
536 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
537 /* Disarm probes if marked disabled */
538 if (kprobe_disabled(&op->kp))
539 arch_disarm_kprobe(&op->kp);
540 if (kprobe_unused(&op->kp)) {
542 * Remove unused probes from hash list. After waiting
543 * for synchronization, these probes are reclaimed.
544 * (reclaiming is done by do_free_cleaned_kprobes.)
546 hlist_del_rcu(&op->kp.hlist);
548 list_del_init(&op->list);
550 mutex_unlock(&text_mutex);
553 /* Reclaim all kprobes on the freeing_list */
554 static void do_free_cleaned_kprobes(void)
556 struct optimized_kprobe *op, *tmp;
558 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
559 BUG_ON(!kprobe_unused(&op->kp));
560 list_del_init(&op->list);
561 free_aggr_kprobe(&op->kp);
565 /* Schedule the optimizer to run after OPTIMIZE_DELAY has passed */
566 static void kick_kprobe_optimizer(void)
568 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
571 /* Kprobe jump optimizer */
572 static void kprobe_optimizer(struct work_struct *work)
574 mutex_lock(&kprobe_mutex);
576 /* Lock modules while optimizing kprobes */
577 mutex_lock(&module_mutex);
580 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
581 * kprobes before waiting for the quiescence period.
583 do_unoptimize_kprobes();
586 * Step 2: Wait for a quiescence period to ensure that all potentially
587 * preempted tasks have been scheduled normally. Because an optprobe
588 * may modify multiple instructions, there is a chance that the Nth
589 * instruction is preempted. In that case, such tasks can return
590 * into the 2nd-Nth bytes of the jump instruction. This wait avoids that.
591 * Note that on a non-preemptive kernel, this is transparently converted
592 * to synchronize_sched() to wait for all interrupts to have completed.
594 synchronize_rcu_tasks();
596 /* Step 3: Optimize kprobes after the quiescence period */
597 do_optimize_kprobes();
599 /* Step 4: Free cleaned kprobes after the quiescence period */
600 do_free_cleaned_kprobes();
602 mutex_unlock(&module_mutex);
604 mutex_unlock(&kprobe_mutex);
606 /* Step 5: Kick optimizer again if needed */
607 if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
608 kick_kprobe_optimizer();
611 /* Wait for optimization and unoptimization to complete */
612 void wait_for_kprobe_optimizer(void)
614 mutex_lock(&kprobe_mutex);
616 while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
617 mutex_unlock(&kprobe_mutex);
619 /* this will also make optimizing_work execute immediately */
620 flush_delayed_work(&optimizing_work);
621 /* @optimizing_work might not have been queued yet, relax */
624 mutex_lock(&kprobe_mutex);
627 mutex_unlock(&kprobe_mutex);
630 /* Optimize kprobe if p is ready to be optimized */
631 static void optimize_kprobe(struct kprobe *p)
633 struct optimized_kprobe *op;
635 /* Check if the kprobe is disabled or not ready for optimization. */
636 if (!kprobe_optready(p) || !kprobes_allow_optimization ||
637 (kprobe_disabled(p) || kprobes_all_disarmed))
640 /* Neither break_handler nor post_handler is supported. */
641 if (p->break_handler || p->post_handler)
644 op = container_of(p, struct optimized_kprobe, kp);
646 /* Check that there are no other kprobes at the optimized instructions */
647 if (arch_check_optimized_kprobe(op) < 0)
650 /* Check if it is already optimized. */
651 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
653 op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
655 if (!list_empty(&op->list))
656 /* This is being unoptimized. Just dequeue the probe */
657 list_del_init(&op->list);
659 list_add(&op->list, &optimizing_list);
660 kick_kprobe_optimizer();
664 /* Shortcut for direct unoptimization */
665 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
667 lockdep_assert_cpus_held();
668 arch_unoptimize_kprobe(op);
669 if (kprobe_disabled(&op->kp))
670 arch_disarm_kprobe(&op->kp);
673 /* Unoptimize a kprobe if p is optimized */
674 static void unoptimize_kprobe(struct kprobe *p, bool force)
676 struct optimized_kprobe *op;
678 if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
679 return; /* This is neither an optprobe nor optimized */
681 op = container_of(p, struct optimized_kprobe, kp);
682 if (!kprobe_optimized(p)) {
683 /* Unoptimized or unoptimizing case */
684 if (force && !list_empty(&op->list)) {
686 * Only if this is an unoptimizing kprobe and it was forced,
687 * forcibly unoptimize it. (There is no need to unoptimize
688 * an already unoptimized kprobe again :)
690 list_del_init(&op->list);
691 force_unoptimize_kprobe(op);
696 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
697 if (!list_empty(&op->list)) {
698 /* Dequeue from the optimization queue */
699 list_del_init(&op->list);
702 /* Optimized kprobe case */
704 /* Forcibly update the code: this is a special case */
705 force_unoptimize_kprobe(op);
707 list_add(&op->list, &unoptimizing_list);
708 kick_kprobe_optimizer();
712 /* Cancel unoptimization, for reuse */
713 static void reuse_unused_kprobe(struct kprobe *ap)
715 struct optimized_kprobe *op;
717 BUG_ON(!kprobe_unused(ap));
719 * An unused kprobe MUST be on its way through delayed unoptimization (meaning
720 * there is still a relative jump in place) and be disabled.
722 op = container_of(ap, struct optimized_kprobe, kp);
723 if (unlikely(list_empty(&op->list)))
724 printk(KERN_WARNING "Warning: found a stray unused "
725 "aggrprobe@%p\n", ap->addr);
726 /* Enable the probe again */
727 ap->flags &= ~KPROBE_FLAG_DISABLED;
728 /* Optimize it again (remove from op->list) */
729 BUG_ON(!kprobe_optready(ap));
733 /* Remove optimized instructions */
734 static void kill_optimized_kprobe(struct kprobe *p)
736 struct optimized_kprobe *op;
738 op = container_of(p, struct optimized_kprobe, kp);
739 if (!list_empty(&op->list))
740 /* Dequeue from the (un)optimization queue */
741 list_del_init(&op->list);
742 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
744 if (kprobe_unused(p)) {
745 /* Enqueue if it is unused */
746 list_add(&op->list, &freeing_list);
748 * Remove unused probes from the hash list. After waiting
749 * for synchronization, this probe is reclaimed.
750 * (reclaiming is done by do_free_cleaned_kprobes().)
752 hlist_del_rcu(&op->kp.hlist);
755 /* Don't touch the code, because it is already freed. */
756 arch_remove_optimized_kprobe(op);
760 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
762 if (!kprobe_ftrace(p))
763 arch_prepare_optimized_kprobe(op, p);
766 /* Try to prepare optimized instructions */
767 static void prepare_optimized_kprobe(struct kprobe *p)
769 struct optimized_kprobe *op;
771 op = container_of(p, struct optimized_kprobe, kp);
772 __prepare_optimized_kprobe(op, p);
775 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
776 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
778 struct optimized_kprobe *op;
780 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
784 INIT_LIST_HEAD(&op->list);
785 op->kp.addr = p->addr;
786 __prepare_optimized_kprobe(op, p);
791 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
794 * Prepare an optimized_kprobe and optimize it
795 * NOTE: p must be a normal registered kprobe
797 static void try_to_optimize_kprobe(struct kprobe *p)
800 struct optimized_kprobe *op;
802 /* Impossible to optimize ftrace-based kprobe */
803 if (kprobe_ftrace(p))
806 /* For preparing optimization, jump_label_text_reserved() is called */
809 mutex_lock(&text_mutex);
811 ap = alloc_aggr_kprobe(p);
815 op = container_of(ap, struct optimized_kprobe, kp);
816 if (!arch_prepared_optinsn(&op->optinsn)) {
817 /* If setting up the optimization failed, fall back to a regular kprobe */
818 arch_remove_optimized_kprobe(op);
823 init_aggr_kprobe(ap, p);
824 optimize_kprobe(ap); /* This just kicks optimizer thread */
827 mutex_unlock(&text_mutex);
833 static void optimize_all_kprobes(void)
835 struct hlist_head *head;
839 mutex_lock(&kprobe_mutex);
840 /* If optimization is already allowed, just return */
841 if (kprobes_allow_optimization)
845 kprobes_allow_optimization = true;
846 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
847 head = &kprobe_table[i];
848 hlist_for_each_entry_rcu(p, head, hlist)
849 if (!kprobe_disabled(p))
853 printk(KERN_INFO "Kprobes globally optimized\n");
855 mutex_unlock(&kprobe_mutex);
858 static void unoptimize_all_kprobes(void)
860 struct hlist_head *head;
864 mutex_lock(&kprobe_mutex);
865 /* If optimization is already prohibited, just return */
866 if (!kprobes_allow_optimization) {
867 mutex_unlock(&kprobe_mutex);
872 kprobes_allow_optimization = false;
873 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
874 head = &kprobe_table[i];
875 hlist_for_each_entry_rcu(p, head, hlist) {
876 if (!kprobe_disabled(p))
877 unoptimize_kprobe(p, false);
881 mutex_unlock(&kprobe_mutex);
883 /* Wait for unoptimizing completion */
884 wait_for_kprobe_optimizer();
885 printk(KERN_INFO "Kprobes globally unoptimized\n");
888 static DEFINE_MUTEX(kprobe_sysctl_mutex);
889 int sysctl_kprobes_optimization;
890 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
891 void __user *buffer, size_t *length,
896 mutex_lock(&kprobe_sysctl_mutex);
897 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
898 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
900 if (sysctl_kprobes_optimization)
901 optimize_all_kprobes();
903 unoptimize_all_kprobes();
904 mutex_unlock(&kprobe_sysctl_mutex);
908 #endif /* CONFIG_SYSCTL */
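/*
 * The handler above is wired into the sysctl tree elsewhere (the debug table
 * in kernel/sysctl.c).  A hedged sketch of such an entry -- the table name and
 * the zero/one limit variables here are assumptions, not the actual wiring:
 */
#if 0
static struct ctl_table kprobe_sysctl_example[] = {
	{
		.procname	= "kprobes-optimization",
		.data		= &sysctl_kprobes_optimization,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_kprobes_optimization_handler,
		.extra1		= &zero,
		.extra2		= &one,
	},
	{ }
};
#endif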
910 /* Put a breakpoint for a probe. Must be called with text_mutex locked */
911 static void __arm_kprobe(struct kprobe *p)
915 /* Check collision with other optimized kprobes */
916 _p = get_optimized_kprobe((unsigned long)p->addr);
918 /* Fallback to unoptimized kprobe */
919 unoptimize_kprobe(_p, true);
922 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
925 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
926 static void __disarm_kprobe(struct kprobe *p, bool reopt)
930 /* Try to unoptimize */
931 unoptimize_kprobe(p, kprobes_all_disarmed);
933 if (!kprobe_queued(p)) {
934 arch_disarm_kprobe(p);
935 /* If another kprobe was blocked, optimize it. */
936 _p = get_optimized_kprobe((unsigned long)p->addr);
937 if (unlikely(_p) && reopt)
940 /* TODO: reoptimize others after unoptimizing this probe */
943 #else /* !CONFIG_OPTPROBES */
945 #define optimize_kprobe(p) do {} while (0)
946 #define unoptimize_kprobe(p, f) do {} while (0)
947 #define kill_optimized_kprobe(p) do {} while (0)
948 #define prepare_optimized_kprobe(p) do {} while (0)
949 #define try_to_optimize_kprobe(p) do {} while (0)
950 #define __arm_kprobe(p) arch_arm_kprobe(p)
951 #define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
952 #define kprobe_disarmed(p) kprobe_disabled(p)
953 #define wait_for_kprobe_optimizer() do {} while (0)
955 /* There should be no unused kprobes that can be reused without optimization */
956 static void reuse_unused_kprobe(struct kprobe *ap)
958 printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
959 BUG_ON(kprobe_unused(ap));
962 static void free_aggr_kprobe(struct kprobe *p)
964 arch_remove_kprobe(p);
968 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
970 return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
972 #endif /* CONFIG_OPTPROBES */
974 #ifdef CONFIG_KPROBES_ON_FTRACE
975 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
976 .func = kprobe_ftrace_handler,
977 .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
979 static int kprobe_ftrace_enabled;
981 /* Must ensure p->addr is really on ftrace */
982 static int prepare_kprobe(struct kprobe *p)
984 if (!kprobe_ftrace(p))
985 return arch_prepare_kprobe(p);
987 return arch_prepare_kprobe_ftrace(p);
990 /* Caller must lock kprobe_mutex */
991 static void arm_kprobe_ftrace(struct kprobe *p)
995 ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
996 (unsigned long)p->addr, 0, 0);
997 WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
998 kprobe_ftrace_enabled++;
999 if (kprobe_ftrace_enabled == 1) {
1000 ret = register_ftrace_function(&kprobe_ftrace_ops);
1001 WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
1005 /* Caller must lock kprobe_mutex */
1006 static void disarm_kprobe_ftrace(struct kprobe *p)
1010 kprobe_ftrace_enabled--;
1011 if (kprobe_ftrace_enabled == 0) {
1012 ret = unregister_ftrace_function(&kprobe_ftrace_ops);
1013 WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
1015 ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
1016 (unsigned long)p->addr, 1, 0);
1017 WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
1019 #else /* !CONFIG_KPROBES_ON_FTRACE */
1020 #define prepare_kprobe(p) arch_prepare_kprobe(p)
1021 #define arm_kprobe_ftrace(p) do {} while (0)
1022 #define disarm_kprobe_ftrace(p) do {} while (0)
1025 /* Arm a kprobe with text_mutex */
1026 static void arm_kprobe(struct kprobe *kp)
1028 if (unlikely(kprobe_ftrace(kp))) {
1029 arm_kprobe_ftrace(kp);
1033 mutex_lock(&text_mutex);
1035 mutex_unlock(&text_mutex);
1039 /* Disarm a kprobe with text_mutex */
1040 static void disarm_kprobe(struct kprobe *kp, bool reopt)
1042 if (unlikely(kprobe_ftrace(kp))) {
1043 disarm_kprobe_ftrace(kp);
1048 mutex_lock(&text_mutex);
1049 __disarm_kprobe(kp, reopt);
1050 mutex_unlock(&text_mutex);
1055 * Aggregate handlers for multiple kprobes support - these handlers
1056 * take care of invoking the individual kprobe handlers on p->list
1058 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1062 list_for_each_entry_rcu(kp, &p->list, list) {
1063 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1064 set_kprobe_instance(kp);
1065 if (kp->pre_handler(kp, regs))
1068 reset_kprobe_instance();
1072 NOKPROBE_SYMBOL(aggr_pre_handler);
1074 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1075 unsigned long flags)
1079 list_for_each_entry_rcu(kp, &p->list, list) {
1080 if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1081 set_kprobe_instance(kp);
1082 kp->post_handler(kp, regs, flags);
1083 reset_kprobe_instance();
1087 NOKPROBE_SYMBOL(aggr_post_handler);
1089 static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1092 struct kprobe *cur = __this_cpu_read(kprobe_instance);
1095 * if we faulted "during" the execution of a user specified
1096 * probe handler, invoke just that probe's fault handler
1098 if (cur && cur->fault_handler) {
1099 if (cur->fault_handler(cur, regs, trapnr))
1104 NOKPROBE_SYMBOL(aggr_fault_handler);
1106 static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
1108 struct kprobe *cur = __this_cpu_read(kprobe_instance);
1111 if (cur && cur->break_handler) {
1112 if (cur->break_handler(cur, regs))
1115 reset_kprobe_instance();
1118 NOKPROBE_SYMBOL(aggr_break_handler);
1120 /* Walks the list and increments nmissed count for multiprobe case */
1121 void kprobes_inc_nmissed_count(struct kprobe *p)
1124 if (!kprobe_aggrprobe(p)) {
1127 list_for_each_entry_rcu(kp, &p->list, list)
1132 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1134 void recycle_rp_inst(struct kretprobe_instance *ri,
1135 struct hlist_head *head)
1137 struct kretprobe *rp = ri->rp;
1139 /* remove the rp inst from the kretprobe_inst_table */
1140 hlist_del(&ri->hlist);
1141 INIT_HLIST_NODE(&ri->hlist);
1143 raw_spin_lock(&rp->lock);
1144 hlist_add_head(&ri->hlist, &rp->free_instances);
1145 raw_spin_unlock(&rp->lock);
1148 hlist_add_head(&ri->hlist, head);
1150 NOKPROBE_SYMBOL(recycle_rp_inst);
1152 void kretprobe_hash_lock(struct task_struct *tsk,
1153 struct hlist_head **head, unsigned long *flags)
1154 __acquires(hlist_lock)
1156 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1157 raw_spinlock_t *hlist_lock;
1159 *head = &kretprobe_inst_table[hash];
1160 hlist_lock = kretprobe_table_lock_ptr(hash);
1161 raw_spin_lock_irqsave(hlist_lock, *flags);
1163 NOKPROBE_SYMBOL(kretprobe_hash_lock);
1165 static void kretprobe_table_lock(unsigned long hash,
1166 unsigned long *flags)
1167 __acquires(hlist_lock)
1169 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1170 raw_spin_lock_irqsave(hlist_lock, *flags);
1172 NOKPROBE_SYMBOL(kretprobe_table_lock);
1174 void kretprobe_hash_unlock(struct task_struct *tsk,
1175 unsigned long *flags)
1176 __releases(hlist_lock)
1178 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1179 raw_spinlock_t *hlist_lock;
1181 hlist_lock = kretprobe_table_lock_ptr(hash);
1182 raw_spin_unlock_irqrestore(hlist_lock, *flags);
1184 NOKPROBE_SYMBOL(kretprobe_hash_unlock);
1186 static void kretprobe_table_unlock(unsigned long hash,
1187 unsigned long *flags)
1188 __releases(hlist_lock)
1190 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1191 raw_spin_unlock_irqrestore(hlist_lock, *flags);
1193 NOKPROBE_SYMBOL(kretprobe_table_unlock);
1196 * This function is called from finish_task_switch when task tk becomes dead,
1197 * so that we can recycle any function-return probe instances associated
1198 * with this task. These left over instances represent probed functions
1199 * that have been called but will never return.
1201 void kprobe_flush_task(struct task_struct *tk)
1203 struct kretprobe_instance *ri;
1204 struct hlist_head *head, empty_rp;
1205 struct hlist_node *tmp;
1206 unsigned long hash, flags = 0;
1208 if (unlikely(!kprobes_initialized))
1209 /* Early boot. kretprobe_table_locks not yet initialized. */
1212 INIT_HLIST_HEAD(&empty_rp);
1213 hash = hash_ptr(tk, KPROBE_HASH_BITS);
1214 head = &kretprobe_inst_table[hash];
1215 kretprobe_table_lock(hash, &flags);
1216 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1218 recycle_rp_inst(ri, &empty_rp);
1220 kretprobe_table_unlock(hash, &flags);
1221 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
1222 hlist_del(&ri->hlist);
1226 NOKPROBE_SYMBOL(kprobe_flush_task);
1228 static inline void free_rp_inst(struct kretprobe *rp)
1230 struct kretprobe_instance *ri;
1231 struct hlist_node *next;
1233 hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1234 hlist_del(&ri->hlist);
1239 static void cleanup_rp_inst(struct kretprobe *rp)
1241 unsigned long flags, hash;
1242 struct kretprobe_instance *ri;
1243 struct hlist_node *next;
1244 struct hlist_head *head;
1247 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1248 kretprobe_table_lock(hash, &flags);
1249 head = &kretprobe_inst_table[hash];
1250 hlist_for_each_entry_safe(ri, next, head, hlist) {
1254 kretprobe_table_unlock(hash, &flags);
1258 NOKPROBE_SYMBOL(cleanup_rp_inst);
1261 * Add the new probe to ap->list. Fail if this is the
1262 * second jprobe at the address - two jprobes can't coexist
1264 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1266 BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1268 if (p->break_handler || p->post_handler)
1269 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
1271 if (p->break_handler) {
1272 if (ap->break_handler)
1274 list_add_tail_rcu(&p->list, &ap->list);
1275 ap->break_handler = aggr_break_handler;
1277 list_add_rcu(&p->list, &ap->list);
1278 if (p->post_handler && !ap->post_handler)
1279 ap->post_handler = aggr_post_handler;
1285 * Fill in the required fields of the "manager kprobe". Replace the
1286 * earlier kprobe in the hlist with the manager kprobe
1288 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1290 /* Copy p's insn slot to ap */
1292 flush_insn_slot(ap);
1294 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1295 ap->pre_handler = aggr_pre_handler;
1296 ap->fault_handler = aggr_fault_handler;
1297 /* We don't care about a kprobe which has gone. */
1298 if (p->post_handler && !kprobe_gone(p))
1299 ap->post_handler = aggr_post_handler;
1300 if (p->break_handler && !kprobe_gone(p))
1301 ap->break_handler = aggr_break_handler;
1303 INIT_LIST_HEAD(&ap->list);
1304 INIT_HLIST_NODE(&ap->hlist);
1306 list_add_rcu(&p->list, &ap->list);
1307 hlist_replace_rcu(&p->hlist, &ap->hlist);
1311 * This is the second or subsequent kprobe at the address - handle
1314 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1317 struct kprobe *ap = orig_p;
1321 /* For preparing optimization, jump_label_text_reserved() is called */
1323 mutex_lock(&text_mutex);
1325 if (!kprobe_aggrprobe(orig_p)) {
1326 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1327 ap = alloc_aggr_kprobe(orig_p);
1332 init_aggr_kprobe(ap, orig_p);
1333 } else if (kprobe_unused(ap))
1334 /* This probe is going to die. Rescue it */
1335 reuse_unused_kprobe(ap);
1337 if (kprobe_gone(ap)) {
1339 * Attempting to insert a new probe at the same location as a
1340 * probe in a module vaddr area which has already been
1341 * freed. So the instruction slot has already been
1342 * released. We need a new slot for the new probe.
1344 ret = arch_prepare_kprobe(ap);
1347 * Even if we fail to allocate a new slot, we don't need to
1348 * free the aggr_kprobe. It will be used next time, or
1349 * freed by unregister_kprobe().
1353 /* Prepare optimized instructions if possible. */
1354 prepare_optimized_kprobe(ap);
1357 * Clear gone flag to prevent allocating new slot again, and
1358 * set disabled flag because it is not armed yet.
1360 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1361 | KPROBE_FLAG_DISABLED;
1364 /* Copy ap's insn slot to p */
1366 ret = add_new_kprobe(ap, p);
1369 mutex_unlock(&text_mutex);
1370 jump_label_unlock();
1373 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1374 ap->flags &= ~KPROBE_FLAG_DISABLED;
1375 if (!kprobes_all_disarmed)
1376 /* Arm the breakpoint again. */
1382 bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1384 /* The __kprobes marked functions and entry code must not be probed */
1385 return addr >= (unsigned long)__kprobes_text_start &&
1386 addr < (unsigned long)__kprobes_text_end;
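/*
 * Functions end up covered by the blacklist checks below either by living in
 * the __kprobes text section or by being recorded in the _kprobe_blacklist
 * section.  A minimal sketch of marking a function non-probeable (the function
 * name is only an example):
 */
#if 0
static int critical_helper(int cpu)
{
	/* code that must never recurse into a kprobe handler */
	return cpu;
}
NOKPROBE_SYMBOL(critical_helper);	/* emits an entry into _kprobe_blacklist */
#endif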
1389 bool within_kprobe_blacklist(unsigned long addr)
1391 struct kprobe_blacklist_entry *ent;
1393 if (arch_within_kprobe_blacklist(addr))
1396 * If there exists a kprobe_blacklist, verify and
1397 * fail any probe registration in the prohibited area
1399 list_for_each_entry(ent, &kprobe_blacklist, list) {
1400 if (addr >= ent->start_addr && addr < ent->end_addr)
1407 bool within_kprobe_error_injection_list(unsigned long addr)
1409 struct kprobe_ei_entry *ent;
1411 list_for_each_entry(ent, &kprobe_error_injection_list, list) {
1412 if (addr >= ent->start_addr && addr < ent->end_addr)
1419 * If we have a symbol_name argument, look it up and add the offset field
1420 * to it. This way, we can specify a relative address to a symbol.
1421 * This returns an encoded error if it fails to look up the symbol or if an
1422 * invalid combination of parameters is given.
1424 static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1425 const char *symbol_name, unsigned int offset)
1427 if ((symbol_name && addr) || (!symbol_name && !addr))
1431 addr = kprobe_lookup_name(symbol_name, offset);
1433 return ERR_PTR(-ENOENT);
1436 addr = (kprobe_opcode_t *)(((char *)addr) + offset);
1441 return ERR_PTR(-EINVAL);
1444 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1446 return _kprobe_addr(p->addr, p->symbol_name, p->offset);
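/*
 * In other words, a probe is specified either by a raw address or by a symbol
 * name plus offset, but not both.  An illustrative sketch (the symbol, offset
 * and address below are arbitrary examples):
 */
#if 0
static struct kprobe kp_by_symbol = {
	.symbol_name	= "vfs_read",	/* resolved via kprobe_lookup_name() */
	.offset		= 0,
};

static struct kprobe kp_by_addr = {
	.addr		= (kprobe_opcode_t *)0xffffffff81234567UL,	/* placeholder address */
};
#endif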
1449 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
1450 static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1452 struct kprobe *ap, *list_p;
1454 ap = get_kprobe(p->addr);
1459 list_for_each_entry_rcu(list_p, &ap->list, list)
1461 /* kprobe p is a valid probe */
1469 /* Return error if the kprobe is being re-registered */
1470 static inline int check_kprobe_rereg(struct kprobe *p)
1474 mutex_lock(&kprobe_mutex);
1475 if (__get_valid_kprobe(p))
1477 mutex_unlock(&kprobe_mutex);
1482 int __weak arch_check_ftrace_location(struct kprobe *p)
1484 unsigned long ftrace_addr;
1486 ftrace_addr = ftrace_location((unsigned long)p->addr);
1488 #ifdef CONFIG_KPROBES_ON_FTRACE
1489 /* Given address is not on the instruction boundary */
1490 if ((unsigned long)p->addr != ftrace_addr)
1492 p->flags |= KPROBE_FLAG_FTRACE;
1493 #else /* !CONFIG_KPROBES_ON_FTRACE */
1500 static int check_kprobe_address_safe(struct kprobe *p,
1501 struct module **probed_mod)
1505 ret = arch_check_ftrace_location(p);
1511 /* Ensure it is not in reserved area nor out of text */
1512 if (!kernel_text_address((unsigned long) p->addr) ||
1513 within_kprobe_blacklist((unsigned long) p->addr) ||
1514 jump_label_text_reserved(p->addr, p->addr)) {
1519 /* Check if we are probing a module */
1520 *probed_mod = __module_text_address((unsigned long) p->addr);
1523 * We must hold a refcount of the probed module while updating
1524 * its code to prohibit unexpected unloading.
1526 if (unlikely(!try_module_get(*probed_mod))) {
1532 * If the module freed .init.text, we can't insert the probe there.
1535 if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1536 (*probed_mod)->state != MODULE_STATE_COMING) {
1537 module_put(*probed_mod);
1544 jump_label_unlock();
1549 int register_kprobe(struct kprobe *p)
1552 struct kprobe *old_p;
1553 struct module *probed_mod;
1554 kprobe_opcode_t *addr;
1556 /* Adjust probe address from symbol */
1557 addr = kprobe_addr(p);
1559 return PTR_ERR(addr);
1562 ret = check_kprobe_rereg(p);
1566 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1567 p->flags &= KPROBE_FLAG_DISABLED;
1569 INIT_LIST_HEAD(&p->list);
1571 ret = check_kprobe_address_safe(p, &probed_mod);
1575 mutex_lock(&kprobe_mutex);
1577 old_p = get_kprobe(p->addr);
1579 /* Since this may unoptimize old_p, locking text_mutex. */
1580 ret = register_aggr_kprobe(old_p, p);
1585 /* Prevent text modification */
1586 mutex_lock(&text_mutex);
1587 ret = prepare_kprobe(p);
1588 mutex_unlock(&text_mutex);
1593 INIT_HLIST_NODE(&p->hlist);
1594 hlist_add_head_rcu(&p->hlist,
1595 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1597 if (!kprobes_all_disarmed && !kprobe_disabled(p))
1600 /* Try to optimize kprobe */
1601 try_to_optimize_kprobe(p);
1603 mutex_unlock(&kprobe_mutex);
1606 module_put(probed_mod);
1610 EXPORT_SYMBOL_GPL(register_kprobe);
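/*
 * A minimal usage sketch of register_kprobe()/unregister_kprobe() from a
 * module, in the spirit of samples/kprobes/kprobe_example.c.  The probed
 * symbol and the messages are illustrative only:
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe pre_handler: addr = %p\n", p->addr);
	return 0;	/* 0 lets the probed instruction execute normally */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	pr_info("kprobe post_handler: addr = %p\n", p->addr);
}

static struct kprobe example_kp = {
	.symbol_name	= "_do_fork",
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif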
1612 /* Check if all probes on the aggrprobe are disabled */
1613 static int aggr_kprobe_disabled(struct kprobe *ap)
1617 list_for_each_entry_rcu(kp, &ap->list, list)
1618 if (!kprobe_disabled(kp))
1620 * There is an active probe on the list.
1621 * We can't disable this ap.
1628 /* Disable one kprobe: Must be called with kprobe_mutex held */
1629 static struct kprobe *__disable_kprobe(struct kprobe *p)
1631 struct kprobe *orig_p;
1633 /* Get an original kprobe for return */
1634 orig_p = __get_valid_kprobe(p);
1635 if (unlikely(orig_p == NULL))
1638 if (!kprobe_disabled(p)) {
1639 /* Disable probe if it is a child probe */
1641 p->flags |= KPROBE_FLAG_DISABLED;
1643 /* Try to disarm and disable this/parent probe */
1644 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1646 * If kprobes_all_disarmed is set, orig_p
1647 * should have already been disarmed, so
1648 * skip the unneeded disarming process.
1650 if (!kprobes_all_disarmed)
1651 disarm_kprobe(orig_p, true);
1652 orig_p->flags |= KPROBE_FLAG_DISABLED;
1660 * Unregister a kprobe without a scheduler synchronization.
1662 static int __unregister_kprobe_top(struct kprobe *p)
1664 struct kprobe *ap, *list_p;
1666 /* Disable kprobe. This will disarm it if needed. */
1667 ap = __disable_kprobe(p);
1673 * This probe is an independent (and non-optimized) kprobe
1674 * (not an aggrprobe). Remove from the hash list.
1678 /* The following process expects this probe to be an aggrprobe */
1679 WARN_ON(!kprobe_aggrprobe(ap));
1681 if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1683 * !disarmed can happen if the probe is under delayed unoptimization.
1688 /* If the probe being disabled has special handlers, update the aggrprobe */
1689 if (p->break_handler && !kprobe_gone(p))
1690 ap->break_handler = NULL;
1691 if (p->post_handler && !kprobe_gone(p)) {
1692 list_for_each_entry_rcu(list_p, &ap->list, list) {
1693 if ((list_p != p) && (list_p->post_handler))
1696 ap->post_handler = NULL;
1700 * Remove from the aggrprobe: this path will do nothing in
1701 * __unregister_kprobe_bottom().
1703 list_del_rcu(&p->list);
1704 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1706 * Try to optimize this probe again, because post
1707 * handler may have been changed.
1709 optimize_kprobe(ap);
1714 BUG_ON(!kprobe_disarmed(ap));
1715 hlist_del_rcu(&ap->hlist);
1719 static void __unregister_kprobe_bottom(struct kprobe *p)
1723 if (list_empty(&p->list))
1724 /* This is an independent kprobe */
1725 arch_remove_kprobe(p);
1726 else if (list_is_singular(&p->list)) {
1727 /* This is the last child of an aggrprobe */
1728 ap = list_entry(p->list.next, struct kprobe, list);
1730 free_aggr_kprobe(ap);
1732 /* Otherwise, do nothing. */
1735 int register_kprobes(struct kprobe **kps, int num)
1741 for (i = 0; i < num; i++) {
1742 ret = register_kprobe(kps[i]);
1745 unregister_kprobes(kps, i);
1751 EXPORT_SYMBOL_GPL(register_kprobes);
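/*
 * register_kprobes() registers an array of probes and unwinds the ones it
 * already armed on the first failure.  A short sketch (kp_open, kp_read and
 * kp_write are assumed struct kprobe definitions elsewhere):
 */
#if 0
static int __init register_my_probes(void)
{
	static struct kprobe *my_probes[] = { &kp_open, &kp_read, &kp_write };

	return register_kprobes(my_probes, ARRAY_SIZE(my_probes));
}
#endif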
1753 void unregister_kprobe(struct kprobe *p)
1755 unregister_kprobes(&p, 1);
1757 EXPORT_SYMBOL_GPL(unregister_kprobe);
1759 void unregister_kprobes(struct kprobe **kps, int num)
1765 mutex_lock(&kprobe_mutex);
1766 for (i = 0; i < num; i++)
1767 if (__unregister_kprobe_top(kps[i]) < 0)
1768 kps[i]->addr = NULL;
1769 mutex_unlock(&kprobe_mutex);
1771 synchronize_sched();
1772 for (i = 0; i < num; i++)
1774 __unregister_kprobe_bottom(kps[i]);
1776 EXPORT_SYMBOL_GPL(unregister_kprobes);
1778 int __weak kprobe_exceptions_notify(struct notifier_block *self,
1779 unsigned long val, void *data)
1783 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1785 static struct notifier_block kprobe_exceptions_nb = {
1786 .notifier_call = kprobe_exceptions_notify,
1787 .priority = 0x7fffffff /* we need to be notified first */
1790 unsigned long __weak arch_deref_entry_point(void *entry)
1792 return (unsigned long)entry;
1796 int register_jprobes(struct jprobe **jps, int num)
1803 for (i = 0; i < num; i++) {
1804 ret = register_jprobe(jps[i]);
1808 unregister_jprobes(jps, i);
1815 EXPORT_SYMBOL_GPL(register_jprobes);
1817 int register_jprobe(struct jprobe *jp)
1819 unsigned long addr, offset;
1820 struct kprobe *kp = &jp->kp;
1823 * Verify that the probe point as well as the jprobe handler are
1824 * valid function entry points.
1826 addr = arch_deref_entry_point(jp->entry);
1828 if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
1829 kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
1830 kp->pre_handler = setjmp_pre_handler;
1831 kp->break_handler = longjmp_break_handler;
1832 return register_kprobe(kp);
1837 EXPORT_SYMBOL_GPL(register_jprobe);
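/*
 * A jprobe handler mirrors the signature of the probed function and must end
 * in jprobe_return().  A minimal sketch, in the spirit of
 * samples/kprobes/jprobe_example.c (the probed symbol is illustrative):
 */
#if 0
static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
		      unsigned long stack_size, int __user *parent_tidptr,
		      int __user *child_tidptr, unsigned long tls)
{
	pr_info("jprobe: clone_flags = 0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory: hands control back to the probed function */
	return 0;
}

static struct jprobe example_jp = {
	.entry	= j_do_fork,
	.kp	= { .symbol_name = "_do_fork" },
};

/* register_jprobe(&example_jp); ... unregister_jprobe(&example_jp); */
#endif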
1839 void unregister_jprobe(struct jprobe *jp)
1841 unregister_jprobes(&jp, 1);
1843 EXPORT_SYMBOL_GPL(unregister_jprobe);
1845 void unregister_jprobes(struct jprobe **jps, int num)
1851 mutex_lock(&kprobe_mutex);
1852 for (i = 0; i < num; i++)
1853 if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1854 jps[i]->kp.addr = NULL;
1855 mutex_unlock(&kprobe_mutex);
1857 synchronize_sched();
1858 for (i = 0; i < num; i++) {
1859 if (jps[i]->kp.addr)
1860 __unregister_kprobe_bottom(&jps[i]->kp);
1863 EXPORT_SYMBOL_GPL(unregister_jprobes);
1866 #ifdef CONFIG_KRETPROBES
1868 * This kprobe pre_handler is registered with every kretprobe. When the probe
1869 * hits, it will set up the return probe.
1871 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1873 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1874 unsigned long hash, flags = 0;
1875 struct kretprobe_instance *ri;
1878 * To avoid deadlocks, prohibit return probing in NMI contexts,
1879 * just skip the probe and increase the (inexact) 'nmissed'
1880 * statistical counter, so that the user is informed that
1881 * something happened:
1883 if (unlikely(in_nmi())) {
1888 /* TODO: consider swapping the RA only after the last pre_handler has fired */
1889 hash = hash_ptr(current, KPROBE_HASH_BITS);
1890 raw_spin_lock_irqsave(&rp->lock, flags);
1891 if (!hlist_empty(&rp->free_instances)) {
1892 ri = hlist_entry(rp->free_instances.first,
1893 struct kretprobe_instance, hlist);
1894 hlist_del(&ri->hlist);
1895 raw_spin_unlock_irqrestore(&rp->lock, flags);
1900 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1901 raw_spin_lock_irqsave(&rp->lock, flags);
1902 hlist_add_head(&ri->hlist, &rp->free_instances);
1903 raw_spin_unlock_irqrestore(&rp->lock, flags);
1907 arch_prepare_kretprobe(ri, regs);
1909 /* XXX(hch): why is there no hlist_move_head? */
1910 INIT_HLIST_NODE(&ri->hlist);
1911 kretprobe_table_lock(hash, &flags);
1912 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1913 kretprobe_table_unlock(hash, &flags);
1916 raw_spin_unlock_irqrestore(&rp->lock, flags);
1920 NOKPROBE_SYMBOL(pre_handler_kretprobe);
1922 bool __weak arch_kprobe_on_func_entry(unsigned long offset)
1927 bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1929 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1931 if (IS_ERR(kp_addr))
1934 if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
1935 !arch_kprobe_on_func_entry(offset))
1941 int register_kretprobe(struct kretprobe *rp)
1944 struct kretprobe_instance *inst;
1948 if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
1951 if (kretprobe_blacklist_size) {
1952 addr = kprobe_addr(&rp->kp);
1954 return PTR_ERR(addr);
1956 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1957 if (kretprobe_blacklist[i].addr == addr)
1962 rp->kp.pre_handler = pre_handler_kretprobe;
1963 rp->kp.post_handler = NULL;
1964 rp->kp.fault_handler = NULL;
1965 rp->kp.break_handler = NULL;
1967 /* Pre-allocate memory for max kretprobe instances */
1968 if (rp->maxactive <= 0) {
1969 #ifdef CONFIG_PREEMPT
1970 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1972 rp->maxactive = num_possible_cpus();
1975 raw_spin_lock_init(&rp->lock);
1976 INIT_HLIST_HEAD(&rp->free_instances);
1977 for (i = 0; i < rp->maxactive; i++) {
1978 inst = kmalloc(sizeof(struct kretprobe_instance) +
1979 rp->data_size, GFP_KERNEL);
1984 INIT_HLIST_NODE(&inst->hlist);
1985 hlist_add_head(&inst->hlist, &rp->free_instances);
1989 /* Establish function entry probe point */
1990 ret = register_kprobe(&rp->kp);
1995 EXPORT_SYMBOL_GPL(register_kretprobe);
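/*
 * A minimal kretprobe usage sketch, in the spirit of
 * samples/kprobes/kretprobe_example.c (the probed symbol is illustrative):
 */
#if 0
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	unsigned long retval = regs_return_value(regs);

	pr_info("kretprobe: %s returned %lu\n", ri->rp->kp.symbol_name, retval);
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= example_ret_handler,
	.maxactive	= 20,	/* instances pre-allocated by register_kretprobe() */
	.kp.symbol_name	= "_do_fork",
};

/* register_kretprobe(&example_rp); ... unregister_kretprobe(&example_rp); */
#endif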
1997 int register_kretprobes(struct kretprobe **rps, int num)
2003 for (i = 0; i < num; i++) {
2004 ret = register_kretprobe(rps[i]);
2007 unregister_kretprobes(rps, i);
2013 EXPORT_SYMBOL_GPL(register_kretprobes);
2015 void unregister_kretprobe(struct kretprobe *rp)
2017 unregister_kretprobes(&rp, 1);
2019 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2021 void unregister_kretprobes(struct kretprobe **rps, int num)
2027 mutex_lock(&kprobe_mutex);
2028 for (i = 0; i < num; i++)
2029 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2030 rps[i]->kp.addr = NULL;
2031 mutex_unlock(&kprobe_mutex);
2033 synchronize_sched();
2034 for (i = 0; i < num; i++) {
2035 if (rps[i]->kp.addr) {
2036 __unregister_kprobe_bottom(&rps[i]->kp);
2037 cleanup_rp_inst(rps[i]);
2041 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2043 #else /* CONFIG_KRETPROBES */
2044 int register_kretprobe(struct kretprobe *rp)
2048 EXPORT_SYMBOL_GPL(register_kretprobe);
2050 int register_kretprobes(struct kretprobe **rps, int num)
2054 EXPORT_SYMBOL_GPL(register_kretprobes);
2056 void unregister_kretprobe(struct kretprobe *rp)
2059 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2061 void unregister_kretprobes(struct kretprobe **rps, int num)
2064 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2066 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2070 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2072 #endif /* CONFIG_KRETPROBES */
2074 /* Set the kprobe gone and remove its instruction buffer. */
2075 static void kill_kprobe(struct kprobe *p)
2079 p->flags |= KPROBE_FLAG_GONE;
2080 if (kprobe_aggrprobe(p)) {
2082 * If this is an aggr_kprobe, we have to list all the
2083 * chained probes and mark them GONE.
2085 list_for_each_entry_rcu(kp, &p->list, list)
2086 kp->flags |= KPROBE_FLAG_GONE;
2087 p->post_handler = NULL;
2088 p->break_handler = NULL;
2089 kill_optimized_kprobe(p);
2092 * Here, we can remove insn_slot safely, because no thread calls
2093 * the original probed function (which will be freed soon) any more.
2095 arch_remove_kprobe(p);
2098 /* Disable one kprobe */
2099 int disable_kprobe(struct kprobe *kp)
2103 mutex_lock(&kprobe_mutex);
2105 /* Disable this kprobe */
2106 if (__disable_kprobe(kp) == NULL)
2109 mutex_unlock(&kprobe_mutex);
2112 EXPORT_SYMBOL_GPL(disable_kprobe);
2114 /* Enable one kprobe */
2115 int enable_kprobe(struct kprobe *kp)
2120 mutex_lock(&kprobe_mutex);
2122 /* Check whether specified probe is valid. */
2123 p = __get_valid_kprobe(kp);
2124 if (unlikely(p == NULL)) {
2129 if (kprobe_gone(kp)) {
2130 /* This kprobe has gone, we can't enable it. */
2136 kp->flags &= ~KPROBE_FLAG_DISABLED;
2138 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2139 p->flags &= ~KPROBE_FLAG_DISABLED;
2143 mutex_unlock(&kprobe_mutex);
2146 EXPORT_SYMBOL_GPL(enable_kprobe);
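/*
 * disable_kprobe()/enable_kprobe() let a registered probe be muted and
 * re-armed without a full unregister/register cycle.  Sketch (example_kp is
 * assumed to be an already registered probe):
 */
#if 0
static void example_pause_probe(void)
{
	disable_kprobe(&example_kp);	/* probe stays registered but inert */
	/* ... quiet period ... */
	enable_kprobe(&example_kp);	/* arm it again */
}
#endif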
2148 void dump_kprobe(struct kprobe *kp)
2150 printk(KERN_WARNING "Dumping kprobe:\n");
2151 printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2152 kp->symbol_name, kp->addr, kp->offset);
2154 NOKPROBE_SYMBOL(dump_kprobe);
2157 * Look up and populate the kprobe_blacklist.
2159 * Unlike the kretprobe blacklist, we'll need to determine
2160 * the range of addresses that belong to the said functions,
2161 * since a kprobe need not necessarily be at the beginning of a function.
2164 static int __init populate_kprobe_blacklist(unsigned long *start,
2167 unsigned long *iter;
2168 struct kprobe_blacklist_entry *ent;
2169 unsigned long entry, offset = 0, size = 0;
2171 for (iter = start; iter < end; iter++) {
2172 entry = arch_deref_entry_point((void *)*iter);
2174 if (!kernel_text_address(entry) ||
2175 !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2176 pr_err("Failed to find blacklist at %p\n",
2181 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2184 ent->start_addr = entry;
2185 ent->end_addr = entry + size;
2186 INIT_LIST_HEAD(&ent->list);
2187 list_add_tail(&ent->list, &kprobe_blacklist);
2192 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
2193 /* Markers of the _kprobe_error_inject_list section */
2194 extern unsigned long __start_kprobe_error_inject_list[];
2195 extern unsigned long __stop_kprobe_error_inject_list[];
2198 * Look up and populate the kprobe_error_injection_list.
2200 * For safety reasons we only allow certain functions to be overridden with
2201 * bpf_error_injection, so we need to populate the list of the symbols that have
2202 * been marked as safe for overriding.
2204 static void populate_kprobe_error_injection_list(unsigned long *start,
2208 unsigned long *iter;
2209 struct kprobe_ei_entry *ent;
2210 unsigned long entry, offset = 0, size = 0;
2212 mutex_lock(&kprobe_ei_mutex);
2213 for (iter = start; iter < end; iter++) {
2214 entry = arch_deref_entry_point((void *)*iter);
2216 if (!kernel_text_address(entry) ||
2217 !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2218 pr_err("Failed to find error inject entry at %p\n",
2223 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2226 ent->start_addr = entry;
2227 ent->end_addr = entry + size;
2229 INIT_LIST_HEAD(&ent->list);
2230 list_add_tail(&ent->list, &kprobe_error_injection_list);
2232 mutex_unlock(&kprobe_ei_mutex);
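/*
 * Consumers of this list (such as the kprobe-based tracer when it decides
 * whether a probed function's return value may be overridden) are expected to
 * check the probed address first.  A hedged sketch of such a check:
 */
#if 0
static int example_check_override(unsigned long addr)
{
	if (!within_kprobe_error_injection_list(addr))
		return -EINVAL;	/* not marked safe for error injection */
	return 0;
}
#endif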
2235 static void __init populate_kernel_kprobe_ei_list(void)
2237 populate_kprobe_error_injection_list(__start_kprobe_error_inject_list,
2238 __stop_kprobe_error_inject_list,
2242 static void module_load_kprobe_ei_list(struct module *mod)
2244 if (!mod->num_kprobe_ei_funcs)
2246 populate_kprobe_error_injection_list(mod->kprobe_ei_funcs,
2247 mod->kprobe_ei_funcs +
2248 mod->num_kprobe_ei_funcs, mod);
2251 static void module_unload_kprobe_ei_list(struct module *mod)
2253 struct kprobe_ei_entry *ent, *n;
2254 if (!mod->num_kprobe_ei_funcs)
2257 mutex_lock(&kprobe_ei_mutex);
2258 list_for_each_entry_safe(ent, n, &kprobe_error_injection_list, list) {
2259 if (ent->priv == mod) {
2260 list_del_init(&ent->list);
2264 mutex_unlock(&kprobe_ei_mutex);
2267 static inline void __init populate_kernel_kprobe_ei_list(void) {}
2268 static inline void module_load_kprobe_ei_list(struct module *m) {}
2269 static inline void module_unload_kprobe_ei_list(struct module *m) {}
2272 /* Module notifier callback, checking kprobes on the module */
2273 static int kprobes_module_callback(struct notifier_block *nb,
2274 unsigned long val, void *data)
2276 struct module *mod = data;
2277 struct hlist_head *head;
2280 int checkcore = (val == MODULE_STATE_GOING);
2282 if (val == MODULE_STATE_COMING)
2283 module_load_kprobe_ei_list(mod);
2284 else if (val == MODULE_STATE_GOING)
2285 module_unload_kprobe_ei_list(mod);
2287 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2291 * When MODULE_STATE_GOING is notified, both the module .text and
2292 * .init.text sections will be freed. When MODULE_STATE_LIVE is
2293 * notified, only the .init.text section will be freed. We need to
2294 * disable kprobes which have been inserted in those sections.
2296 mutex_lock(&kprobe_mutex);
2297 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2298 head = &kprobe_table[i];
2299 hlist_for_each_entry_rcu(p, head, hlist)
2300 if (within_module_init((unsigned long)p->addr, mod) ||
2302 within_module_core((unsigned long)p->addr, mod))) {
2304 * The vaddr this probe is installed at will soon
2305 * be vfreed but not synced to disk. Hence,
2306 * disarming the breakpoint isn't needed.
2308 * Note, this will also move any optimized probes
2309 * that are pending to be removed from their
2310 * corresponding lists to the freeing_list and
2311 * will not be touched by the delayed
2312 * kprobe_optimizer work handler.
2317 mutex_unlock(&kprobe_mutex);
2321 static struct notifier_block kprobe_module_nb = {
2322 .notifier_call = kprobes_module_callback,
2326 /* Markers of _kprobe_blacklist section */
2327 extern unsigned long __start_kprobe_blacklist[];
2328 extern unsigned long __stop_kprobe_blacklist[];
2330 static int __init init_kprobes(void)
2334 /* FIXME allocate the probe table, currently defined statically */
2335 /* initialize all list heads */
2336 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2337 INIT_HLIST_HEAD(&kprobe_table[i]);
2338 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2339 raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2342 err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2343 __stop_kprobe_blacklist);
2345 pr_err("kprobes: failed to populate blacklist: %d\n", err);
2346 pr_err("Please take care when using kprobes.\n");
2349 populate_kernel_kprobe_ei_list();
2351 if (kretprobe_blacklist_size) {
2352 /* lookup the function address from its name */
2353 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2354 kretprobe_blacklist[i].addr =
2355 kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2356 if (!kretprobe_blacklist[i].addr)
2357 printk("kretprobe: lookup failed: %s\n",
2358 kretprobe_blacklist[i].name);
2362 #if defined(CONFIG_OPTPROBES)
2363 #if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2364 /* Init kprobe_optinsn_slots */
2365 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2367 /* By default, kprobes can be optimized */
2368 kprobes_allow_optimization = true;
2371 /* By default, kprobes are armed */
2372 kprobes_all_disarmed = false;
2374 err = arch_init_kprobes();
2376 err = register_die_notifier(&kprobe_exceptions_nb);
2378 err = register_module_notifier(&kprobe_module_nb);
2380 kprobes_initialized = (err == 0);
#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}
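
/*
 * Illustrative only: with the format strings in report_probe() above, a
 * line in debugfs kprobes/list comes out roughly as below (addresses and
 * symbols here are hypothetical):
 *
 *	00000000deadbeef k do_sys_open+0x0   [DISABLED]
 *	00000000cafebabe r vfs_read+0x0   [OPTIMIZED][FTRACE]
 *
 * where "k", "r" and "j" mark kprobes, kretprobes and jprobes respectively.
 */
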
static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[KSYM_NAME_LEN];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* kprobes/blacklist -- shows which functions can not be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{
	return seq_list_start(&kprobe_blacklist, *pos);
}

static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &kprobe_blacklist, pos);
}

static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{
	struct kprobe_blacklist_entry *ent =
		list_entry(v, struct kprobe_blacklist_entry, list);

	seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
		   (void *)ent->end_addr, (void *)ent->start_addr);
	return 0;
}
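
/*
 * Illustrative only: each kprobes/blacklist line is "<start>-<end>\t<symbol>"
 * per the format string above, e.g. with hypothetical addresses:
 *
 *	0xffffffff81027790-0xffffffff810277f0	kprobe_int3_handler
 */
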
static const struct seq_operations kprobe_blacklist_seq_ops = {
	.start = kprobe_blacklist_seq_start,
	.next  = kprobe_blacklist_seq_next,
	.stop  = kprobe_seq_stop,	/* Reuse void function */
	.show  = kprobe_blacklist_seq_show,
};

static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobe_blacklist_seq_ops);
}

static const struct file_operations debugfs_kprobe_blacklist_ops = {
	.open           = kprobe_blacklist_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/*
 * kprobes/error_injection_list -- shows which functions can be overridden
 * for error injection.
 */
static void *kprobe_ei_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&kprobe_ei_mutex);
	return seq_list_start(&kprobe_error_injection_list, *pos);
}

static void kprobe_ei_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&kprobe_ei_mutex);
}

static void *kprobe_ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &kprobe_error_injection_list, pos);
}

static int kprobe_ei_seq_show(struct seq_file *m, void *v)
{
	char buffer[KSYM_SYMBOL_LEN];
	struct kprobe_ei_entry *ent =
		list_entry(v, struct kprobe_ei_entry, list);

	sprint_symbol(buffer, ent->start_addr);
	seq_printf(m, "%s\n", buffer);
	return 0;
}

static const struct seq_operations kprobe_ei_seq_ops = {
	.start = kprobe_ei_seq_start,
	.next  = kprobe_ei_seq_next,
	.stop  = kprobe_ei_seq_stop,
	.show  = kprobe_ei_seq_show,
};

static int kprobe_ei_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobe_ei_seq_ops);
}

static const struct file_operations debugfs_kprobe_ei_ops = {
	.open           = kprobe_ei_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
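
/*
 * Illustrative only: each kprobes/error_injection_list line is a single
 * symbol rendered by sprint_symbol(), e.g. (hypothetical entry and size):
 *
 *	open_ctree+0x0/0x4a30 [btrfs]
 *
 * Functions on this list may have their return value overridden through a
 * kprobe, for instance by the bpf_override_return() helper.
 */
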
static void arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/*
	 * optimize_kprobe() called by arm_kprobe() checks
	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
	 * arming any kprobe below.
	 */
	kprobes_all_disarmed = false;
	/* Arming kprobes doesn't optimize kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				arm_kprobe(p);
	}

	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				disarm_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for the optimizer to finish disarming all kprobes */
	wait_for_kprobe_optimizer();
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when it
 * becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
	.llseek =	default_llseek,
};
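
/*
 * Illustrative usage of the "enabled" file (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/kprobes/enabled
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm them)
 *
 * Writes are handled by write_enabled_file_bool() above: 'y'/'Y'/'1' arm,
 * 'n'/'N'/'0' disarm, and anything else returns -EINVAL.
 */
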
static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file)
		goto error;

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file)
		goto error;

	file = debugfs_create_file("blacklist", 0444, dir, NULL,
				   &debugfs_kprobe_blacklist_ops);
	if (!file)
		goto error;

	file = debugfs_create_file("error_injection_list", 0444, dir, NULL,
				   &debugfs_kprobe_ei_ops);
	if (!file)
		goto error;

	return 0;

error:
	debugfs_remove(dir);
	return -ENOMEM;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);
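
/*
 * Illustrative only, not part of this file: a minimal module built on the
 * kprobes API exported here might look like the sketch below. The probed
 * symbol ("_do_fork") and all names are arbitrary examples.
 *
 *	#include <linux/module.h>
 *	#include <linux/kprobes.h>
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "_do_fork",
 *	};
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre_handler: p->addr = %p\n", p->addr);
 *		return 0;	// 0 lets the probed instruction run normally
 *	}
 *
 *	static int __init kprobe_example_init(void)
 *	{
 *		kp.pre_handler = handler_pre;
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit kprobe_example_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 *
 *	module_init(kprobe_example_init);
 *	module_exit(kprobe_example_exit);
 *	MODULE_LICENSE("GPL");
 */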