/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	kprobe_opcode_t *addr;

#ifdef PPC64_ELF_ABI_v2
	/* PPC64 ABIv2 needs local entry point */
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	if (addr && !offset)
		addr = (kprobe_opcode_t *)ppc_function_entry(addr);
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * 64bit powerpc ABIv1 uses function descriptors:
	 * - Check for the dot variant of the symbol first.
	 * - If that fails, try looking up the symbol provided.
	 *
	 * This ensures we always get to the actual symbol and not
	 * the descriptor.
	 *
	 * Also handle <module:symbol> format.
	 */
	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
	const char *modsym;
	bool dot_appended = false;
	if ((modsym = strchr(name, ':')) != NULL) {
		modsym++;
		if (*modsym != '\0' && *modsym != '.') {
			/* Convert to <module:.symbol> */
			strncpy(dot_name, name, modsym - name);
			dot_name[modsym - name] = '.';
			dot_name[modsym - name + 1] = '\0';
			strncat(dot_name, modsym,
				sizeof(dot_name) - (modsym - name) - 2);
			dot_appended = true;
		} else {
			dot_name[0] = '\0';
			strncat(dot_name, name, sizeof(dot_name) - 1);
		}
	} else if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 2);
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 1);
	}
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended) {
		/* Let's try the original non-dot symbol lookup */
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	}
#else
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

	return addr;
}

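/*
 * For example (illustrative only): on ABIv1, a lookup of "memcpy" is
 * first attempted as ".memcpy", since the dot symbol is the actual text
 * entry point, and "ext4:ext4_sync_file" is rewritten to
 * "ext4:.ext4_sync_file" before falling back to the name as given.
 */
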
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64. This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}
	p->ainsn.boostable = 0;
	return ret;
}

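/*
 * Typical usage from a module, as a minimal sketch (the probed symbol
 * and handler name below are hypothetical; cf. samples/kprobes/):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit at nip=0x%lx\n", regs->nip);
 *		return 0;	(zero: proceed with single-stepping)
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_sys_open",
 *		.pre_handler = handler_pre,
 *	};
 *
 *	register_kprobe(&kp);	...	unregister_kprobe(&kp);
 */
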
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	enable_single_step(regs);

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant as values in regs could play a part in
	 * if the trap is taken or not
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_msr = regs->msr;
}

bool arch_function_offset_within_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
	/* ABIv2: the local entry point is 8 bytes past the global one */
	return offset <= 8;
#else
	return !offset;
#endif
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}

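/*
 * Return probe usage, as a minimal sketch (handler and probed symbol are
 * hypothetical; cf. samples/kprobes/kretprobe_example.c):
 *
 *	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("return value: %ld\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler = ret_handler,
 *		.kp.symbol_name = "do_sys_open",
 *		.maxactive = 20,
 *	};
 *
 *	register_kretprobe(&my_kretprobe);
 */
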
int __kprobes try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	/* regs->nip is also adjusted if emulate_step returns 1 */
	ret = emulate_step(regs, insn);
	if (ret > 0) {
		/*
		 * Once this instruction has been boosted
		 * successfully, set the boostable flag
		 */
		if (unlikely(p->ainsn.boostable == 0))
			p->ainsn.boostable = 1;
	} else if (ret < 0) {
		/*
		 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
		 * So, we should never get here... but, it's still
		 * good to catch them, just in case...
		 */
		printk("Can't step on instruction %x\n", insn);
		BUG();
	} else if (ret == 0)
		/* This instruction can't be boosted */
		p->ainsn.boostable = -1;

	return ret;
}

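/*
 * To summarize the contract above: a positive return means the copied
 * instruction was emulated in place (nip already updated), zero means
 * the caller must fall back to hardware single-stepping, and a negative
 * return should be unreachable because such instructions are rejected
 * by arch_prepare_kprobe() at registration time.
 */
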
int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				/* Turn off 'trace' bits */
				regs->msr &= ~MSR_SINGLESTEP;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			if (p->ainsn.boostable >= 0) {
				ret = try_to_emulate(p, regs);
				if (ret > 0) {
					restore_previous_kprobe(kcb);
					/* Balance the preempt_disable() above */
					preempt_enable_no_resched();
					return 1;
				}
			}
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, then it belongs not to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		ret = try_to_emulate(p, regs);
		if (ret > 0) {
			if (p->post_handler)
				p->post_handler(p, regs, 0);
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Function return probe trampoline:
 * 	- init_kprobes() establishes a probepoint here
 * 	- When the probed function returns, this probe
 * 		causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
	"nop\n"
	"blr\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n");

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
						struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path
	 * have return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;
	/*
	 * Make LR point to the orig_ret_address.
	 * When the 'nop' inside the kretprobe_trampoline
	 * is optimized, we can do a 'blr' after executing the
	 * detour buffer code.
	 */
	regs->link = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
int __kprobes kprobe_post_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur || user_mode(regs))
		return 0;

	/* make sure we got here for instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Adjust nip to after the single-stepped instruction */
	regs->nip = (unsigned long)cur->addr + 4;
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have DE/SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the nip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = extable_fixup(entry);
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

unsigned long arch_deref_entry_point(void *entry)
{
	return ppc_global_function_entry(entry);
}

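/*
 * Note: on ABIv1 this dereferences the function descriptor to obtain
 * the actual code address; on ABIv2 it returns the global entry point.
 */
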
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef PPC64_ELF_ABI_v2
	regs->gpr[12] = (unsigned long)jp->entry;
#elif defined(PPC64_ELF_ABI_v1)
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	return 1;
}

void __used __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

static void __used __kprobes jprobe_return_end(void)
{
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}

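/*
 * Jprobe usage, as a minimal sketch (the probed symbol and handler are
 * hypothetical; the handler must match the probed function's signature
 * and end with a call to jprobe_return()):
 *
 *	static long jmy_func(long a, long b)
 *	{
 *		pr_info("a=%ld b=%ld\n", a, b);
 *		jprobe_return();
 *		return 0;	(never reached; regs are restored above)
 *	}
 *
 *	static struct jprobe jp = {
 *		.entry = jmy_func,
 *		.kp.symbol_name = "my_func",
 *	};
 *
 *	register_jprobe(&jp);	...	unregister_jprobe(&jp);
 */
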
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}