/* arch/s390/kernel/stacktrace.c */
1 /*
2  * Stack trace management functions
3  *
4  *  Copyright IBM Corp. 2006
5  *  Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6  */
7
8 #include <linux/sched.h>
9 #include <linux/stacktrace.h>
10 #include <linux/kallsyms.h>
11 #include <linux/module.h>
12
/*
 * Walk one kernel stack area via the s390 frame backchain, recording
 * return addresses into @trace.
 *
 * @trace:     stack_trace buffer being filled; honours trace->skip and
 *             trace->max_entries
 * @sp:        stack pointer to start walking from
 * @low:       lowest valid address of this stack area
 * @high:      one-past-the-top address of this stack area
 * @savesched: if zero, addresses inside scheduler functions are not recorded
 *
 * Returns the last stack pointer reached.  Callers compare the return
 * value with the sp they passed in: an unchanged value means the walk
 * never entered this stack area (the very first bounds check failed).
 */
static unsigned long save_context_stack(struct stack_trace *trace,
					unsigned long sp,
					unsigned long low,
					unsigned long high,
					int savesched)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while(1) {
		/* sp outside [low, high]: not this stack, return sp untouched. */
		if (sp < low || sp > high)
			return sp;
		sf = (struct stack_frame *)sp;
		while(1) {
			/*
			 * gprs[8] of the frame save area holds the saved
			 * return address (r14 on s390 — see struct
			 * stack_frame).
			 */
			addr = sf->gprs[8];
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
			if (trace->nr_entries >= trace->max_entries)
				return sp;
			/* Frames must strictly ascend: remember where we were. */
			low = sp;
			sp = sf->back_chain;
			if (!sp)
				break;
			/*
			 * Next frame must lie above the current one and leave
			 * room for a full stack_frame below @high.
			 */
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *)sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long)(sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *)sp;
		/* Record the interrupted PSW address as well. */
		addr = regs->psw.addr;
		if (savesched || !in_sched_functions(addr)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
		}
		if (trace->nr_entries >= trace->max_entries)
			return sp;
		low = sp;
		/* Continue on the interrupted context's stack (saved r15). */
		sp = regs->gprs[15];
	}
}
61
62 void save_stack_trace(struct stack_trace *trace)
63 {
64         register unsigned long sp asm ("15");
65         unsigned long orig_sp, new_sp;
66
67         orig_sp = sp;
68         new_sp = save_context_stack(trace, orig_sp,
69                                     S390_lowcore.panic_stack - PAGE_SIZE,
70                                     S390_lowcore.panic_stack, 1);
71         if (new_sp != orig_sp)
72                 return;
73         new_sp = save_context_stack(trace, new_sp,
74                                     S390_lowcore.async_stack - ASYNC_SIZE,
75                                     S390_lowcore.async_stack, 1);
76         if (new_sp != orig_sp)
77                 return;
78         save_context_stack(trace, new_sp,
79                            S390_lowcore.thread_info,
80                            S390_lowcore.thread_info + THREAD_SIZE, 1);
81 }
82 EXPORT_SYMBOL_GPL(save_stack_trace);
83
84 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
85 {
86         unsigned long sp, low, high;
87
88         sp = tsk->thread.ksp;
89         low = (unsigned long) task_stack_page(tsk);
90         high = (unsigned long) task_pt_regs(tsk);
91         save_context_stack(trace, sp, low, high, 0);
92         if (trace->nr_entries < trace->max_entries)
93                 trace->entries[trace->nr_entries++] = ULONG_MAX;
94 }
95 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);