1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
8 * MSR_KERNEL is > 0x8000 on 4xx/Book-E since it includes MSR_CE.
/*
 * __LOAD_MSR_KERNEL r, x - load the constant 'x' (an MSR value) into
 * register 'r'.  Exists because on 4xx/Book-E MSR_KERNEL exceeds 0x8000
 * (see comment above), so a single signed 16-bit 'li' cannot encode it.
 * NOTE(review): the macro body and .endm are not visible in this excerpt.
 */
10 .macro __LOAD_MSR_KERNEL r, x
/* C-preprocessor wrapper so call sites can use function-like syntax. */
18 #define LOAD_MSR_KERNEL(r, x) __LOAD_MSR_KERNEL r, x
21 * Exception entry code. This code runs with address translation
22 * turned off, i.e. using physical addresses.
23 * We assume sprg3 has the physical address of the current
24 * task's thread_struct.
/*
 * EXCEPTION_PROLOG - first step of exception entry, runs with the MMU
 * off (see the block comment above).  Parks r10/r11 in the SPRG scratch
 * registers so they become usable as temporaries by the following
 * EXCEPTION_PROLOG_1/_2 steps.
 * NOTE(review): the rest of this macro (and its .endm) is not visible
 * in this excerpt.
 */
27 .macro EXCEPTION_PROLOG
/* Free r10 and r11 for use as scratch without losing their contents. */
28 mtspr SPRN_SPRG_SCRATCH0,r10
29 mtspr SPRN_SPRG_SCRATCH1,r11
/*
 * EXCEPTION_PROLOG_1 - pick the kernel stack to build the exception
 * frame on: the interrupted kernel stack (r1) if we came from kernel
 * mode, otherwise the current task's kernel stack from its
 * thread_struct.  Leaves the physical frame pointer in r11.
 * NOTE(review): this excerpt is sampled - the conditional branches
 * between the SRR1 test and the user-mode path are not shown, nor is
 * the .endm.
 */
35 .macro EXCEPTION_PROLOG_1
36 mfspr r11,SPRN_SRR1 /* check whether user or kernel */
/* Kernel-mode path: reuse the interrupted r1, converted to physical. */
38 tophys(r11,r1) /* use tophys(r1) if kernel */
/* User-mode path: fetch the task's kernel stack from thread_struct. */
40 mfspr r11,SPRN_SPRG_THREAD
41 lwz r11,TASK_STACK-THREAD(r11)
42 addi r11,r11,THREAD_SIZE /* r11 = top of the task's kernel stack */
44 1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
/*
 * EXCEPTION_PROLOG_2 - fill in the exception frame allocated by
 * EXCEPTION_PROLOG_1 (CCR, the registers parked in the SPRG scratch
 * regs, ...), switch r1 to the new kernel stack, and re-enable address
 * translation so C handlers can run.
 * NOTE(review): sampled excerpt - several stores between the visible
 * lines, and the .endm, are not shown.
 */
47 .macro EXCEPTION_PROLOG_2
48 stw r10,_CCR(r11) /* save registers */
/* Recover the original r10/r11 values stashed by EXCEPTION_PROLOG. */
51 mfspr r10,SPRN_SPRG_SCRATCH0
53 mfspr r12,SPRN_SPRG_SCRATCH1
61 tovirt(r1,r11) /* set new kernel sp */
63 rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
/* Turn the MMU data/instruction translation back on for the handler. */
65 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
66 MTMSRD(r10) /* (except for mach check in rtas) */
/* Tag the frame so stack walkers recognize a full pt_regs frame. */
69 lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
70 addi r10,r10,STACK_FRAME_REGS_MARKER@l
/*
 * SYSCALL_ENTRY trapno - build a full pt_regs frame on the current
 * task's kernel stack for a system call, do CPU-time accounting and
 * (on 40x) debug-register handling, then enter transfer_to_syscall via
 * RFI with the MMU re-enabled.
 * NOTE(review): sampled excerpt - many lines of this macro, including
 * the #endif's closing the CONFIG_40x and CONFIG_TRACE_IRQFLAGS
 * blocks and the final .endm, are not visible here.
 */
76 .macro SYSCALL_ENTRY trapno
77 mfspr r12,SPRN_SPRG_THREAD
/* Locate the top of the task's kernel stack and carve out a frame. */
79 lwz r11,TASK_STACK-THREAD(r12)
81 addi r11,r11,THREAD_SIZE - INT_FRAME_SIZE
82 rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
84 stw r10,_CCR(r11) /* save registers */
90 tovirt(r1,r11) /* set new kernel sp */
93 rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
/* Allow exceptions again now that r1 points at a valid kernel stack. */
95 LOAD_MSR_KERNEL(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
96 MTMSRD(r10) /* (except for mach check in rtas) */
/* Tag the frame so stack walkers recognize a full pt_regs frame. */
98 lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
100 addi r10,r10,STACK_FRAME_REGS_MARKER@l
108 addi r11,r1,STACK_FRAME_OVERHEAD
111 #if defined(CONFIG_40x)
112 /* Check to see if the dbcr0 register is set up to debug. Use the
113 internal debug mode bit to do this. */
114 lwz r12,THREAD_DBCR0(r12)
115 andis. r12,r12,DBCR0_IDM@h
117 ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
118 #if defined(CONFIG_40x)
120 /* From user and task is ptraced - load up global dbcr0 */
121 li r12,-1 /* clear all pending debug events */
123 lis r11,global_dbcr0@ha
125 addi r11,r11,global_dbcr0@l
/* r2 holds the current task pointer from here on (physical->virtual). */
134 tovirt(r2, r2) /* set r2 to current */
/* Set up SRR0 (entry point) for the RFI into transfer_to_syscall. */
135 lis r11, transfer_to_syscall@h
136 ori r11, r11, transfer_to_syscall@l
137 #ifdef CONFIG_TRACE_IRQFLAGS
139 * If MSR is changing we need to keep interrupts disabled at this point
140 * otherwise we might risk taking an interrupt before we tell lockdep
/* Preserve the caller's MSR_EE bit so lockdep sees a consistent state. */
143 LOAD_MSR_KERNEL(r10, MSR_KERNEL)
144 rlwimi r10, r9, 0, MSR_EE
146 LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
148 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
154 RFI /* jump to handler, enable MMU */
158 * Note: code which follows this uses cr0.eq (set if from kernel),
159 * r11, r12 (SRR0), and r9 (SRR1).
161 * Note2: once we have set r1 we are in a position to take exceptions
162 * again, and we could thus set MSR:RI at that point.
/*
 * START_EXCEPTION(n, label) - place the exception entry stub at vector
 * offset n; a Book3S variant and a non-Book3S variant are selected by
 * CONFIG_PPC_BOOK3S.  EXCEPTION(n, label, hdlr, xfer) builds a complete
 * vector: entry stub, prolog, r3 = pt_regs pointer, then the 'xfer'
 * transfer macro to 'hdlr'.
 * NOTE(review): sampled excerpt - the macro bodies, the #else/#endif of
 * the CONFIG_PPC_BOOK3S block, and the EXCEPTION_PROLOG invocation are
 * not visible here; no comments are inserted between the lines below
 * because they are '\'-continued macro definitions.
 */
168 #ifdef CONFIG_PPC_BOOK3S
169 #define START_EXCEPTION(n, label) \
175 #define START_EXCEPTION(n, label) \
181 #define EXCEPTION(n, label, hdlr, xfer) \
182 START_EXCEPTION(n, label) \
184 addi r3,r1,STACK_FRAME_OVERHEAD; \
/*
 * EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) - common tail of an
 * exception vector: record the trap number in the frame, load the MSR
 * to run the handler with, and jump to the transfer routine 'tfer'
 * which calls 'hdlr' and returns through 'ret'.
 *
 * EXC_XFER_STD:  full register save, returns via ret_from_except_full.
 * EXC_XFER_LITE: lighter-weight transfer; note it passes n+1 as the
 * trap value (the low bit marks the lite/partial-frame variant -
 * NOTE(review): inferred from the n vs n+1 difference; confirm against
 * the full file).
 * NOTE(review): sampled excerpt - lines are elided from these
 * '\'-continued #defines, so no comments are inserted between them.
 */
187 #define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \
189 stw r10,_TRAP(r11); \
190 LOAD_MSR_KERNEL(r10, msr); \
195 #define EXC_XFER_STD(n, hdlr) \
196 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
197 ret_from_except_full)
199 #define EXC_XFER_LITE(n, hdlr) \
200 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
203 #endif /* __HEAD_32_H__ */