1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
8 * Exception entry code. This code runs with address translation
9 * turned off, i.e. using physical addresses.
10 * We assume sprg3 has the physical address of the current
11 * task's thread_struct.
/*
 * EXCEPTION_PROLOG: top-level exception entry sequence, composed from
 * the numbered sub-prologs below.  handle_dar_dsisr is forwarded so the
 * sub-macros capture DAR/DSISR for exceptions that provide them.
 * NOTE(review): this chunk is gappy — the EXCEPTION_PROLOG_1 invocation
 * and the closing .endm are not visible here; confirm against full file.
 */
13 .macro EXCEPTION_PROLOG handle_dar_dsisr=0
14 EXCEPTION_PROLOG_0 handle_dar_dsisr=\handle_dar_dsisr
16 EXCEPTION_PROLOG_2 handle_dar_dsisr=\handle_dar_dsisr
/*
 * EXCEPTION_PROLOG_0: first step of exception entry.  Frees r10/r11 by
 * stashing them in the SPRG scratch registers, then samples SRR1 (the
 * pre-exception MSR) and tests MSR_PR so later code can tell whether we
 * came from user or kernel context (cr0.eq set => from kernel, per the
 * note later in this file).
 */
19 .macro EXCEPTION_PROLOG_0 handle_dar_dsisr=0
/* Save r10/r11 so they are usable as scratch for the rest of the prolog */
20 mtspr SPRN_SPRG_SCRATCH0,r10
21 mtspr SPRN_SPRG_SCRATCH1,r11
22 #ifdef CONFIG_VMAP_STACK
/* r10 := thread_struct pointer (VMAP_STACK path; use not visible here) */
23 mfspr r10, SPRN_SPRG_THREAD
33 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
34 #ifdef CONFIG_VMAP_STACK
/* cr0 := (pre-exception MSR & MSR_PR); nonzero means entry from user mode */
38 andi. r11, r11, MSR_PR
/*
 * EXCEPTION_PROLOG_1: compute the address of the exception frame in r11.
 *  - kernel entry (cr0.eq from the MSR_PR test): carve INT_FRAME_SIZE
 *    just below the current r1 (physical address via tophys() when not
 *    running on a vmapped stack);
 *  - user entry: locate the task's kernel stack through SPRG_THREAD /
 *    thread_struct and use its top minus INT_FRAME_SIZE.
 * With CONFIG_VMAP_STACK a stack-overflow check branches out to the
 * stack_overflow handler.  Conditional structure is partly missing from
 * this chunk (#else/#endif lines not visible).
 */
41 .macro EXCEPTION_PROLOG_1 for_rtas=0
42 #ifdef CONFIG_VMAP_STACK
/* MSR value with RI clear: a DTLB miss here must not be treated as
 * recoverable re-entry (translation of the vmapped stack may fault) */
44 li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
48 subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */
50 tophys(r11,r1) /* use tophys(r1) if kernel */
51 subi r11, r11, INT_FRAME_SIZE /* alloc exc. frame */
/* User entry: r11 := top of this task's kernel stack, minus frame */
54 mfspr r11,SPRN_SPRG_THREAD
55 tovirt_vmstack r11, r11
56 lwz r11,TASK_STACK-THREAD(r11)
57 addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
58 tophys_novmstack r11, r11
60 #ifdef CONFIG_VMAP_STACK
/* Branch if the CR bit mirroring (32 - THREAD_ALIGN_SHIFT) is set, i.e.
 * the frame fell outside the THREAD_ALIGN-aligned stack => overflow.
 * NOTE(review): the instruction loading CR for this `bt` (e.g. mtcrf of
 * r11) is not visible in this chunk — confirm against full file. */
62 bt 32 - THREAD_ALIGN_SHIFT, stack_overflow
/*
 * EXCEPTION_PROLOG_2: populate the exception frame whose address was
 * computed into r11, recover the registers stashed in the SPRG scratch
 * SPRs, switch r1 to the new kernel stack, sanitize the saved MSR in r9
 * and re-enable (partial) translation via mtmsr.  Finally materialize
 * STACK_FRAME_REGS_MARKER in r10 to tag the frame as holding a full
 * pt_regs.  Many store instructions and #else/#endif lines from the
 * original body are not visible in this chunk.
 * The hash-MMU (MMU_FTR_HPTE_TABLE) alternative sections only apply on
 * book3s with vmapped stacks.
 */
66 .macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
67 #if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
72 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
74 stw r10,_CCR(r11) /* save registers */
/* r10 held the entry-time value saved in PROLOG_0; recover it */
76 mfspr r10, SPRN_SPRG_SCRATCH0
80 #if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
84 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
/* Recover original r11 (stashed in SCRATCH1) into r12 for saving */
86 mfspr r12,SPRN_SPRG_SCRATCH1
90 #ifdef CONFIG_VMAP_STACK
91 mfspr r12, SPRN_SPRG_THREAD
100 #if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
101 BEGIN_MMU_FTR_SECTION
/* Re-test user vs kernel from the saved SRR1 in r9 (hash-MMU only) */
102 andi. r10, r9, MSR_PR
103 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
112 tovirt_novmstack r1, r11 /* set new kernel sp */
/* rlwinm with mask 14..12 keeps all bits EXCEPT bit 13 (MSR_WE) */
114 rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
116 #ifdef CONFIG_VMAP_STACK
/* vmapped stack: keep data translation ON so the stack is reachable */
117 li r10, MSR_KERNEL & ~MSR_IR /* can take exceptions */
119 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
121 mtmsr r10 /* (except for mach check in rtas) */
124 lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
125 addi r10,r10,STACK_FRAME_REGS_MARKER@l
/*
 * SYSCALL_ENTRY: system-call entry path (parallel to EXCEPTION_PROLOG,
 * but tailored for syscalls).  Builds the exception frame on the task's
 * kernel stack, clears the SO (summary overflow) bit in the saved CR so
 * the syscall can report success/failure through it, accounts CPU time,
 * handles 40x debug (DBCR0) state, and finally RFIs to
 * transfer_to_syscall with the MMU-enable MSR in place.
 * NOTE(review): this chunk omits many interleaved lines (#else/#endif,
 * SRR0/SRR1 setup for the RFI, stores of GPRs) — the comments below
 * only describe what is visible.
 */
131 .macro SYSCALL_ENTRY trapno
132 mfspr r12,SPRN_SPRG_THREAD
133 #ifdef CONFIG_VMAP_STACK
/* r11 := base of the task's kernel stack, then point at the frame */
140 lwz r11,TASK_STACK-THREAD(r12)
/* Mask keeps bits 4..2 cleared-complement: drops CR0[SO] from saved CR */
141 rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
142 addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
143 #ifdef CONFIG_VMAP_STACK
144 li r9, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
148 tovirt_vmstack r12, r12
149 tophys_novmstack r11, r11
151 stw r10,_CCR(r11) /* save registers */
153 #ifdef CONFIG_VMAP_STACK
162 tovirt_novmstack r1, r11 /* set new kernel sp */
/* Keep every MSR bit except bit 13 (MSR_WE) in the saved MSR */
165 rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
167 #ifdef CONFIG_VMAP_STACK
168 LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */
170 LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
172 mtmsr r10 /* (except for mach check in rtas) */
174 lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
176 addi r10,r10,STACK_FRAME_REGS_MARKER@l
/* r11 := pt_regs pointer for the accounting/debug helpers below */
184 addi r11,r1,STACK_FRAME_OVERHEAD
187 #if defined(CONFIG_40x)
188 /* Check to see if the dbcr0 register is set up to debug. Use the
189 internal debug mode bit to do this. */
190 lwz r12,THREAD_DBCR0(r12)
191 andis. r12,r12,DBCR0_IDM@h
193 ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
194 #if defined(CONFIG_40x)
196 /* From user and task is ptraced - load up global dbcr0 */
197 li r12,-1 /* clear all pending debug events */
199 lis r11,global_dbcr0@ha
201 addi r11,r11,global_dbcr0@l
210 tovirt_novmstack r2, r2 /* set r2 to current */
/* r11 := handler address for the RFI below */
211 lis r11, transfer_to_syscall@h
212 ori r11, r11, transfer_to_syscall@l
213 #ifdef CONFIG_TRACE_IRQFLAGS
215 * If MSR is changing we need to keep interrupts disabled at this point
216 * otherwise we might risk taking an interrupt before we tell lockdep
219 LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)
220 rlwimi r10, r9, 0, MSR_EE
222 LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
224 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
230 RFI /* jump to handler, enable MMU */
/*
 * save_dar_dsisr_on_stack: read DAR/DSISR from their SPRs into
 * \reg1/\reg2 and store them into the pt_regs frame at \sp.
 * Only compiled without CONFIG_VMAP_STACK — with vmapped stacks they
 * are presumably captured earlier by the prolog (handle_dar_dsisr);
 * confirm against full file.
 * NOTE(review): the `stw \reg1, _DAR(\sp)` store and the closing
 * #endif/.endm are not visible in this chunk.
 */
233 .macro save_dar_dsisr_on_stack reg1, reg2, sp
234 #ifndef CONFIG_VMAP_STACK
235 mfspr \reg1, SPRN_DAR
236 mfspr \reg2, SPRN_DSISR
238 stw \reg2, _DSISR(\sp)
/*
 * get_and_save_dar_dsisr_on_stack: make DAR/DSISR available in
 * \reg1/\reg2 AND saved in the frame at \sp.
 *  - CONFIG_VMAP_STACK: values are already in the frame, so just load
 *    them back (the _DAR load for \reg1 is not visible in this chunk);
 *  - otherwise: delegate to save_dar_dsisr_on_stack, which both reads
 *    the SPRs and stores them.
 */
242 .macro get_and_save_dar_dsisr_on_stack reg1, reg2, sp
243 #ifdef CONFIG_VMAP_STACK
245 lwz \reg2, _DSISR(\sp)
247 save_dar_dsisr_on_stack \reg1, \reg2, \sp
/*
 * tovirt_vmstack: phys->virt address conversion of \src into \dst, but
 * only when CONFIG_VMAP_STACK is enabled (no-op otherwise, judging by
 * the name — body and #endif are not visible in this chunk).
 */
251 .macro tovirt_vmstack dst, src
252 #ifdef CONFIG_VMAP_STACK
/*
 * tovirt_novmstack: counterpart of tovirt_vmstack — performs the
 * phys->virt conversion only when CONFIG_VMAP_STACK is NOT enabled
 * (body and #endif are not visible in this chunk).
 */
261 .macro tovirt_novmstack dst, src
262 #ifndef CONFIG_VMAP_STACK
/*
 * tophys_novmstack: virt->phys conversion of \src into \dst only when
 * CONFIG_VMAP_STACK is NOT enabled; with vmapped stacks addresses stay
 * virtual (body and #endif are not visible in this chunk).
 */
271 .macro tophys_novmstack dst, src
272 #ifndef CONFIG_VMAP_STACK
282 * Note: code which follows this uses cr0.eq (set if from kernel),
283 * r11, r12 (SRR0), and r9 (SRR1).
285 * Note2: once we have set r1 we are in a position to take exceptions
286 * again, and we could thus set MSR:RI at that point.
/*
 * Exception vector construction macros.  START_EXCEPTION has a
 * book3s variant and a non-book3s variant (the #else separating them is
 * not visible in this chunk).  EXCEPTION() builds a complete vector
 * that passes pt_regs (r1 + STACK_FRAME_OVERHEAD) in r3 to the handler.
 * EXC_XFER_TEMPLATE stores the trap number and transfers to `tfer` with
 * the given MSR, returning through `ret`.
 * NOTE(review): comments are only placed where the preceding visible
 * line does not end in a backslash, to avoid splicing into a
 * continuation; several continuation lines are missing from this chunk.
 */
292 #ifdef CONFIG_PPC_BOOK3S
293 #define START_EXCEPTION(n, label) \
299 #define START_EXCEPTION(n, label) \
305 #define EXCEPTION(n, label, hdlr, xfer) \
306 START_EXCEPTION(n, label) \
308 addi r3,r1,STACK_FRAME_OVERHEAD; \
311 #define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \
313 stw r10,_TRAP(r11); \
314 LOAD_REG_IMMEDIATE(r10, msr); \
319 #define EXC_XFER_STD(n, hdlr) \
320 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
321 ret_from_except_full)
/* "Lite" transfer: n+1 sets the trap-number low bit to mark the lite
 * (full-register-save not needed) path via transfer_to_handler. */
323 #define EXC_XFER_LITE(n, hdlr) \
324 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
/*
 * vmap_stack_overflow_exception: handler tail reached from the `bt ...
 * stack_overflow` check in EXCEPTION_PROLOG_1.  Switches r11 to an
 * emergency stack so the overflow can be reported:
 *  - the emergency_ctx path indexes per-CPU via TASK_CPU (SMP) or loads
 *    it directly (UP) — the #ifdef CONFIG_SMP/#else lines separating
 *    these variants are not visible in this chunk;
 *  - falls back to init_thread_union, then carves INT_FRAME_SIZE at the
 *    top and hands off to stack_overflow_exception via EXC_XFER_STD.
 * Closing .endm and the non-VMAP #else/#endif are not visible here; the
 * final #endif below closes the file's include guard.
 */
327 .macro vmap_stack_overflow_exception
328 #ifdef CONFIG_VMAP_STACK
330 mfspr r11, SPRN_SPRG_THREAD
332 lwz r11, TASK_CPU - THREAD(r11)
334 addis r11, r11, emergency_ctx@ha
336 lis r11, emergency_ctx@ha
338 lwz r11, emergency_ctx@l(r11)
/* Last-resort stack: the init task's thread union */
341 lis r11, init_thread_union@ha
342 addi r11, r11, init_thread_union@l
343 1: addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
/* r3 := pt_regs for the C-level stack_overflow_exception handler */
346 addi r3, r1, STACK_FRAME_OVERHEAD
347 EXC_XFER_STD(0, stack_overflow_exception)
351 #endif /* __HEAD_32_H__ */