// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static const char *handler[] = {
        "Synchronous Abort",
        "IRQ",
        "FIQ",
        "Error"
};

int show_unhandled_signals = 0;

static void dump_backtrace_entry(unsigned long where)
{
        printk(" %pS\n", (void *)where);
}

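/*
 * Dump the instruction stream around a kernel fault: the four
 * instructions preceding the faulting PC plus the faulting instruction
 * itself, printed as "Code: xxxxxxxx ... (xxxxxxxx)" with the faulting
 * word in parentheses. Reads go through aarch64_insn_read() so that a
 * bogus PC is reported as "bad PC value" rather than recursively
 * faulting.
 */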
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        if (user_mode(regs))
                return;

        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }

        printk("%sCode: %s\n", lvl, str);
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        struct stackframe frame;
        int skip = 0;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (regs) {
                if (user_mode(regs))
                        return;
                skip = 1;
        }

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        if (tsk == current) {
                start_backtrace(&frame,
                                (unsigned long)__builtin_frame_address(0),
                                (unsigned long)dump_backtrace);
        } else {
                /*
                 * task blocked in __switch_to
                 */
                start_backtrace(&frame,
                                thread_saved_fp(tsk),
                                thread_saved_pc(tsk));
        }

        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
                if (!skip) {
                        dump_backtrace_entry(frame.pc);
                } else if (frame.fp == regs->regs[29]) {
                        skip = 0;
                        /*
                         * This is typically the case when dump_backtrace()
                         * is called from panic/abort. Since the exception
                         * handler's stack frame does not contain the PC at
                         * which the exception was taken, use regs->pc
                         * instead.
                         */
                        dump_backtrace_entry(regs->pc);
                }
        } while (!unwind_frame(tsk, &frame));

        put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
{
        static int die_counter;
        int ret;

        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
                 str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return ret;

        print_modules();
        show_regs(regs);

        dump_kernel_instr(KERN_EMERG, regs);

        return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        int ret;
        unsigned long flags;

        raw_spin_lock_irqsave(&die_lock, flags);

        oops_enter();

        console_verbose();
        bust_spinlocks(1);
        ret = __die(str, err, regs);

        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");

        raw_spin_unlock_irqrestore(&die_lock, flags);

        if (ret != NOTIFY_STOP)
                do_exit(SIGSEGV);
}

static void arm64_show_signal(int signo, const char *str)
{
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        struct task_struct *tsk = current;
        unsigned int esr = tsk->thread.fault_code;
        struct pt_regs *regs = task_pt_regs(tsk);

        /* Leave if the signal won't be shown */
        if (!show_unhandled_signals ||
            !unhandled_signal(tsk, signo) ||
            !__ratelimit(&rs))
                return;

        pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
        if (esr)
                pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

        pr_cont("%s", str);
        print_vma_addr(KERN_CONT " in ", regs->pc);
        pr_cont("\n");
        __show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, void __user *addr,
                           const char *str)
{
        arm64_show_signal(signo, str);
        if (signo == SIGKILL)
                force_sig(SIGKILL);
        else
                force_sig_fault(signo, code, addr);
}

void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
                            const char *str)
{
        arm64_show_signal(SIGBUS, str);
        force_sig_mceerr(code, addr, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
                                       const char *str)
{
        arm64_show_signal(SIGTRAP, str);
        force_sig_ptrace_errno_trap(errno, addr);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
                      int signo, int sicode, void __user *addr,
                      int err)
{
        if (user_mode(regs)) {
                WARN_ON(regs != current_pt_regs());
                current->thread.fault_address = 0;
                current->thread.fault_code = err;

                arm64_force_sig_fault(signo, sicode, addr, str);
        } else {
                die(str, regs, err);
        }
}

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
        regs->pc += size;

        /*
         * If we were single stepping, we want to get the step exception after
         * we return from the trap.
         */
        if (user_mode(regs))
                user_fastforward_single_step(current);
}

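/*
 * Undefined-instruction hooks: other code (e.g. the deprecated-
 * instruction emulation in armv8_deprecated.c) can register a hook
 * describing an instruction encoding and the PSTATE context it cares
 * about; call_undef_hook() below walks this list when an undefined-
 * instruction trap is taken. A hook's fn returning 0 means "handled"
 * (the hook is expected to have advanced the PC); anything else falls
 * through to the SIGILL/die() path in do_undefinstr().
 *
 * A minimal, purely illustrative registration (the encoding and names
 * here are hypothetical) might look like:
 *
 *      static int my_emulate(struct pt_regs *regs, u32 instr)
 *      {
 *              ...emulate instr, advance regs->pc...
 *              return 0;
 *      }
 *
 *      static struct undef_hook my_hook = {
 *              .instr_mask     = 0xffffffff,
 *              .instr_val      = 0xdeadc0de,
 *              .pstate_mask    = PSR_MODE_MASK,
 *              .pstate_val     = PSR_MODE_EL0t,
 *              .fn             = my_emulate,
 *      };
 *
 *      register_undef_hook(&my_hook);
 */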
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

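/*
 * Fetch the instruction that trapped and run any matching hook. For a
 * kernel-mode trap the read must go through probe_kernel_address(), as
 * the PC may point at unmapped memory. For a compat task in Thumb mode
 * a 16-bit halfword is read first; if it is the first half of a wide
 * (32-bit) encoding, the second halfword is fetched and combined with
 * the first halfword in the high bits, matching how T32 encodings are
 * defined.
 */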
static int call_undef_hook(struct pt_regs *regs)
{
        struct undef_hook *hook;
        unsigned long flags;
        u32 instr;
        int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
        void __user *pc = (void __user *)instruction_pointer(regs);

        if (!user_mode(regs)) {
                __le32 instr_le;
                if (probe_kernel_address((__force __le32 *)pc, instr_le))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        } else if (compat_thumb_mode(regs)) {
                /* 16-bit Thumb instruction */
                __le16 instr_le;
                if (get_user(instr_le, (__le16 __user *)pc))
                        goto exit;
                instr = le16_to_cpu(instr_le);
                if (aarch32_insn_is_wide(instr)) {
                        u32 instr2;

                        if (get_user(instr_le, (__le16 __user *)(pc + 2)))
                                goto exit;
                        instr2 = le16_to_cpu(instr_le);
                        instr = (instr << 16) | instr2;
                }
        } else {
                /* 32-bit ARM instruction */
                __le32 instr_le;
                if (get_user(instr_le, (__le32 __user *)pc))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        }

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                        (regs->pstate & hook->pstate_mask) == hook->pstate_val)
                        fn = hook->fn;

        raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
        return fn ? fn(regs, instr) : 1;
}

void force_signal_inject(int signal, int code, unsigned long address)
{
        const char *desc;
        struct pt_regs *regs = current_pt_regs();

        if (WARN_ON(!user_mode(regs)))
                return;

        switch (signal) {
        case SIGILL:
                desc = "undefined instruction";
                break;
        case SIGSEGV:
                desc = "illegal memory access";
                break;
        default:
                desc = "unknown or unrecoverable error";
                break;
        }

        /* Force signals we don't understand to SIGKILL */
        if (WARN_ON(signal != SIGKILL &&
                    siginfo_layout(signal, code) != SIL_FAULT)) {
                signal = SIGKILL;
        }

        arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
        int code;

        down_read(&current->mm->mmap_sem);
        if (find_vma(current->mm, addr) == NULL)
                code = SEGV_MAPERR;
        else
                code = SEGV_ACCERR;
        up_read(&current->mm->mmap_sem);

        force_signal_inject(SIGSEGV, code, addr);
}

void do_undefinstr(struct pt_regs *regs)
{
        /* check for AArch32 breakpoint instructions */
        if (!aarch32_break_handler(regs))
                return;

        if (call_undef_hook(regs) == 0)
                return;

        BUG_ON(!user_mode(regs));
        force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}
NOKPROBE_SYMBOL(do_undefinstr);

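/*
 * Run a cache maintenance instruction on a user address. The
 * _ASM_EXTABLE entry routes a faulting access to the fixup at label 3,
 * which writes -EFAULT into 'res' instead of taking the kernel down;
 * the uaccess_ttbr0_enable/disable pair makes the user mapping
 * accessible around the instruction on kernels using TTBR0 PAN
 * emulation.
 */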
#define __user_cache_maint(insn, address, res)                  \
        if (address >= user_addr_max()) {                       \
                res = -EFAULT;                                  \
        } else {                                                \
                uaccess_ttbr0_enable();                         \
                asm volatile (                                  \
                        "1:     " insn ", %1\n"                 \
                        "       mov     %w0, #0\n"              \
                        "2:\n"                                  \
                        "       .pushsection .fixup,\"ax\"\n"   \
                        "       .align  2\n"                    \
                        "3:     mov     %w0, %w2\n"             \
                        "       b       2b\n"                   \
                        "       .popsection\n"                  \
                        _ASM_EXTABLE(1b, 3b)                    \
                        : "=r" (res)                            \
                        : "r" (address), "i" (-EFAULT));        \
                uaccess_ttbr0_disable();                        \
        }

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
        unsigned long address;
        int rt = ESR_ELx_SYS64_ISS_RT(esr);
        int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
        int ret = 0;

        address = untagged_addr(pt_regs_read_reg(regs, rt));

        switch (crm) {
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:     /* DC CVAU, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:     /* DC CVAC, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:    /* DC CVADP */
                __user_cache_maint("sys 3, c7, c13, 1", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:     /* DC CVAP */
                __user_cache_maint("sys 3, c7, c12, 1", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:    /* DC CIVAC */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:     /* IC IVAU */
                __user_cache_maint("ic ivau", address, ret);
                break;
        default:
                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
                return;
        }

        if (ret)
                arm64_notify_segfault(address);
        else
                arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

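/*
 * EL0 reads of CTR_EL0 land here when the kernel has made them trap
 * (architecturally, when SCTLR_EL1.UCT is clear, e.g. on systems with
 * mismatched cache geometry between CPUs). Userspace is handed the
 * sanitised, system-wide safe value rather than the raw register.
 */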
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);
        unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

        if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
                /* Hide DIC so that we can trap the unnecessary maintenance... */
                val &= ~BIT(CTR_DIC_SHIFT);

                /* ... and fake IminLine to reduce the number of traps. */
                val &= ~CTR_IMINLINE_MASK;
                val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
        }

        pt_regs_write_reg(regs, rt, val);

        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);

        pt_regs_write_reg(regs, rt, arch_timer_read_counter());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);

        pt_regs_write_reg(regs, rt, arch_timer_get_rate());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned int esr, struct pt_regs *regs)
{
        u32 sysreg, rt;

        rt = ESR_ELx_SYS64_ISS_RT(esr);
        sysreg = esr_sys64_to_sysreg(esr);

        if (do_emulate_mrs(regs, sysreg, rt) != 0)
                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

struct sys64_hook {
        unsigned int esr_mask;
        unsigned int esr_val;
        void (*handler)(unsigned int esr, struct pt_regs *regs);
};

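/*
 * Emulation hooks for EL0 system-instruction traps. do_sysinstr()
 * runs the first entry whose (hook->esr_mask & esr) == hook->esr_val;
 * each handler is responsible for retiring the instruction, i.e.
 * writing back any result and advancing the PC.
 */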
static const struct sys64_hook sys64_hooks[] = {
        {
                .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
                .handler = user_cache_maint_handler,
        },
        {
                /* Trap read access to CTR_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
                .handler = ctr_read_handler,
        },
        {
                /* Trap read access to CNTVCT_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
                .handler = cntvct_read_handler,
        },
        {
                /* Trap read access to CNTFRQ_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
                .handler = cntfrq_read_handler,
        },
        {
                /* Trap read access to CPUID registers */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
                .handler = mrs_handler,
        },
        {
                /* Trap WFI instructions executed in userspace */
                .esr_mask = ESR_ELx_WFx_MASK,
                .esr_val = ESR_ELx_WFx_WFI_VAL,
                .handler = wfi_handler,
        },
        {},
};

#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT     25
#define PSTATE_IT_1_0_MASK      (0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT     10
#define PSTATE_IT_7_2_MASK      (0x3f << PSTATE_IT_7_2_SHIFT)

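/*
 * The AArch32 ITSTATE field is split across two disjoint bit ranges of
 * the saved PSTATE: IT[1:0] live in bits [26:25] and IT[7:2] in bits
 * [15:10]. The helpers below gather the eight bits into a contiguous
 * value and scatter them back.
 */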
static u32 compat_get_it_state(struct pt_regs *regs)
{
        u32 it, pstate = regs->pstate;

        it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
        it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

        return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
        u32 pstate_it;

        pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
        pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

        regs->pstate &= ~PSR_AA32_IT_MASK;
        regs->pstate |= pstate_it;
}

static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
        int cond;

        /* Only a T32 instruction can trap without CV being set */
        if (!(esr & ESR_ELx_CV)) {
                u32 it;

                it = compat_get_it_state(regs);
                if (!it)
                        return true;

                cond = it >> 4;
        } else {
                cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
        }

        return aarch32_opcode_cond_checks[cond](regs->pstate);
}

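/*
 * After emulating an instruction inside an IT block, ITSTATE must
 * advance as if the instruction had executed. The top three bits hold
 * the base condition and the low five bits shift left by one per
 * instruction; once the low three bits are zero the block is on its
 * last instruction and the whole field is wiped.
 */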
static void advance_itstate(struct pt_regs *regs)
{
        u32 it;

        /* ARM mode */
        if (!(regs->pstate & PSR_AA32_T_BIT) ||
            !(regs->pstate & PSR_AA32_IT_MASK))
                return;

        it  = compat_get_it_state(regs);

        /*
         * If this is the last instruction of the block, wipe the IT
         * state. Otherwise advance it.
         */
        if (!(it & 7))
                it = 0;
        else
                it = (it & 0xe0) | ((it << 1) & 0x1f);

        compat_set_it_state(regs, it);
}

static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
                                                   unsigned int sz)
{
        advance_itstate(regs);
        arm64_skip_faulting_instruction(regs, sz);
}

static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

        pt_regs_write_reg(regs, reg, arch_timer_get_rate());
        arm64_compat_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
        {
                .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
                .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
                .handler = compat_cntfrq_read_handler,
        },
        {},
};

static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
        int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
        u64 val = arch_timer_read_counter();

        pt_regs_write_reg(regs, rt, lower_32_bits(val));
        pt_regs_write_reg(regs, rt2, upper_32_bits(val));
        arm64_compat_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
        {
                .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
                .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
                .handler = compat_cntvct_read_handler,
        },
        {},
};

void do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
        const struct sys64_hook *hook, *hook_base;

        if (!cp15_cond_valid(esr, regs)) {
                /*
                 * There is no T16 variant of a CP access, so we
                 * always advance PC by 4 bytes.
                 */
                arm64_compat_skip_faulting_instruction(regs, 4);
                return;
        }

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_CP15_32:
                hook_base = cp15_32_hooks;
                break;
        case ESR_ELx_EC_CP15_64:
                hook_base = cp15_64_hooks;
                break;
        default:
                do_undefinstr(regs);
                return;
        }

        for (hook = hook_base; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr, regs);
                        return;
                }

        /*
         * New cp15 instructions may previously have been undefined at
         * EL0. Fall back to our usual undefined instruction handler
         * so that we handle these consistently.
         */
        do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_cp15instr);
#endif

void do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
        const struct sys64_hook *hook;

        for (hook = sys64_hooks; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr, regs);
                        return;
                }

        /*
         * New SYS instructions may previously have been undefined at EL0. Fall
         * back to our usual undefined instruction handler so that we handle
         * these consistently.
         */
        do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_sysinstr);

static const char *esr_class_str[] = {
        [0 ... ESR_ELx_EC_MAX]          = "UNRECOGNIZED EC",
        [ESR_ELx_EC_UNKNOWN]            = "Unknown/Uncategorized",
        [ESR_ELx_EC_WFx]                = "WFI/WFE",
        [ESR_ELx_EC_CP15_32]            = "CP15 MCR/MRC",
        [ESR_ELx_EC_CP15_64]            = "CP15 MCRR/MRRC",
        [ESR_ELx_EC_CP14_MR]            = "CP14 MCR/MRC",
        [ESR_ELx_EC_CP14_LS]            = "CP14 LDC/STC",
        [ESR_ELx_EC_FP_ASIMD]           = "ASIMD",
        [ESR_ELx_EC_CP10_ID]            = "CP10 MRC/VMRS",
        [ESR_ELx_EC_PAC]                = "PAC",
        [ESR_ELx_EC_CP14_64]            = "CP14 MCRR/MRRC",
        [ESR_ELx_EC_ILL]                = "PSTATE.IL",
        [ESR_ELx_EC_SVC32]              = "SVC (AArch32)",
        [ESR_ELx_EC_HVC32]              = "HVC (AArch32)",
        [ESR_ELx_EC_SMC32]              = "SMC (AArch32)",
        [ESR_ELx_EC_SVC64]              = "SVC (AArch64)",
        [ESR_ELx_EC_HVC64]              = "HVC (AArch64)",
        [ESR_ELx_EC_SMC64]              = "SMC (AArch64)",
        [ESR_ELx_EC_SYS64]              = "MSR/MRS (AArch64)",
        [ESR_ELx_EC_SVE]                = "SVE",
        [ESR_ELx_EC_ERET]               = "ERET/ERETAA/ERETAB",
        [ESR_ELx_EC_IMP_DEF]            = "EL3 IMP DEF",
        [ESR_ELx_EC_IABT_LOW]           = "IABT (lower EL)",
        [ESR_ELx_EC_IABT_CUR]           = "IABT (current EL)",
        [ESR_ELx_EC_PC_ALIGN]           = "PC Alignment",
        [ESR_ELx_EC_DABT_LOW]           = "DABT (lower EL)",
        [ESR_ELx_EC_DABT_CUR]           = "DABT (current EL)",
        [ESR_ELx_EC_SP_ALIGN]           = "SP Alignment",
        [ESR_ELx_EC_FP_EXC32]           = "FP (AArch32)",
        [ESR_ELx_EC_FP_EXC64]           = "FP (AArch64)",
        [ESR_ELx_EC_SERROR]             = "SError",
        [ESR_ELx_EC_BREAKPT_LOW]        = "Breakpoint (lower EL)",
        [ESR_ELx_EC_BREAKPT_CUR]        = "Breakpoint (current EL)",
        [ESR_ELx_EC_SOFTSTP_LOW]        = "Software Step (lower EL)",
        [ESR_ELx_EC_SOFTSTP_CUR]        = "Software Step (current EL)",
        [ESR_ELx_EC_WATCHPT_LOW]        = "Watchpoint (lower EL)",
        [ESR_ELx_EC_WATCHPT_CUR]        = "Watchpoint (current EL)",
        [ESR_ELx_EC_BKPT32]             = "BKPT (AArch32)",
        [ESR_ELx_EC_VECTOR32]           = "Vector catch (AArch32)",
        [ESR_ELx_EC_BRK64]              = "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
        return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
        console_verbose();

        pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
                handler[reason], smp_processor_id(), esr,
                esr_get_class_string(esr));

        local_daif_mask();
        panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
        void __user *pc = (void __user *)instruction_pointer(regs);

        current->thread.fault_address = 0;
        current->thread.fault_code = esr;

        arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
                              "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

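/*
 * With CONFIG_VMAP_STACK, task stacks live in vmalloc space with a
 * guard page below them, so a kernel stack overflow faults instead of
 * silently corrupting adjacent memory. The entry code detects the
 * overflowed SP and pivots onto this per-cpu overflow stack so that
 * handle_bad_stack() has something safe to run on while it reports
 * the failure.
 */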
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
        __aligned(16);

asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
        unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
        unsigned int esr = read_sysreg(esr_el1);
        unsigned long far = read_sysreg(far_el1);

        console_verbose();
        pr_emerg("Insufficient stack space to handle exception!");

        pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
        pr_emerg("FAR: 0x%016lx\n", far);

        pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
                 tsk_stk, tsk_stk + THREAD_SIZE);
        pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
                 irq_stk, irq_stk + IRQ_STACK_SIZE);
        pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
                 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

        __show_regs(regs);

        /*
         * We use nmi_panic to limit the potential for recursive overflows, and
         * to get a better stack trace.
         */
        nmi_panic(NULL, "kernel stack overflow");
        cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
        console_verbose();

        pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
                smp_processor_id(), esr, esr_get_class_string(esr));
        if (regs)
                __show_regs(regs);

        nmi_panic(regs, "Asynchronous SError Interrupt");

        cpu_park_loop();
        unreachable();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
        u32 aet = arm64_ras_serror_get_severity(esr);

        switch (aet) {
        case ESR_ELx_AET_CE:    /* corrected error */
        case ESR_ELx_AET_UEO:   /* restartable, not yet consumed */
                /*
                 * The CPU can make progress. We may take UEO again as
                 * a more severe error.
                 */
                return false;

        case ESR_ELx_AET_UEU:   /* Uncorrected Unrecoverable */
        case ESR_ELx_AET_UER:   /* Uncorrected Recoverable */
                /*
                 * The CPU can't make progress. The exception may have
                 * been imprecise.
                 *
                 * Neoverse-N1 #1349291 means a non-KVM SError reported as
                 * Unrecoverable should be treated as Uncontainable. We
                 * call arm64_serror_panic() in both cases.
                 */
                return true;

        case ESR_ELx_AET_UC:    /* Uncontainable or Uncategorized error */
        default:
                /* Error has been silently propagated */
                arm64_serror_panic(regs, esr);
        }
}

asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
        const bool was_in_nmi = in_nmi();

        if (!was_in_nmi)
                nmi_enter();

        /* non-RAS errors are not containable */
        if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
                arm64_serror_panic(regs, esr);

        if (!was_in_nmi)
                nmi_exit();
}

asmlinkage void enter_from_user_mode(void)
{
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();
}
NOKPROBE_SYMBOL(enter_from_user_mode);

void __pte_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}

void __pud_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
        /*
         * bug_handler() is only called for BRK #BUG_BRK_IMM, so the
         * answer is trivial -- any spurious instances with no bug
         * table entry will be rejected by report_bug() and passed
         * back to the debug-monitors code, which handles them as a
         * fatal unexpected debug exception.
         */
        return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
        switch (report_bug(regs->pc, regs)) {
        case BUG_TRAP_TYPE_BUG:
                die("Oops - BUG", regs, 0);
                break;

        case BUG_TRAP_TYPE_WARN:
                break;

        default:
                /* unknown/unrecognised bug trap type */
                return DBG_HOOK_ERROR;
        }

        /* If thread survives, skip over the BUG instruction and continue: */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
        .fn = bug_handler,
        .imm = BUG_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER       0x20
#define KASAN_ESR_WRITE         0x10
#define KASAN_ESR_SIZE_MASK     0x0f
#define KASAN_ESR_SIZE(esr)     (1 << ((esr) & KASAN_ESR_SIZE_MASK))

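/*
 * Software tag-based KASAN encodes the details of a failed check in
 * the low bits of the BRK immediate, mirrored into the ESR comment
 * field: bit 5 set means the instrumentation was built for recovery,
 * bit 4 distinguishes writes from reads, and the low four bits carry
 * log2 of the access size, which KASAN_ESR_SIZE() turns back into
 * bytes.
 */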
static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
        bool recover = esr & KASAN_ESR_RECOVER;
        bool write = esr & KASAN_ESR_WRITE;
        size_t size = KASAN_ESR_SIZE(esr);
        u64 addr = regs->regs[0];
        u64 pc = regs->pc;

        kasan_report(addr, size, write, pc);

        /*
         * The instrumentation allows us to control whether we can proceed
         * after a crash was detected. This is done by passing the -recover
         * flag to the compiler. Disabling recovery allows the compiler to
         * generate more compact code.
         *
         * Unfortunately disabling recovery doesn't work for the kernel right
         * now. KASAN reporting is disabled in some contexts (for example when
         * the allocator accesses slab object metadata; this is controlled by
         * current->kasan_depth). All these accesses are detected by the tool,
         * even though the reports for them are not printed.
         *
         * This is something that might be fixed at some point in the future.
         */
        if (!recover)
                die("Oops - KASAN", regs, 0);

        /* If thread survives, skip over the brk instruction and continue: */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
        .fn     = kasan_handler,
        .imm    = KASAN_BRK_IMM,
        .mask   = KASAN_BRK_MASK,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
                struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
        unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

        if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
                return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
        return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
        register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
        register_kernel_break_hook(&kasan_break_hook);
#endif
}