// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	/* Facility 75 is the access-exception fetch/store indication facility. */
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
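
/*
 * With the store-indication facility, two bits of the TEID (selected by
 * store_indication == 0xc00) report whether the failing access was a
 * fetch or a store; do_exception() compares the masked value against
 * 0x400 to decide whether FAULT_FLAG_WRITE needs to be set.
 */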

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (IS_ENABLED(CONFIG_PGSTE) &&
		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		if (current->thread.mm_segment == USER_DS)
			return USER_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2) {
		/* secondary space exception */
		if (current->thread.mm_segment & 1) {
			if (current->thread.mm_segment == USER_DS_SACF)
				return USER_FAULT;
			return KERNEL_FAULT;
		}
		return VDSO_FAULT;
	}
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}
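
/*
 * bad_address() probes whether a page-table entry can be read at all;
 * dump_pagetable() walks the table rooted at the given ASCE and prints
 * one entry per translation level, stopping at the first invalid,
 * large, or unreadable entry.
 */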

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}
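
/*
 * Search the architecture-specific DMA exception table first, then
 * fall back to the generic kernel/module exception tables.
 */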

const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}
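
/*
 * A fault on an execute-protected page may be a process coming back
 * from a signal handler through a return trampoline that is not mapped
 * executable: 0x0a77 is "svc 119" (sigreturn) and 0x0aad is "svc 173"
 * (rt_sigreturn) on s390.  If the faulting PSW points at one of these
 * opcodes, restart the corresponding system call instead of raising
 * SIGSEGV.
 */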

static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule is aborted transactions; for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);
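
/*
 * DAT exceptions (segment, page, or region translation) can be raised
 * by read, write, or execute accesses alike, so all three rights are
 * passed down and the precise permission check is left to the vma
 * flags in do_exception().
 */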

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);

struct pfault_refbk {
	u32 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u32 refvn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refvn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b, 1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refvn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b, 0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule(). It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */

static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}
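
/*
 * CPU hotplug teardown: wake up every task still parked on the global
 * pfault_list, so no task stays blocked waiting for a completion
 * interrupt that may never be processed on the outgoing cpu.
 */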

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */