2 * Derived from "arch/i386/kernel/process.c"
3 * Copyright (C) 1995 Linus Torvalds
5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6 * Paul Mackerras (paulus@cs.anu.edu.au)
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/kernel.h>
24 #include <linux/smp.h>
25 #include <linux/stddef.h>
26 #include <linux/unistd.h>
27 #include <linux/ptrace.h>
28 #include <linux/slab.h>
29 #include <linux/user.h>
30 #include <linux/elf.h>
31 #include <linux/prctl.h>
32 #include <linux/init_task.h>
33 #include <linux/export.h>
34 #include <linux/kallsyms.h>
35 #include <linux/mqueue.h>
36 #include <linux/hardirq.h>
37 #include <linux/utsname.h>
38 #include <linux/ftrace.h>
39 #include <linux/kernel_stat.h>
40 #include <linux/personality.h>
41 #include <linux/random.h>
42 #include <linux/hw_breakpoint.h>
43 #include <linux/uaccess.h>
44 #include <linux/elf-randomize.h>
45 #include <linux/pkeys.h>
47 #include <asm/pgtable.h>
49 #include <asm/processor.h>
52 #include <asm/machdep.h>
54 #include <asm/runlatch.h>
55 #include <asm/syscalls.h>
56 #include <asm/switch_to.h>
58 #include <asm/debug.h>
60 #include <asm/firmware.h>
61 #include <asm/hw_irq.h>
63 #include <asm/code-patching.h>
65 #include <asm/livepatch.h>
66 #include <asm/cpu_has_feature.h>
67 #include <asm/asm-prototypes.h>
68 #include <asm/stacktrace.h>
70 #include <linux/kprobes.h>
71 #include <linux/kdebug.h>
73 /* Transactional Memory debug */
74 #ifdef TM_DEBUG_SW
75 #define TM_DEBUG(x...) printk(KERN_INFO x)
76 #else
77 #define TM_DEBUG(x...) do { } while(0)
78 #endif
80 extern unsigned long _get_SP(void);
82 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
84 * Are we running in "Suspend disabled" mode? If so we have to block any
85 * sigreturn that would get us into suspended state, and we also warn in some
86 * other paths that we should never reach with suspend disabled.
88 bool tm_suspend_disabled __ro_after_init = false;
90 static void check_if_tm_restore_required(struct task_struct *tsk)
93 * If we are saving the current thread's registers, and the
94 * thread is in a transactional state, set the TIF_RESTORE_TM
95 * bit so that we know to restore the registers before
96 * returning to userspace.
98 if (tsk == current && tsk->thread.regs &&
99 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
100 !test_thread_flag(TIF_RESTORE_TM)) {
101 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
102 set_thread_flag(TIF_RESTORE_TM);
106 static bool tm_active_with_fp(struct task_struct *tsk)
108 return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
109 (tsk->thread.ckpt_regs.msr & MSR_FP);
112 static bool tm_active_with_altivec(struct task_struct *tsk)
114 return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
115 (tsk->thread.ckpt_regs.msr & MSR_VEC);
118 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
119 static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
120 static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
121 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
123 bool strict_msr_control;
124 EXPORT_SYMBOL(strict_msr_control);
126 static int __init enable_strict_msr_control(char *str)
128 strict_msr_control = true;
129 pr_info("Enabling strict facility control\n");
133 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
135 unsigned long msr_check_and_set(unsigned long bits)
137 unsigned long oldmsr = mfmsr();
138 unsigned long newmsr;
140 newmsr = oldmsr | bits;
143 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
147 if (oldmsr != newmsr)
152 EXPORT_SYMBOL_GPL(msr_check_and_set);
154 void __msr_check_and_clear(unsigned long bits)
156 unsigned long oldmsr = mfmsr();
157 unsigned long newmsr;
159 newmsr = oldmsr & ~bits;
162 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
166 if (oldmsr != newmsr)
169 EXPORT_SYMBOL(__msr_check_and_clear);
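/*
 * Usage sketch (illustrative only, mirroring giveup_fpu() below): callers
 * bracket a facility save with this set/clear pair so the MSR bit is only
 * enabled while the registers are being written out:
 *
 *	msr_check_and_set(MSR_FP);
 *	__giveup_fpu(tsk);	(state now lives in tsk->thread)
 *	msr_check_and_clear(MSR_FP);
 */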
171 #ifdef CONFIG_PPC_FPU
172 static void __giveup_fpu(struct task_struct *tsk)
177 msr = tsk->thread.regs->msr;
180 if (cpu_has_feature(CPU_FTR_VSX))
183 tsk->thread.regs->msr = msr;
186 void giveup_fpu(struct task_struct *tsk)
188 check_if_tm_restore_required(tsk);
190 msr_check_and_set(MSR_FP);
192 msr_check_and_clear(MSR_FP);
194 EXPORT_SYMBOL(giveup_fpu);
197 * Make sure the floating-point register state in
198 * the thread_struct is up to date for task tsk.
200 void flush_fp_to_thread(struct task_struct *tsk)
202 if (tsk->thread.regs) {
204 * We need to disable preemption here because if we didn't,
205 * another process could get scheduled after the regs->msr
206 * test but before we have finished saving the FP registers
207 * to the thread_struct. That process could take over the
208 * FPU, and then when we get scheduled again we would store
209 * bogus values for the remaining FP registers.
212 if (tsk->thread.regs->msr & MSR_FP) {
214 * This should only ever be called for current or
215 * for a stopped child process. Since we save away
216 * the FP register state on context switch,
217 * there is something wrong if a stopped child appears
218 * to still have its FP state in the CPU registers.
220 BUG_ON(tsk != current);
226 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
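/*
 * Caller sketch (hedged; "child" and "buf" are hypothetical): ptrace-style
 * code reading a stopped tracee's FP registers flushes the live state into
 * the thread_struct first, then reads it from memory:
 *
 *	flush_fp_to_thread(child);
 *	memcpy(buf, &child->thread.fp_state, sizeof(child->thread.fp_state));
 */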
228 void enable_kernel_fp(void)
230 unsigned long cpumsr;
232 WARN_ON(preemptible());
234 cpumsr = msr_check_and_set(MSR_FP);
236 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
237 check_if_tm_restore_required(current);
239 * If a thread has already been reclaimed then the
240 * checkpointed registers are on the CPU but have definitely
241 * been saved by the reclaim code. Don't need to and *cannot*
242 * giveup as this would save to the 'live' structure not the
243 * checkpointed structure.
245 if (!MSR_TM_ACTIVE(cpumsr) &&
246 MSR_TM_ACTIVE(current->thread.regs->msr))
248 __giveup_fpu(current);
251 EXPORT_SYMBOL(enable_kernel_fp);
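/*
 * Usage sketch (assumes disable_kernel_fp() from <asm/switch_to.h>, which
 * pairs with this function by clearing MSR_FP again): in-kernel FP use must
 * keep preemption disabled for the whole region:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use the FP registers ...
 *	disable_kernel_fp();
 *	preempt_enable();
 */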
253 static int restore_fp(struct task_struct *tsk)
255 if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
256 load_fp_state(&current->thread.fp_state);
257 current->thread.load_fp++;
263 static int restore_fp(struct task_struct *tsk) { return 0; }
264 #endif /* CONFIG_PPC_FPU */
266 #ifdef CONFIG_ALTIVEC
267 #define loadvec(thr) ((thr).load_vec)
269 static void __giveup_altivec(struct task_struct *tsk)
274 msr = tsk->thread.regs->msr;
277 if (cpu_has_feature(CPU_FTR_VSX))
280 tsk->thread.regs->msr = msr;
283 void giveup_altivec(struct task_struct *tsk)
285 check_if_tm_restore_required(tsk);
287 msr_check_and_set(MSR_VEC);
288 __giveup_altivec(tsk);
289 msr_check_and_clear(MSR_VEC);
291 EXPORT_SYMBOL(giveup_altivec);
293 void enable_kernel_altivec(void)
295 unsigned long cpumsr;
297 WARN_ON(preemptible());
299 cpumsr = msr_check_and_set(MSR_VEC);
301 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
302 check_if_tm_restore_required(current);
304 * If a thread has already been reclaimed then the
305 * checkpointed registers are on the CPU but have definitely
306 * been saved by the reclaim code. Don't need to and *cannot*
307 * giveup as this would save to the 'live' structure not the
308 * checkpointed structure.
310 if (!MSR_TM_ACTIVE(cpumsr) &&
311 MSR_TM_ACTIVE(current->thread.regs->msr))
313 __giveup_altivec(current);
316 EXPORT_SYMBOL(enable_kernel_altivec);
319 * Make sure the VMX/Altivec register state in
320 * the thread_struct is up to date for task tsk.
322 void flush_altivec_to_thread(struct task_struct *tsk)
324 if (tsk->thread.regs) {
326 if (tsk->thread.regs->msr & MSR_VEC) {
327 BUG_ON(tsk != current);
333 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
335 static int restore_altivec(struct task_struct *tsk)
337 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
338 (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
339 load_vr_state(&tsk->thread.vr_state);
340 tsk->thread.used_vr = 1;
341 tsk->thread.load_vec++;
348 #define loadvec(thr) 0
349 static inline int restore_altivec(struct task_struct *tsk) { return 0; }
350 #endif /* CONFIG_ALTIVEC */
353 static void __giveup_vsx(struct task_struct *tsk)
355 unsigned long msr = tsk->thread.regs->msr;
358 * We should never be setting MSR_VSX without also setting MSR_FP and MSR_VEC.
361 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
363 /* __giveup_fpu will clear MSR_VSX */
367 __giveup_altivec(tsk);
370 static void giveup_vsx(struct task_struct *tsk)
372 check_if_tm_restore_required(tsk);
374 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
376 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
379 void enable_kernel_vsx(void)
381 unsigned long cpumsr;
383 WARN_ON(preemptible());
385 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
387 if (current->thread.regs &&
388 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
389 check_if_tm_restore_required(current);
391 * If a thread has already been reclaimed then the
392 * checkpointed registers are on the CPU but have definitely
393 * been saved by the reclaim code. Don't need to and *cannot*
394 * giveup as this would save to the 'live' structure not the
395 * checkpointed structure.
397 if (!MSR_TM_ACTIVE(cpumsr) &&
398 MSR_TM_ACTIVE(current->thread.regs->msr))
400 __giveup_vsx(current);
403 EXPORT_SYMBOL(enable_kernel_vsx);
405 void flush_vsx_to_thread(struct task_struct *tsk)
407 if (tsk->thread.regs) {
409 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
410 BUG_ON(tsk != current);
416 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
418 static int restore_vsx(struct task_struct *tsk)
420 if (cpu_has_feature(CPU_FTR_VSX)) {
421 tsk->thread.used_vsr = 1;
428 static inline int restore_vsx(struct task_struct *tsk) { return 0; }
429 #endif /* CONFIG_VSX */
432 void giveup_spe(struct task_struct *tsk)
434 check_if_tm_restore_required(tsk);
436 msr_check_and_set(MSR_SPE);
438 msr_check_and_clear(MSR_SPE);
440 EXPORT_SYMBOL(giveup_spe);
442 void enable_kernel_spe(void)
444 WARN_ON(preemptible());
446 msr_check_and_set(MSR_SPE);
448 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
449 check_if_tm_restore_required(current);
450 __giveup_spe(current);
453 EXPORT_SYMBOL(enable_kernel_spe);
455 void flush_spe_to_thread(struct task_struct *tsk)
457 if (tsk->thread.regs) {
459 if (tsk->thread.regs->msr & MSR_SPE) {
460 BUG_ON(tsk != current);
461 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
467 #endif /* CONFIG_SPE */
469 static unsigned long msr_all_available;
471 static int __init init_msr_all_available(void)
473 #ifdef CONFIG_PPC_FPU
474 msr_all_available |= MSR_FP;
476 #ifdef CONFIG_ALTIVEC
477 if (cpu_has_feature(CPU_FTR_ALTIVEC))
478 msr_all_available |= MSR_VEC;
481 if (cpu_has_feature(CPU_FTR_VSX))
482 msr_all_available |= MSR_VSX;
485 if (cpu_has_feature(CPU_FTR_SPE))
486 msr_all_available |= MSR_SPE;
491 early_initcall(init_msr_all_available);
493 void giveup_all(struct task_struct *tsk)
495 unsigned long usermsr;
497 if (!tsk->thread.regs)
500 usermsr = tsk->thread.regs->msr;
502 if ((usermsr & msr_all_available) == 0)
505 msr_check_and_set(msr_all_available);
506 check_if_tm_restore_required(tsk);
508 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
510 #ifdef CONFIG_PPC_FPU
511 if (usermsr & MSR_FP)
514 #ifdef CONFIG_ALTIVEC
515 if (usermsr & MSR_VEC)
516 __giveup_altivec(tsk);
519 if (usermsr & MSR_SPE)
523 msr_check_and_clear(msr_all_available);
525 EXPORT_SYMBOL(giveup_all);
527 void restore_math(struct pt_regs *regs)
531 if (!MSR_TM_ACTIVE(regs->msr) &&
532 !current->thread.load_fp && !loadvec(current->thread))
536 msr_check_and_set(msr_all_available);
539 * Only reload if the bit is not set in the user MSR; the bit being set
540 * indicates that the registers are hot.
542 if ((!(msr & MSR_FP)) && restore_fp(current))
543 msr |= MSR_FP | current->thread.fpexc_mode;
545 if ((!(msr & MSR_VEC)) && restore_altivec(current))
548 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
549 restore_vsx(current)) {
553 msr_check_and_clear(msr_all_available);
558 static void save_all(struct task_struct *tsk)
560 unsigned long usermsr;
562 if (!tsk->thread.regs)
565 usermsr = tsk->thread.regs->msr;
567 if ((usermsr & msr_all_available) == 0)
570 msr_check_and_set(msr_all_available);
572 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
574 if (usermsr & MSR_FP)
577 if (usermsr & MSR_VEC)
580 if (usermsr & MSR_SPE)
583 msr_check_and_clear(msr_all_available);
584 thread_pkey_regs_save(&tsk->thread);
587 void flush_all_to_thread(struct task_struct *tsk)
589 if (tsk->thread.regs) {
591 BUG_ON(tsk != current);
595 if (tsk->thread.regs->msr & MSR_SPE)
596 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
602 EXPORT_SYMBOL(flush_all_to_thread);
604 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
605 void do_send_trap(struct pt_regs *regs, unsigned long address,
606 unsigned long error_code, int breakpt)
608 current->thread.trap_nr = TRAP_HWBKPT;
609 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
610 11, SIGSEGV) == NOTIFY_STOP)
613 /* Deliver the signal to userspace */
614 force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
615 (void __user *)address);
617 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
618 void do_break (struct pt_regs *regs, unsigned long address,
619 unsigned long error_code)
623 current->thread.trap_nr = TRAP_HWBKPT;
624 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
625 11, SIGSEGV) == NOTIFY_STOP)
628 if (debugger_break_match(regs))
631 /* Clear the breakpoint */
632 hw_breakpoint_disable();
634 /* Deliver the signal to userspace */
635 clear_siginfo(&info);
636 info.si_signo = SIGTRAP;
638 info.si_code = TRAP_HWBKPT;
639 info.si_addr = (void __user *)address;
640 force_sig_info(SIGTRAP, &info, current);
642 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
644 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
646 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
648 * Set the debug registers back to their default "safe" values.
650 static void set_debug_reg_defaults(struct thread_struct *thread)
652 thread->debug.iac1 = thread->debug.iac2 = 0;
653 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
654 thread->debug.iac3 = thread->debug.iac4 = 0;
656 thread->debug.dac1 = thread->debug.dac2 = 0;
657 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
658 thread->debug.dvc1 = thread->debug.dvc2 = 0;
660 thread->debug.dbcr0 = 0;
663 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
665 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
666 DBCR1_IAC3US | DBCR1_IAC4US;
668 * Force Data Address Compare User/Supervisor bits to be User-only
669 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
671 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
673 thread->debug.dbcr1 = 0;
677 static void prime_debug_regs(struct debug_reg *debug)
680 * We could have inherited MSR_DE from userspace, since
681 * it doesn't get cleared on exception entry. Make sure
682 * MSR_DE is clear before we enable any debug events.
684 mtmsr(mfmsr() & ~MSR_DE);
686 mtspr(SPRN_IAC1, debug->iac1);
687 mtspr(SPRN_IAC2, debug->iac2);
688 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
689 mtspr(SPRN_IAC3, debug->iac3);
690 mtspr(SPRN_IAC4, debug->iac4);
692 mtspr(SPRN_DAC1, debug->dac1);
693 mtspr(SPRN_DAC2, debug->dac2);
694 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
695 mtspr(SPRN_DVC1, debug->dvc1);
696 mtspr(SPRN_DVC2, debug->dvc2);
698 mtspr(SPRN_DBCR0, debug->dbcr0);
699 mtspr(SPRN_DBCR1, debug->dbcr1);
701 mtspr(SPRN_DBCR2, debug->dbcr2);
705 * If either the old or the new thread is using the debug registers,
706 * set the debug registers from the values
707 * stored in the new thread.
709 void switch_booke_debug_regs(struct debug_reg *new_debug)
711 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
712 || (new_debug->dbcr0 & DBCR0_IDM))
713 prime_debug_regs(new_debug);
715 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
716 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
717 #ifndef CONFIG_HAVE_HW_BREAKPOINT
718 static void set_breakpoint(struct arch_hw_breakpoint *brk)
721 __set_breakpoint(brk);
725 static void set_debug_reg_defaults(struct thread_struct *thread)
727 thread->hw_brk.address = 0;
728 thread->hw_brk.type = 0;
729 if (ppc_breakpoint_available())
730 set_breakpoint(&thread->hw_brk);
732 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
733 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
735 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
736 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
738 mtspr(SPRN_DAC1, dabr);
739 #ifdef CONFIG_PPC_47x
744 #elif defined(CONFIG_PPC_BOOK3S)
745 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
747 mtspr(SPRN_DABR, dabr);
748 if (cpu_has_feature(CPU_FTR_DABRX))
749 mtspr(SPRN_DABRX, dabrx);
752 #elif defined(CONFIG_PPC_8xx)
753 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
755 unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
756 unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
757 unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */
759 if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
761 else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
763 else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
766 mtspr(SPRN_LCTRL2, 0);
767 mtspr(SPRN_CMPE, addr);
768 mtspr(SPRN_CMPF, addr + 4);
769 mtspr(SPRN_LCTRL1, lctrl1);
770 mtspr(SPRN_LCTRL2, lctrl2);
775 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
781 static inline int set_dabr(struct arch_hw_breakpoint *brk)
783 unsigned long dabr, dabrx;
785 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
786 dabrx = ((brk->type >> 3) & 0x7);
789 return ppc_md.set_dabr(dabr, dabrx);
791 return __set_dabr(dabr, dabrx);
794 static inline int set_dawr(struct arch_hw_breakpoint *brk)
796 unsigned long dawr, dawrx, mrd;
800 dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
801 << (63 - 58); /* read/write bits */
802 dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
803 << (63 - 59); /* translate */
804 dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
805 >> 3; /* PRIV bits */
806 /* dawr length is stored in field MDR bits 48:53. Matches range in
807 doublewords (64 bits) biased by -1, e.g. 0b000000=1DW and
809 0b111111=64DW. brk->len is in bytes.
810 This aligns up to double word size, shifts and does the bias.
812 mrd = ((brk->len + 7) >> 3) - 1;
813 dawrx |= (mrd & 0x3f) << (63 - 53);
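	/*
	 * Worked example (illustrative): brk->len = 16 bytes gives
	 * mrd = ((16 + 7) >> 3) - 1 = 1, i.e. a two-doubleword match range
	 * once the -1 bias described above is applied.
	 */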
816 return ppc_md.set_dawr(dawr, dawrx);
817 mtspr(SPRN_DAWR, dawr);
818 mtspr(SPRN_DAWRX, dawrx);
822 void __set_breakpoint(struct arch_hw_breakpoint *brk)
824 memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
826 if (cpu_has_feature(CPU_FTR_DAWR))
829 else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
833 // Shouldn't happen due to higher level checks
837 /* Check if we have DAWR or DABR hardware */
838 bool ppc_breakpoint_available(void)
840 if (cpu_has_feature(CPU_FTR_DAWR))
841 return true; /* POWER8 DAWR */
842 if (cpu_has_feature(CPU_FTR_ARCH_207S))
843 return false; /* POWER9 with DAWR disabled */
844 /* DABR: Everything but POWER8 and POWER9 */
847 EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
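/*
 * Caller sketch (illustrative; mirrors the check in set_debug_reg_defaults()
 * above): code that wants a hardware breakpoint checks availability first
 * and bails out on a DAWR-less POWER9:
 *
 *	if (!ppc_breakpoint_available())
 *		return -ENODEV;
 *	__set_breakpoint(brk);
 */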
849 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
850 struct arch_hw_breakpoint *b)
852 if (a->address != b->address)
854 if (a->type != b->type)
856 if (a->len != b->len)
861 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
863 static inline bool tm_enabled(struct task_struct *tsk)
865 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
868 static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
871 * Use the current MSR TM suspended bit to track if we have
872 * checkpointed state outstanding.
873 * On signal delivery, we'd normally reclaim the checkpointed
874 * state to obtain stack pointer (see: get_tm_stackpointer()).
875 * This will then directly return to userspace without going
876 * through __switch_to(). However, if the stack frame is bad,
877 * we need to exit this thread which calls __switch_to() which
878 * will again attempt to reclaim the already saved tm state.
879 * Hence we need to check that we've not already reclaimed the checkpointed state.
881 * We do this using the current MSR, rather than tracking it in
882 * some specific thread_struct bit, as it has the additional
883 * benefit of checking for a potential TM bad thing exception.
885 if (!MSR_TM_SUSPENDED(mfmsr()))
888 giveup_all(container_of(thr, struct task_struct, thread));
890 tm_reclaim(thr, cause);
893 * If we are in a transaction and FP is off then we can't have
894 * used FP inside that transaction. Hence the checkpointed
895 * state is the same as the live state. We need to copy the
896 * live state to the checkpointed state so that when the
897 * transaction is restored, the checkpointed state is correct
898 * and the aborted transaction sees the correct state. We use
899 * ckpt_regs.msr here as that's what tm_reclaim will use to
900 * determine if it's going to write the checkpointed state or
901 * not. So either this will write the checkpointed registers,
902 * or reclaim will. Similarly for VMX.
904 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
905 memcpy(&thr->ckfp_state, &thr->fp_state,
906 sizeof(struct thread_fp_state));
907 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
908 memcpy(&thr->ckvr_state, &thr->vr_state,
909 sizeof(struct thread_vr_state));
912 void tm_reclaim_current(uint8_t cause)
915 tm_reclaim_thread(&current->thread, cause);
918 static inline void tm_reclaim_task(struct task_struct *tsk)
920 /* We have to work out if we're switching from/to a task that's in the
921 * middle of a transaction.
923 * In switching we need to maintain a 2nd register state as
924 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
925 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
928 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
930 struct thread_struct *thr = &tsk->thread;
935 if (!MSR_TM_ACTIVE(thr->regs->msr))
936 goto out_and_saveregs;
938 WARN_ON(tm_suspend_disabled);
940 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
941 "ccr=%lx, msr=%lx, trap=%lx)\n",
942 tsk->pid, thr->regs->nip,
943 thr->regs->ccr, thr->regs->msr,
946 tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
948 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
952 /* Always save the regs here, even if a transaction's not active.
953 * This context-switches a thread's TM info SPRs. We do it here to
954 * be consistent with the restore path (in recheckpoint) which
955 * cannot happen later in _switch().
960 extern void __tm_recheckpoint(struct thread_struct *thread);
962 void tm_recheckpoint(struct thread_struct *thread)
966 if (!(thread->regs->msr & MSR_TM))
969 /* We really can't be interrupted here as the TEXASR registers can't
970 * change and later in the trecheckpoint code, we have a userspace R1.
971 * So let's hard disable over this region.
973 local_irq_save(flags);
976 /* The TM SPRs are restored here, so that TEXASR.FS can be set
977 * before the trecheckpoint and no explosion occurs.
979 tm_restore_sprs(thread);
981 __tm_recheckpoint(thread);
983 local_irq_restore(flags);
986 static inline void tm_recheckpoint_new_task(struct task_struct *new)
988 if (!cpu_has_feature(CPU_FTR_TM))
991 /* Recheckpoint the registers of the thread we're about to switch to.
993 * If the task was using FP, we non-lazily reload both the original and
994 * the speculative FP register states. This is because the kernel
995 * doesn't see if/when a TM rollback occurs, so if we take an FP
996 * unavailable later, we are unable to determine which set of FP regs
997 * need to be restored.
999 if (!tm_enabled(new))
1002 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
1003 tm_restore_sprs(&new->thread);
1006 /* Recheckpoint to restore original checkpointed register state. */
1007 TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
1008 new->pid, new->thread.regs->msr);
1010 tm_recheckpoint(&new->thread);
1013 * The checkpointed state has been restored but the live state has
1014 * not, ensure all the math functionality is turned off to trigger
1015 * restore_math() to reload.
1017 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
1019 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
1020 "(kernel msr 0x%lx)\n",
1024 static inline void __switch_to_tm(struct task_struct *prev,
1025 struct task_struct *new)
1027 if (cpu_has_feature(CPU_FTR_TM)) {
1028 if (tm_enabled(prev) || tm_enabled(new))
1031 if (tm_enabled(prev)) {
1032 prev->thread.load_tm++;
1033 tm_reclaim_task(prev);
1034 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
1035 prev->thread.regs->msr &= ~MSR_TM;
1038 tm_recheckpoint_new_task(new);
1043 * This is called if we are on the way out to userspace and the
1044 * TIF_RESTORE_TM flag is set. It checks if we need to reload
1045 * FP and/or vector state and does so if necessary.
1046 * If userspace is inside a transaction (whether active or
1047 * suspended) and FP/VMX/VSX instructions have ever been enabled
1048 * inside that transaction, then we have to keep them enabled
1049 * and keep the FP/VMX/VSX state loaded while ever the transaction
1050 * continues. The reason is that if we didn't, and subsequently
1051 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
1052 * we don't know whether it's the same transaction, and thus we
1053 * don't know which of the checkpointed state and the transactional
1056 void restore_tm_state(struct pt_regs *regs)
1058 unsigned long msr_diff;
1061 * This is the only moment we should clear TIF_RESTORE_TM as
1062 * it is here that ckpt_regs.msr and pt_regs.msr become the same
1063 * again, anything else could lead to an incorrect ckpt_msr being
1064 * saved and therefore incorrect signal contexts.
1066 clear_thread_flag(TIF_RESTORE_TM);
1067 if (!MSR_TM_ACTIVE(regs->msr))
1070 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1071 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1073 /* Ensure that restore_math() will restore */
1074 if (msr_diff & MSR_FP)
1075 current->thread.load_fp = 1;
1076 #ifdef CONFIG_ALTIVEC
1077 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1078 current->thread.load_vec = 1;
1082 regs->msr |= msr_diff;
1086 #define tm_recheckpoint_new_task(new)
1087 #define __switch_to_tm(prev, new)
1088 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1090 static inline void save_sprs(struct thread_struct *t)
1092 #ifdef CONFIG_ALTIVEC
1093 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1094 t->vrsave = mfspr(SPRN_VRSAVE);
1096 #ifdef CONFIG_PPC_BOOK3S_64
1097 if (cpu_has_feature(CPU_FTR_DSCR))
1098 t->dscr = mfspr(SPRN_DSCR);
1100 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1101 t->bescr = mfspr(SPRN_BESCR);
1102 t->ebbhr = mfspr(SPRN_EBBHR);
1103 t->ebbrr = mfspr(SPRN_EBBRR);
1105 t->fscr = mfspr(SPRN_FSCR);
1108 * Note that the TAR is not available for use in the kernel.
1109 * (To provide this, the TAR should be backed up/restored on
1110 * exception entry/exit instead, and be in pt_regs. FIXME,
1111 * this should be in pt_regs anyway (for debug).)
1113 t->tar = mfspr(SPRN_TAR);
1117 thread_pkey_regs_save(t);
1120 static inline void restore_sprs(struct thread_struct *old_thread,
1121 struct thread_struct *new_thread)
1123 #ifdef CONFIG_ALTIVEC
1124 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1125 old_thread->vrsave != new_thread->vrsave)
1126 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1128 #ifdef CONFIG_PPC_BOOK3S_64
1129 if (cpu_has_feature(CPU_FTR_DSCR)) {
1130 u64 dscr = get_paca()->dscr_default;
1131 if (new_thread->dscr_inherit)
1132 dscr = new_thread->dscr;
1134 if (old_thread->dscr != dscr)
1135 mtspr(SPRN_DSCR, dscr);
1138 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1139 if (old_thread->bescr != new_thread->bescr)
1140 mtspr(SPRN_BESCR, new_thread->bescr);
1141 if (old_thread->ebbhr != new_thread->ebbhr)
1142 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1143 if (old_thread->ebbrr != new_thread->ebbrr)
1144 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1146 if (old_thread->fscr != new_thread->fscr)
1147 mtspr(SPRN_FSCR, new_thread->fscr);
1149 if (old_thread->tar != new_thread->tar)
1150 mtspr(SPRN_TAR, new_thread->tar);
1153 if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
1154 old_thread->tidr != new_thread->tidr)
1155 mtspr(SPRN_TIDR, new_thread->tidr);
1158 thread_pkey_regs_restore(new_thread, old_thread);
1161 #ifdef CONFIG_PPC_BOOK3S_64
1163 static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE)));
1166 struct task_struct *__switch_to(struct task_struct *prev,
1167 struct task_struct *new)
1169 struct thread_struct *new_thread, *old_thread;
1170 struct task_struct *last;
1171 #ifdef CONFIG_PPC_BOOK3S_64
1172 struct ppc64_tlb_batch *batch;
1175 new_thread = &new->thread;
1176 old_thread = &current->thread;
1178 WARN_ON(!irqs_disabled());
1180 #ifdef CONFIG_PPC_BOOK3S_64
1181 batch = this_cpu_ptr(&ppc64_tlb_batch);
1182 if (batch->active) {
1183 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1185 __flush_tlb_pending(batch);
1188 #endif /* CONFIG_PPC_BOOK3S_64 */
1190 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1191 switch_booke_debug_regs(&new->thread.debug);
1194 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1197 #ifndef CONFIG_HAVE_HW_BREAKPOINT
1198 if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
1199 __set_breakpoint(&new->thread.hw_brk);
1200 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1204 * We need to save SPRs before treclaim/trecheckpoint as these will
1205 * change a number of them.
1207 save_sprs(&prev->thread);
1209 /* Save FPU, Altivec, VSX and SPE state */
1212 __switch_to_tm(prev, new);
1214 if (!radix_enabled()) {
1216 * We can't take a PMU exception inside _switch() since there
1217 * is a window where the kernel stack SLB and the kernel stack
1218 * are out of sync. Hard disable here.
1224 * Call restore_sprs() before calling _switch(). If we move it after
1225 * _switch() then we miss out on calling it for new tasks. The reason
1226 * for this is we manually create a stack frame for new tasks that
1227 * directly returns through ret_from_fork() or
1228 * ret_from_kernel_thread(). See copy_thread() for details.
1230 restore_sprs(old_thread, new_thread);
1232 last = _switch(old_thread, new_thread);
1234 #ifdef CONFIG_PPC_BOOK3S_64
1235 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1236 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1237 batch = this_cpu_ptr(&ppc64_tlb_batch);
1241 if (current_thread_info()->task->thread.regs) {
1242 restore_math(current_thread_info()->task->thread.regs);
1245 * The copy-paste buffer can only store into foreign real
1246 * addresses, so unprivileged processes can not see the
1247 * data or use it in any way unless they have foreign real
1248 * mappings. If the new process has the foreign real address
1249 * mappings, we must issue a cp_abort to clear any state and
1250 * prevent snooping, corruption or a covert channel.
1252 if (current_thread_info()->task->thread.used_vas)
1253 asm volatile(PPC_CP_ABORT);
1255 #endif /* CONFIG_PPC_BOOK3S_64 */
1260 static int instructions_to_print = 16;
1262 static void show_instructions(struct pt_regs *regs)
1265 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
1268 printk("Instruction dump:");
1270 for (i = 0; i < instructions_to_print; i++) {
1276 #if !defined(CONFIG_BOOKE)
1277 /* If executing with the IMMU off, adjust pc rather
1278 * than print XXXXXXXX.
1280 if (!(regs->msr & MSR_IR))
1281 pc = (unsigned long)phys_to_virt(pc);
1284 if (!__kernel_text_address(pc) ||
1285 probe_kernel_address((const void *)pc, instr)) {
1286 pr_cont("XXXXXXXX ");
1288 if (regs->nip == pc)
1289 pr_cont("<%08x> ", instr);
1291 pr_cont("%08x ", instr);
1300 void show_user_instructions(struct pt_regs *regs)
1305 pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
1308 * Make sure the NIP points at userspace, not kernel text/data or
1311 if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
1312 pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
1313 current->comm, current->pid);
1317 pr_info("%s[%d]: code: ", current->comm, current->pid);
1319 for (i = 0; i < instructions_to_print; i++) {
1322 if (!(i % 8) && (i > 0)) {
1324 pr_info("%s[%d]: code: ", current->comm, current->pid);
1327 if (probe_kernel_address((const void *)pc, instr)) {
1328 pr_cont("XXXXXXXX ");
1330 if (regs->nip == pc)
1331 pr_cont("<%08x> ", instr);
1333 pr_cont("%08x ", instr);
1347 static struct regbit msr_bits[] = {
1348 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1370 #ifndef CONFIG_BOOKE
1377 static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1381 for (; bits->bit; ++bits)
1382 if (val & bits->bit) {
1383 pr_cont("%s%s", s, bits->name);
1388 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1389 static struct regbit msr_tm_bits[] = {
1396 static void print_tm_bits(unsigned long val)
1399 * This only prints something if at least one of the TM bits is set.
1400 * Inside the TM[], the output means:
1401 * E: Enabled (bit 32)
1402 * S: Suspended (bit 33)
1403 * T: Transactional (bit 34)
1405 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1407 print_bits(val, msr_tm_bits, "");
1412 static void print_tm_bits(unsigned long val) {}
1415 static void print_msr_bits(unsigned long val)
1418 print_bits(val, msr_bits, ",");
1424 #define REG "%016lx"
1425 #define REGS_PER_LINE 4
1426 #define LAST_VOLATILE 13
1429 #define REGS_PER_LINE 8
1430 #define LAST_VOLATILE 12
1433 void show_regs(struct pt_regs * regs)
1437 show_regs_print_info(KERN_DEFAULT);
1439 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1440 regs->nip, regs->link, regs->ctr);
1441 printk("REGS: %px TRAP: %04lx %s (%s)\n",
1442 regs, regs->trap, print_tainted(), init_utsname()->release);
1443 printk("MSR: "REG" ", regs->msr);
1444 print_msr_bits(regs->msr);
1445 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1447 if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1448 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1449 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1450 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1451 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1453 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1456 pr_cont("IRQMASK: %lx ", regs->softe);
1458 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1459 if (MSR_TM_ACTIVE(regs->msr))
1460 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1463 for (i = 0; i < 32; i++) {
1464 if ((i % REGS_PER_LINE) == 0)
1465 pr_cont("\nGPR%02d: ", i);
1466 pr_cont(REG " ", regs->gpr[i]);
1467 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1471 #ifdef CONFIG_KALLSYMS
1473 * Lookup NIP late so we have the best chance of getting the
1474 * above info out without failing
1476 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1477 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1479 show_stack(current, (unsigned long *) regs->gpr[1]);
1480 if (!user_mode(regs))
1481 show_instructions(regs);
1484 void flush_thread(void)
1486 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1487 flush_ptrace_hw_breakpoint(current);
1488 #else /* CONFIG_HAVE_HW_BREAKPOINT */
1489 set_debug_reg_defaults(&current->thread);
1490 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1493 int set_thread_uses_vas(void)
1495 #ifdef CONFIG_PPC_BOOK3S_64
1496 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1499 current->thread.used_vas = 1;
1502 * Even a process that has no foreign real address mapping can use
1503 * an unpaired COPY instruction (to no real effect). Issue CP_ABORT
1504 * to clear any pending COPY and prevent a covert channel.
1506 * __switch_to() will issue CP_ABORT on future context switches.
1508 asm volatile(PPC_CP_ABORT);
1510 #endif /* CONFIG_PPC_BOOK3S_64 */
1516 * Assign a TIDR (thread ID) for task @t and set it in the thread
1517 * structure. For now, we only support setting TIDR for 'current' task.
1519 * Since the TID value is a truncated form of its PID, it is possible
1520 * (but unlikely) for 2 threads to have the same TID. In the unlikely event
1521 * that 2 threads share the same TID and are waiting, one of the following
1522 * cases will happen:
1524 * 1. The correct thread is running, the wrong thread is not
1525 * In this situation, the correct thread is woken and proceeds to pass its
1528 * 2. Neither thread is running
1529 * In this situation, neither thread will be woken. When scheduled, the waiting
1530 * threads will execute either a wait, which will return immediately, followed
1531 * by a condition check, which will pass for the correct thread and fail
1532 * for the wrong thread, or they will execute the condition check immediately.
1534 * 3. The wrong thread is running, the correct thread is not
1535 * The wrong thread will be woken, but will fail its condition check and
1536 * re-execute wait. The correct thread, when scheduled, will execute either
1537 * its condition check (which will pass), or wait, which returns immediately
1538 * when called the first time after the thread is scheduled, followed by its
1539 * condition check (which will pass).
1541 * 4. Both threads are running
1542 * Both threads will be woken. The wrong thread will fail its condition check
1543 * and execute another wait, while the correct thread will pass its condition
1546 * @t: the task to set the thread ID for
1548 int set_thread_tidr(struct task_struct *t)
1550 if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1559 t->thread.tidr = (u16)task_pid_nr(t);
1560 mtspr(SPRN_TIDR, t->thread.tidr);
1564 EXPORT_SYMBOL_GPL(set_thread_tidr);
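/*
 * Usage sketch (hypothetical driver code): a coprocessor driver that wants
 * the calling thread to be targetable by the hardware assigns a TIDR to it
 * before exposing the thread to the device:
 *
 *	rc = set_thread_tidr(current);
 *	if (rc)
 *		return rc;
 */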
1566 #endif /* CONFIG_PPC64 */
1569 release_thread(struct task_struct *t)
1574 * this gets called so that we can store coprocessor state into memory and
1575 * copy the current task into the new thread.
1577 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1579 flush_all_to_thread(src);
1581 * Flush TM state out so we can copy it. __switch_to_tm() does this
1582 * flush but it removes the checkpointed state from the current CPU and
1583 * transitions the CPU out of TM mode. Hence we need to call
1584 * tm_recheckpoint_new_task() (on the same task) to restore the
1585 * checkpointed state back and the TM mode.
1587 * Can't pass dst because it isn't ready. Doesn't matter, passing
1588 * dst is only important for __switch_to()
1590 __switch_to_tm(src, src);
1594 clear_task_ebb(dst);
1599 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1601 #ifdef CONFIG_PPC_BOOK3S_64
1602 unsigned long sp_vsid;
1603 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1605 if (radix_enabled())
1608 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1609 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1610 << SLB_VSID_SHIFT_1T;
1612 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1614 sp_vsid |= SLB_VSID_KERNEL | llp;
1615 p->thread.ksp_vsid = sp_vsid;
1624 * Copy architecture-specific thread state
1626 int copy_thread(unsigned long clone_flags, unsigned long usp,
1627 unsigned long kthread_arg, struct task_struct *p)
1629 struct pt_regs *childregs, *kregs;
1630 extern void ret_from_fork(void);
1631 extern void ret_from_kernel_thread(void);
1633 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1634 struct thread_info *ti = task_thread_info(p);
1636 klp_init_thread_info(ti);
1638 /* Copy registers */
1639 sp -= sizeof(struct pt_regs);
1640 childregs = (struct pt_regs *) sp;
1641 if (unlikely(p->flags & PF_KTHREAD)) {
1643 memset(childregs, 0, sizeof(struct pt_regs));
1644 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1647 childregs->gpr[14] = ppc_function_entry((void *)usp);
1649 clear_tsk_thread_flag(p, TIF_32BIT);
1650 childregs->softe = IRQS_ENABLED;
1652 childregs->gpr[15] = kthread_arg;
1653 p->thread.regs = NULL; /* no user register state */
1654 ti->flags |= _TIF_RESTOREALL;
1655 f = ret_from_kernel_thread;
1658 struct pt_regs *regs = current_pt_regs();
1659 CHECK_FULL_REGS(regs);
1662 childregs->gpr[1] = usp;
1663 p->thread.regs = childregs;
1664 childregs->gpr[3] = 0; /* Result from fork() */
1665 if (clone_flags & CLONE_SETTLS) {
1667 if (!is_32bit_task())
1668 childregs->gpr[13] = childregs->gpr[6];
1671 childregs->gpr[2] = childregs->gpr[6];
1676 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1677 sp -= STACK_FRAME_OVERHEAD;
1680 * The way this works is that at some point in the future
1681 * some task will call _switch to switch to the new task.
1682 * That will pop off the stack frame created below and start
1683 * the new task running at ret_from_fork. The new task will
1684 * do some house keeping and then return from the fork or clone
1685 * system call, using the stack frame created above.
1687 ((unsigned long *)sp)[0] = 0;
1688 sp -= sizeof(struct pt_regs);
1689 kregs = (struct pt_regs *) sp;
1690 sp -= STACK_FRAME_OVERHEAD;
1693 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1694 _ALIGN_UP(sizeof(struct thread_info), 16);
1696 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1697 p->thread.ptrace_bps[0] = NULL;
1700 p->thread.fp_save_area = NULL;
1701 #ifdef CONFIG_ALTIVEC
1702 p->thread.vr_save_area = NULL;
1705 setup_ksp_vsid(p, sp);
1708 if (cpu_has_feature(CPU_FTR_DSCR)) {
1709 p->thread.dscr_inherit = current->thread.dscr_inherit;
1710 p->thread.dscr = mfspr(SPRN_DSCR);
1712 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1713 p->thread.ppr = INIT_PPR;
1717 kregs->nip = ppc_function_entry(f);
1722 * Set up a thread for executing a new program
1724 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1727 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1731 * If we exec out of a kernel thread then thread.regs will not be
1734 if (!current->thread.regs) {
1735 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1736 current->thread.regs = regs - 1;
1739 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1741 * Clear any transactional state, we're exec()ing. The cause is
1742 * not important as there will never be a recheckpoint so it's not
1745 if (MSR_TM_SUSPENDED(mfmsr()))
1746 tm_reclaim_current(0);
1749 memset(regs->gpr, 0, sizeof(regs->gpr));
1757 * We have just cleared all the nonvolatile GPRs, so make
1758 * FULL_REGS(regs) return true. This is necessary to allow
1759 * ptrace to examine the thread immediately after exec.
1766 regs->msr = MSR_USER;
1768 if (!is_32bit_task()) {
1769 unsigned long entry;
1771 if (is_elf2_task()) {
1772 /* Look ma, no function descriptors! */
1777 * The latest iteration of the ABI requires that when
1778 * calling a function (at its global entry point),
1779 * the caller must ensure r12 holds the entry point
1780 * address (so that the function can quickly
1781 * establish addressability).
1783 regs->gpr[12] = start;
1784 /* Make sure that's restored on entry to userspace. */
1785 set_thread_flag(TIF_RESTOREALL);
1789 /* start is a relocated pointer to the function
1790 * descriptor for the elf _start routine. The first
1791 * entry in the function descriptor is the entry
1792 * address of _start and the second entry is the TOC
1793 * value we need to use.
1795 __get_user(entry, (unsigned long __user *)start);
1796 __get_user(toc, (unsigned long __user *)start+1);
1798 /* Check whether the e_entry function descriptor entries
1799 * need to be relocated before we can use them.
1801 if (load_addr != 0) {
1808 regs->msr = MSR_USER64;
1812 regs->msr = MSR_USER32;
1816 current->thread.used_vsr = 0;
1818 current->thread.load_fp = 0;
1819 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1820 current->thread.fp_save_area = NULL;
1821 #ifdef CONFIG_ALTIVEC
1822 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1823 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1824 current->thread.vr_save_area = NULL;
1825 current->thread.vrsave = 0;
1826 current->thread.used_vr = 0;
1827 current->thread.load_vec = 0;
1828 #endif /* CONFIG_ALTIVEC */
1830 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1831 current->thread.acc = 0;
1832 current->thread.spefscr = 0;
1833 current->thread.used_spe = 0;
1834 #endif /* CONFIG_SPE */
1835 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1836 current->thread.tm_tfhar = 0;
1837 current->thread.tm_texasr = 0;
1838 current->thread.tm_tfiar = 0;
1839 current->thread.load_tm = 0;
1840 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1842 thread_pkey_regs_init(&current->thread);
1844 EXPORT_SYMBOL(start_thread);
1846 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1847 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1849 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1851 struct pt_regs *regs = tsk->thread.regs;
1853 /* This is a bit hairy. If we are an SPE enabled processor
1854 * (have embedded fp) we store the IEEE exception enable flags in
1855 * fpexc_mode. fpexc_mode is also used for setting FP exception
1856 * mode (async, precise, disabled) for 'Classic' FP. */
1857 if (val & PR_FP_EXC_SW_ENABLE) {
1859 if (cpu_has_feature(CPU_FTR_SPE)) {
1861 * When the sticky exception bits are set
1862 * directly by userspace, it must call prctl
1863 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1864 * in the existing prctl settings) or
1865 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1866 * the bits being set). <fenv.h> functions
1867 * saving and restoring the whole
1868 * floating-point environment need to do so
1869 * anyway to restore the prctl settings from
1870 * the saved environment.
1872 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1873 tsk->thread.fpexc_mode = val &
1874 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1884 /* on a CONFIG_SPE this does not hurt us. The bits that
1885 * __pack_fe01 use do not overlap with bits used for
1886 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
1887 * on CONFIG_SPE implementations are reserved so writing to
1888 * them does not change anything */
1889 if (val > PR_FP_EXC_PRECISE)
1891 tsk->thread.fpexc_mode = __pack_fe01(val);
1892 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1893 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1894 | tsk->thread.fpexc_mode;
1898 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1902 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1904 if (cpu_has_feature(CPU_FTR_SPE)) {
1906 * When the sticky exception bits are set
1907 * directly by userspace, it must call prctl
1908 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1909 * in the existing prctl settings) or
1910 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1911 * the bits being set). <fenv.h> functions
1912 * saving and restoring the whole
1913 * floating-point environment need to do so
1914 * anyway to restore the prctl settings from
1915 * the saved environment.
1917 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1918 val = tsk->thread.fpexc_mode;
1925 val = __unpack_fe01(tsk->thread.fpexc_mode);
1926 return put_user(val, (unsigned int __user *) adr);
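/*
 * Userspace sketch (illustrative): the two helpers above back the
 * PR_SET_FPEXC/PR_GET_FPEXC prctls, e.g.
 *
 *	unsigned int mode;
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE | PR_FP_EXC_DIV);
 *	prctl(PR_GET_FPEXC, &mode);
 */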
1929 int set_endian(struct task_struct *tsk, unsigned int val)
1931 struct pt_regs *regs = tsk->thread.regs;
1933 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1934 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1940 if (val == PR_ENDIAN_BIG)
1941 regs->msr &= ~MSR_LE;
1942 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1943 regs->msr |= MSR_LE;
1950 int get_endian(struct task_struct *tsk, unsigned long adr)
1952 struct pt_regs *regs = tsk->thread.regs;
1955 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1956 !cpu_has_feature(CPU_FTR_REAL_LE))
1962 if (regs->msr & MSR_LE) {
1963 if (cpu_has_feature(CPU_FTR_REAL_LE))
1964 val = PR_ENDIAN_LITTLE;
1966 val = PR_ENDIAN_PPC_LITTLE;
1968 val = PR_ENDIAN_BIG;
1970 return put_user(val, (unsigned int __user *)adr);
1973 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1975 tsk->thread.align_ctl = val;
1979 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1981 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1984 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1985 unsigned long nbytes)
1987 unsigned long stack_page;
1988 unsigned long cpu = task_cpu(p);
1991 * Avoid crashing if the stack has overflowed and corrupted
1992 * task_cpu(p), which is in the thread_info struct.
1994 if (cpu < NR_CPUS && cpu_possible(cpu)) {
1995 stack_page = (unsigned long) hardirq_ctx[cpu];
1996 if (sp >= stack_page + sizeof(struct thread_struct)
1997 && sp <= stack_page + THREAD_SIZE - nbytes)
2000 stack_page = (unsigned long) softirq_ctx[cpu];
2001 if (sp >= stack_page + sizeof(struct thread_struct)
2002 && sp <= stack_page + THREAD_SIZE - nbytes)
2008 int validate_sp(unsigned long sp, struct task_struct *p,
2009 unsigned long nbytes)
2011 unsigned long stack_page = (unsigned long)task_stack_page(p);
2013 if (sp >= stack_page + sizeof(struct thread_struct)
2014 && sp <= stack_page + THREAD_SIZE - nbytes)
2017 return valid_irq_stack(sp, p, nbytes);
2020 EXPORT_SYMBOL(validate_sp);
2022 unsigned long get_wchan(struct task_struct *p)
2024 unsigned long ip, sp;
2027 if (!p || p == current || p->state == TASK_RUNNING)
2031 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
2035 sp = *(unsigned long *)sp;
2036 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2037 p->state == TASK_RUNNING)
2040 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
2041 if (!in_sched_functions(ip))
2044 } while (count++ < 16);
2048 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2050 void show_stack(struct task_struct *tsk, unsigned long *stack)
2052 unsigned long sp, ip, lr, newsp;
2055 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2056 int curr_frame = current->curr_ret_stack;
2057 extern void return_to_handler(void);
2058 unsigned long rth = (unsigned long)return_to_handler;
2061 sp = (unsigned long) stack;
2066 sp = current_stack_pointer();
2068 sp = tsk->thread.ksp;
2072 printk("Call Trace:\n");
2074 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
2077 stack = (unsigned long *) sp;
2079 ip = stack[STACK_FRAME_LR_SAVE];
2080 if (!firstframe || ip != lr) {
2081 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2082 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2083 if ((ip == rth) && curr_frame >= 0) {
2085 (void *)current->ret_stack[curr_frame].ret);
2090 pr_cont(" (unreliable)");
2096 * See if this is an exception frame.
2097 * We look for the "regshere" marker in the current frame.
2099 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
2100 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
2101 struct pt_regs *regs = (struct pt_regs *)
2102 (sp + STACK_FRAME_OVERHEAD);
2104 printk("--- interrupt: %lx at %pS\n LR = %pS\n",
2105 regs->trap, (void *)regs->nip, (void *)lr);
2110 } while (count++ < kstack_depth_to_print);
2114 /* Called with hard IRQs off */
2115 void notrace __ppc64_runlatch_on(void)
2117 struct thread_info *ti = current_thread_info();
2119 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2121 * Least significant bit (RUN) is the only writable bit of
2122 * the CTRL register, so we can avoid mfspr. 2.06 is not the
2123 * earliest ISA where this is the case, but it's convenient.
2125 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2130 * Some architectures (e.g., Cell) have writable fields other
2131 * than RUN, so do the read-modify-write.
2133 ctrl = mfspr(SPRN_CTRLF);
2134 ctrl |= CTRL_RUNLATCH;
2135 mtspr(SPRN_CTRLT, ctrl);
2138 ti->local_flags |= _TLF_RUNLATCH;
2141 /* Called with hard IRQs off */
2142 void notrace __ppc64_runlatch_off(void)
2144 struct thread_info *ti = current_thread_info();
2146 ti->local_flags &= ~_TLF_RUNLATCH;
2148 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2149 mtspr(SPRN_CTRLT, 0);
2153 ctrl = mfspr(SPRN_CTRLF);
2154 ctrl &= ~CTRL_RUNLATCH;
2155 mtspr(SPRN_CTRLT, ctrl);
2158 #endif /* CONFIG_PPC64 */
2160 unsigned long arch_align_stack(unsigned long sp)
2162 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2163 sp -= get_random_int() & ~PAGE_MASK;
2167 static inline unsigned long brk_rnd(void)
2169 unsigned long rnd = 0;
2171 /* 8MB for 32bit, 1GB for 64bit */
2172 if (is_32bit_task())
2173 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2175 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2177 return rnd << PAGE_SHIFT;
2180 unsigned long arch_randomize_brk(struct mm_struct *mm)
2182 unsigned long base = mm->brk;
2185 #ifdef CONFIG_PPC_BOOK3S_64
2187 * If we are using 1TB segments and we are allowed to randomise
2188 * the heap, we can put it above 1TB so it is backed by a 1TB
2189 * segment. Otherwise the heap will be in the bottom 1TB
2190 * which always uses 256MB segments and this may result in a
2191 * performance penalty. We don't need to worry about radix. For
2192 * radix, mmu_highuser_ssize remains unchanged from 256MB.
2194 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2195 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2198 ret = PAGE_ALIGN(base + brk_rnd());