/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/elf-randomize.h>
#include <linux/pkeys.h>

#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/hw_irq.h>
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Are we running in "Suspend disabled" mode? If so we have to block any
 * sigreturn that would get us into suspended state, and we also warn in some
 * other paths that we should never reach with suspend disabled.
 */
bool tm_suspend_disabled __ro_after_init = false;

static void check_if_tm_restore_required(struct task_struct *tsk)
{
        /*
         * If we are saving the current thread's registers, and the
         * thread is in a transactional state, set the TIF_RESTORE_TM
         * bit so that we know to restore the registers before
         * returning to userspace.
         */
        if (tsk == current && tsk->thread.regs &&
            MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
            !test_thread_flag(TIF_RESTORE_TM)) {
                tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
                set_thread_flag(TIF_RESTORE_TM);
        }
}

static inline bool msr_tm_active(unsigned long msr)
{
        return MSR_TM_ACTIVE(msr);
}

static bool tm_active_with_fp(struct task_struct *tsk)
{
        return msr_tm_active(tsk->thread.regs->msr) &&
                (tsk->thread.ckpt_regs.msr & MSR_FP);
}

static bool tm_active_with_altivec(struct task_struct *tsk)
{
        return msr_tm_active(tsk->thread.regs->msr) &&
                (tsk->thread.ckpt_regs.msr & MSR_VEC);
}
#else
static inline bool msr_tm_active(unsigned long msr) { return false; }
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
        strict_msr_control = true;
        pr_info("Enabling strict facility control\n");

        return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);

unsigned long msr_check_and_set(unsigned long bits)
{
        unsigned long oldmsr = mfmsr();
        unsigned long newmsr;

        newmsr = oldmsr | bits;

#ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
                newmsr |= MSR_VSX;
#endif

        if (oldmsr != newmsr)
                mtmsr_isync(newmsr);

        return newmsr;
}

void __msr_check_and_clear(unsigned long bits)
{
        unsigned long oldmsr = mfmsr();
        unsigned long newmsr;

        newmsr = oldmsr & ~bits;

#ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
                newmsr &= ~MSR_VSX;
#endif

        if (oldmsr != newmsr)
                mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
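
/*
 * Illustrative sketch (not part of the original file): the bracket that
 * callers build on the two helpers above.  The facility bits are set in
 * the MSR, the facility registers are touched, then the bits are cleared
 * again.  The function name is hypothetical.
 */
#if 0   /* example only */
static void msr_bracket_example(void)
{
        msr_check_and_set(MSR_FP);      /* turn MSR[FP] (and MSR[VSX]) on */
        /* ... FP registers may be accessed here ... */
        msr_check_and_clear(MSR_FP);    /* turn them back off */
}
#endif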

#ifdef CONFIG_PPC_FPU
void __giveup_fpu(struct task_struct *tsk)
{
        unsigned long msr;

        save_fpu(tsk);
        msr = tsk->thread.regs->msr;
        msr &= ~MSR_FP;
#ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX))
                msr &= ~MSR_VSX;
#endif
        tsk->thread.regs->msr = msr;
}

void giveup_fpu(struct task_struct *tsk)
{
        check_if_tm_restore_required(tsk);

        msr_check_and_set(MSR_FP);
        __giveup_fpu(tsk);
        msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct.  That process could take over the
                 * FPU, and then when we get scheduled again we would store
                 * bogus values for the remaining FP registers.
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
                         * the FP register state on context switch,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
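
/*
 * Illustrative sketch (not part of the original file): how a reader of a
 * stopped child's FP state, e.g. a ptrace regset handler, would use the
 * helper above before looking at tsk->thread.fp_state.  The function name
 * is hypothetical.
 */
#if 0   /* example only */
static u64 read_child_fpr0_example(struct task_struct *child)
{
        flush_fp_to_thread(child);      /* make thread.fp_state current */
        return child->thread.fp_state.fpr[0][0];
}
#endif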

void enable_kernel_fp(void)
{
        unsigned long cpumsr;

        WARN_ON(preemptible());

        cpumsr = msr_check_and_set(MSR_FP);

        if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
                check_if_tm_restore_required(current);
                /*
                 * If a thread has already been reclaimed then the
                 * checkpointed registers are on the CPU but have definitely
                 * been saved by the reclaim code.  We don't need to, and
                 * *cannot*, give up, as that would save to the 'live'
                 * structure, not the checkpointed structure.
                 */
                if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
                        return;
                __giveup_fpu(current);
        }
}
EXPORT_SYMBOL(enable_kernel_fp);
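
/*
 * Illustrative sketch (not part of the original file): kernel code that
 * wants to execute FP instructions must call enable_kernel_fp() with
 * preemption disabled (note the WARN_ON above), since nothing re-saves
 * the FP state if the task is switched out.  The function name is
 * hypothetical.
 */
#if 0   /* example only */
static void kernel_fp_user_example(void)
{
        preempt_disable();
        enable_kernel_fp();
        /* ... FP instructions may be used here ... */
        preempt_enable();
}
#endif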

static int restore_fp(struct task_struct *tsk)
{
        if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
                load_fp_state(&current->thread.fp_state);
                current->thread.load_fp++;
                return 1;
        }
        return 0;
}
#else
static int restore_fp(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
#define loadvec(thr) ((thr).load_vec)

static void __giveup_altivec(struct task_struct *tsk)
{
        unsigned long msr;

        save_altivec(tsk);
        msr = tsk->thread.regs->msr;
        msr &= ~MSR_VEC;
#ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX))
                msr &= ~MSR_VSX;
#endif
        tsk->thread.regs->msr = msr;
}

void giveup_altivec(struct task_struct *tsk)
{
        check_if_tm_restore_required(tsk);

        msr_check_and_set(MSR_VEC);
        __giveup_altivec(tsk);
        msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);

void enable_kernel_altivec(void)
{
        unsigned long cpumsr;

        WARN_ON(preemptible());

        cpumsr = msr_check_and_set(MSR_VEC);

        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
                check_if_tm_restore_required(current);
                /*
                 * If a thread has already been reclaimed then the
                 * checkpointed registers are on the CPU but have definitely
                 * been saved by the reclaim code.  We don't need to, and
                 * *cannot*, give up, as that would save to the 'live'
                 * structure, not the checkpointed structure.
                 */
                if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
                        return;
                __giveup_altivec(current);
        }
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
                        BUG_ON(tsk != current);
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);

static int restore_altivec(struct task_struct *tsk)
{
        if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
                (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
                load_vr_state(&tsk->thread.vr_state);
                tsk->thread.used_vr = 1;
                tsk->thread.load_vec++;

                return 1;
        }
        return 0;
}
#else
#define loadvec(thr) 0
static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
        unsigned long msr = tsk->thread.regs->msr;

        /*
         * We should never be setting MSR_VSX without also setting
         * MSR_FP and MSR_VEC.
         */
        WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));

        /* __giveup_fpu will clear MSR_VSX */
        if (msr & MSR_FP)
                __giveup_fpu(tsk);
        if (msr & MSR_VEC)
                __giveup_altivec(tsk);
}

static void giveup_vsx(struct task_struct *tsk)
{
        check_if_tm_restore_required(tsk);

        msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
        __giveup_vsx(tsk);
        msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

void enable_kernel_vsx(void)
{
        unsigned long cpumsr;

        WARN_ON(preemptible());

        cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

        if (current->thread.regs &&
            (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
                check_if_tm_restore_required(current);
                /*
                 * If a thread has already been reclaimed then the
                 * checkpointed registers are on the CPU but have definitely
                 * been saved by the reclaim code.  We don't need to, and
                 * *cannot*, give up, as that would save to the 'live'
                 * structure, not the checkpointed structure.
                 */
                if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
                        return;
                __giveup_vsx(current);
        }
}
EXPORT_SYMBOL(enable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
                        BUG_ON(tsk != current);
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);

static int restore_vsx(struct task_struct *tsk)
{
        if (cpu_has_feature(CPU_FTR_VSX)) {
                tsk->thread.used_vsr = 1;
                return 1;
        }

        return 0;
}
#else
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
        check_if_tm_restore_required(tsk);

        msr_check_and_set(MSR_SPE);
        __giveup_spe(tsk);
        msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

        msr_check_and_set(MSR_SPE);

        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
                check_if_tm_restore_required(current);
                __giveup_spe(current);
        }
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
                        BUG_ON(tsk != current);
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_SPE */

static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
#ifdef CONFIG_PPC_FPU
        msr_all_available |= MSR_FP;
#endif
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                msr_all_available |= MSR_VEC;
#endif
#ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX))
                msr_all_available |= MSR_VSX;
#endif
#ifdef CONFIG_SPE
        if (cpu_has_feature(CPU_FTR_SPE))
                msr_all_available |= MSR_SPE;
#endif

        return 0;
}
early_initcall(init_msr_all_available);

void giveup_all(struct task_struct *tsk)
{
        unsigned long usermsr;

        if (!tsk->thread.regs)
                return;

        usermsr = tsk->thread.regs->msr;

        if ((usermsr & msr_all_available) == 0)
                return;

        msr_check_and_set(msr_all_available);
        check_if_tm_restore_required(tsk);

        WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

#ifdef CONFIG_PPC_FPU
        if (usermsr & MSR_FP)
                __giveup_fpu(tsk);
#endif
#ifdef CONFIG_ALTIVEC
        if (usermsr & MSR_VEC)
                __giveup_altivec(tsk);
#endif
#ifdef CONFIG_SPE
        if (usermsr & MSR_SPE)
                __giveup_spe(tsk);
#endif

        msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);

void restore_math(struct pt_regs *regs)
{
        unsigned long msr;

        if (!msr_tm_active(regs->msr) &&
                !current->thread.load_fp && !loadvec(current->thread))
                return;

        msr = regs->msr;
        msr_check_and_set(msr_all_available);

        /*
         * Only reload if the bit is not set in the user MSR; the bit being
         * set indicates that the registers are hot.
         */
        if ((!(msr & MSR_FP)) && restore_fp(current))
                msr |= MSR_FP | current->thread.fpexc_mode;

        if ((!(msr & MSR_VEC)) && restore_altivec(current))
                msr |= MSR_VEC;

        if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
                        restore_vsx(current)) {
                msr |= MSR_VSX;
        }

        msr_check_and_clear(msr_all_available);

        regs->msr = msr;
}

void save_all(struct task_struct *tsk)
{
        unsigned long usermsr;

        if (!tsk->thread.regs)
                return;

        usermsr = tsk->thread.regs->msr;

        if ((usermsr & msr_all_available) == 0)
                return;

        msr_check_and_set(msr_all_available);

        WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

        if (usermsr & MSR_FP)
                save_fpu(tsk);

        if (usermsr & MSR_VEC)
                save_altivec(tsk);

        if (usermsr & MSR_SPE)
                __giveup_spe(tsk);

        msr_check_and_clear(msr_all_available);
}

void flush_all_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                BUG_ON(tsk != current);
                save_all(tsk);

#ifdef CONFIG_SPE
                if (tsk->thread.regs->msr & MSR_SPE)
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif

                preempt_enable();
        }
}
EXPORT_SYMBOL(flush_all_to_thread);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int breakpt)
{
        current->thread.trap_nr = TRAP_HWBKPT;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
                                    (void __user *)address);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
                    unsigned long error_code)
{
        siginfo_t info;

        current->thread.trap_nr = TRAP_HWBKPT;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        if (debugger_break_match(regs))
                return;

        /* Clear the breakpoint */
        hw_breakpoint_disable();

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
        thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
        thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
        /*
         * Force User/Supervisor bits to 0b11 (user-only MSR[PR]=1)
         */
        thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
                        DBCR1_IAC3US | DBCR1_IAC4US;
        /*
         * Force Data Address Compare User/Supervisor bits to be User-only
         * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
         */
        thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
        thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
        /*
         * We could have inherited MSR_DE from userspace, since
         * it doesn't get cleared on exception entry.  Make sure
         * MSR_DE is clear before we enable any debug events.
         */
        mtmsr(mfmsr() & ~MSR_DE);

        mtspr(SPRN_IAC1, debug->iac1);
        mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        mtspr(SPRN_IAC3, debug->iac3);
        mtspr(SPRN_IAC4, debug->iac4);
#endif
        mtspr(SPRN_DAC1, debug->dac1);
        mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        mtspr(SPRN_DVC1, debug->dvc1);
        mtspr(SPRN_DVC2, debug->dvc2);
#endif
        mtspr(SPRN_DBCR0, debug->dbcr0);
        mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
        mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old nor the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
        if ((current->thread.debug.dbcr0 & DBCR0_IDM)
                || (new_debug->dbcr0 & DBCR0_IDM))
                        prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->hw_brk.address = 0;
        thread->hw_brk.type = 0;
        set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
        isync();
#endif
        return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        mtspr(SPRN_DABR, dabr);
        if (cpu_has_feature(CPU_FTR_DABRX))
                mtspr(SPRN_DABRX, dabrx);
        return 0;
}
#elif defined(CONFIG_PPC_8xx)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
        unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
        unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */

        if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
                lctrl1 |= 0xa0000;
        else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
                lctrl1 |= 0xf0000;
        else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
                lctrl2 = 0;

        mtspr(SPRN_LCTRL2, 0);
        mtspr(SPRN_CMPE, addr);
        mtspr(SPRN_CMPF, addr + 4);
        mtspr(SPRN_LCTRL1, lctrl1);
        mtspr(SPRN_LCTRL2, lctrl2);

        return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
        unsigned long dabr, dabrx;

        dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
        dabrx = ((brk->type >> 3) & 0x7);

        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr, dabrx);

        return __set_dabr(dabr, dabrx);
}

static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
        unsigned long dawr, dawrx, mrd;

        dawr = brk->address;

        dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
                                   << (63 - 58); /* read/write bits */
        dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
                                   << (63 - 59); /* translate */
        dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
                                   >> 3; /* PRIV bits */
        /*
         * The DAWR length is stored in the MRD field, bits 48:53.  It
         * matches a range in doublewords (64 bits), biased by -1, e.g.
         * 0b000000 = 1 DW and 0b111111 = 64 DW.  brk->len is in bytes.
         * This aligns up to double word size, shifts and applies the bias.
         */
        mrd = ((brk->len + 7) >> 3) - 1;
        dawrx |= (mrd & 0x3f) << (63 - 53);

        if (ppc_md.set_dawr)
                return ppc_md.set_dawr(dawr, dawrx);
        mtspr(SPRN_DAWR, dawr);
        mtspr(SPRN_DAWRX, dawrx);
        return 0;
}
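
/*
 * Worked example for the MRD encoding above (illustrative): brk->len = 1
 * gives mrd = ((1 + 7) >> 3) - 1 = 0, a 1-doubleword match range, while
 * brk->len = 16 gives mrd = ((16 + 7) >> 3) - 1 = 1, the biased value for
 * a 2-doubleword (16-byte) range.
 */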

void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
        memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

        if (cpu_has_feature(CPU_FTR_DAWR))
                set_dawr(brk);
        else
                set_dabr(brk);
}

void set_breakpoint(struct arch_hw_breakpoint *brk)
{
        preempt_disable();
        __set_breakpoint(brk);
        preempt_enable();
}

/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
        if (cpu_has_feature(CPU_FTR_DAWR))
                return true; /* POWER8 DAWR */
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                return false; /* POWER9 with DAWR disabled */
        /* DABR: everything but POWER8 and POWER9 */
        return true;
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
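
/*
 * Illustrative sketch (not part of the original file): a caller such as
 * the ptrace code would probe for breakpoint hardware before trying to
 * install one.  The function name and error choice are hypothetical.
 */
#if 0   /* example only */
static int install_breakpoint_example(struct arch_hw_breakpoint *brk)
{
        if (!ppc_breakpoint_available())
                return -ENODEV; /* e.g. POWER9 with DAWR disabled */
        set_breakpoint(brk);
        return 0;
}
#endif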

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
                              struct arch_hw_breakpoint *b)
{
        if (a->address != b->address)
                return false;
        if (a->type != b->type)
                return false;
        if (a->len != b->len)
                return false;
        return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

static inline bool tm_enabled(struct task_struct *tsk)
{
        return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}

static void tm_reclaim_thread(struct thread_struct *thr,
                              struct thread_info *ti, uint8_t cause)
{
        /*
         * Use the current MSR TM suspended bit to track if we have
         * checkpointed state outstanding.
         * On signal delivery, we'd normally reclaim the checkpointed
         * state to obtain the stack pointer (see get_tm_stackpointer()).
         * This will then directly return to userspace without going
         * through __switch_to().  However, if the stack frame is bad,
         * we need to exit this thread, which calls __switch_to(), which
         * will again attempt to reclaim the already saved tm state.
         * Hence we need to check that we've not already reclaimed
         * this state.
         * We do this using the current MSR, rather than tracking it in
         * some specific thread_struct bit, as it has the additional
         * benefit of checking for a potential TM bad thing exception.
         */
        if (!MSR_TM_SUSPENDED(mfmsr()))
                return;

        giveup_all(container_of(thr, struct task_struct, thread));

        tm_reclaim(thr, cause);

        /*
         * If we are in a transaction and FP is off then we can't have
         * used FP inside that transaction.  Hence the checkpointed
         * state is the same as the live state.  We need to copy the
         * live state to the checkpointed state so that when the
         * transaction is restored, the checkpointed state is correct
         * and the aborted transaction sees the correct state.  We use
         * ckpt_regs.msr here as that's what tm_reclaim will use to
         * determine if it's going to write the checkpointed state or
         * not.  So either this will write the checkpointed registers,
         * or reclaim will.  Similarly for VMX.
         */
        if ((thr->ckpt_regs.msr & MSR_FP) == 0)
                memcpy(&thr->ckfp_state, &thr->fp_state,
                       sizeof(struct thread_fp_state));
        if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
                memcpy(&thr->ckvr_state, &thr->vr_state,
                       sizeof(struct thread_vr_state));
}

void tm_reclaim_current(uint8_t cause)
{
        tm_enable();
        tm_reclaim_thread(&current->thread, current_thread_info(), cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
        /* We have to work out if we're switching from/to a task that's in the
         * middle of a transaction.
         *
         * In switching we need to maintain a 2nd register state as
         * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
         * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
         * ckvr_state.
         *
         * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
         */
        struct thread_struct *thr = &tsk->thread;

        if (!thr->regs)
                return;

        if (!MSR_TM_ACTIVE(thr->regs->msr))
                goto out_and_saveregs;

        WARN_ON(tm_suspend_disabled);

        TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
                 "ccr=%lx, msr=%lx, trap=%lx)\n",
                 tsk->pid, thr->regs->nip,
                 thr->regs->ccr, thr->regs->msr,
                 thr->regs->trap);

        tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

        TM_DEBUG("--- tm_reclaim on pid %d complete\n",
                 tsk->pid);

out_and_saveregs:
        /* Always save the regs here, even if a transaction's not active.
         * This context-switches a thread's TM info SPRs.  We do it here to
         * be consistent with the restore path (in recheckpoint) which
         * cannot happen later in _switch().
         */
        tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread);

void tm_recheckpoint(struct thread_struct *thread)
{
        unsigned long flags;

        if (!(thread->regs->msr & MSR_TM))
                return;

        /* We really can't be interrupted here, as the TEXASR registers can't
         * change and later in the trecheckpoint code we have a userspace R1.
         * So let's hard disable over this region.
         */
        local_irq_save(flags);
        hard_irq_disable();

        /* The TM SPRs are restored here, so that TEXASR.FS can be set
         * before the trecheckpoint and no explosion occurs.
         */
        tm_restore_sprs(thread);

        __tm_recheckpoint(thread);

        local_irq_restore(flags);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
        if (!cpu_has_feature(CPU_FTR_TM))
                return;

        /* Recheckpoint the registers of the thread we're about to switch to.
         *
         * If the task was using FP, we non-lazily reload both the original and
         * the speculative FP register states.  This is because the kernel
         * doesn't see if/when a TM rollback occurs, so if we take an FP
         * unavailable later, we are unable to determine which set of FP regs
         * need to be restored.
         */
        if (!tm_enabled(new))
                return;

        if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
                tm_restore_sprs(&new->thread);
                return;
        }
        /* Recheckpoint to restore original checkpointed register state. */
        TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
                 new->pid, new->thread.regs->msr);

        tm_recheckpoint(&new->thread);

        /*
         * The checkpointed state has been restored but the live state has
         * not; ensure all the math functionality is turned off to trigger
         * restore_math() to reload.
         */
        new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);

        TM_DEBUG("*** tm_recheckpoint of pid %d complete "
                 "(kernel msr 0x%lx)\n",
                 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev,
                struct task_struct *new)
{
        if (cpu_has_feature(CPU_FTR_TM)) {
                if (tm_enabled(prev) || tm_enabled(new))
                        tm_enable();

                if (tm_enabled(prev)) {
                        prev->thread.load_tm++;
                        tm_reclaim_task(prev);
                        if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
                                prev->thread.regs->msr &= ~MSR_TM;
                }

                tm_recheckpoint_new_task(new);
        }
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded for as long as the
 * transaction continues.  The reason is that if we didn't, and
 * subsequently got a FP/VMX/VSX unavailable interrupt inside a
 * transaction, we don't know whether it's the same transaction,
 * and thus we don't know which of the checkpointed state and the
 * transactional state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
        unsigned long msr_diff;

        /*
         * This is the only moment we should clear TIF_RESTORE_TM, as
         * it is here that ckpt_regs.msr and pt_regs.msr become the same
         * again; anything else could lead to an incorrect ckpt_msr being
         * saved and therefore incorrect signal contexts.
         */
        clear_thread_flag(TIF_RESTORE_TM);
        if (!MSR_TM_ACTIVE(regs->msr))
                return;

        msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
        msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

        /* Ensure that restore_math() will restore */
        if (msr_diff & MSR_FP)
                current->thread.load_fp = 1;
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
                current->thread.load_vec = 1;
#endif
        restore_math(regs);

        regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        if (cpu_has_feature(CPU_FTR_DSCR))
                t->dscr = mfspr(SPRN_DSCR);

        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
                t->bescr = mfspr(SPRN_BESCR);
                t->ebbhr = mfspr(SPRN_EBBHR);
                t->ebbrr = mfspr(SPRN_EBBRR);

                t->fscr = mfspr(SPRN_FSCR);

                /*
                 * Note that the TAR is not available for use in the kernel.
                 * (To provide this, the TAR should be backed up/restored on
                 * exception entry/exit instead, and be in pt_regs.  FIXME,
                 * this should be in pt_regs anyway (for debug).)
                 */
                t->tar = mfspr(SPRN_TAR);
        }
#endif

        thread_pkey_regs_save(t);
}

static inline void restore_sprs(struct thread_struct *old_thread,
                                struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
            old_thread->vrsave != new_thread->vrsave)
                mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                u64 dscr = get_paca()->dscr_default;
                if (new_thread->dscr_inherit)
                        dscr = new_thread->dscr;

                if (old_thread->dscr != dscr)
                        mtspr(SPRN_DSCR, dscr);
        }

        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
                if (old_thread->bescr != new_thread->bescr)
                        mtspr(SPRN_BESCR, new_thread->bescr);
                if (old_thread->ebbhr != new_thread->ebbhr)
                        mtspr(SPRN_EBBHR, new_thread->ebbhr);
                if (old_thread->ebbrr != new_thread->ebbrr)
                        mtspr(SPRN_EBBRR, new_thread->ebbrr);

                if (old_thread->fscr != new_thread->fscr)
                        mtspr(SPRN_FSCR, new_thread->fscr);

                if (old_thread->tar != new_thread->tar)
                        mtspr(SPRN_TAR, new_thread->tar);
        }

        if (cpu_has_feature(CPU_FTR_ARCH_300) &&
            old_thread->tidr != new_thread->tidr)
                mtspr(SPRN_TIDR, new_thread->tidr);
#endif

        thread_pkey_regs_restore(new_thread, old_thread);
}

#ifdef CONFIG_PPC_BOOK3S_64
#define CP_SIZE 128
static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE)));
#endif

struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
        struct ppc64_tlb_batch *batch;
#endif

        new_thread = &new->thread;
        old_thread = &current->thread;

        WARN_ON(!irqs_disabled());

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
                unsigned long start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
        batch = this_cpu_ptr(&ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
                        __flush_tlb_pending(batch);
                batch->active = 0;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule the DABR.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
        if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
                __set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

        /*
         * We need to save SPRs before treclaim/trecheckpoint as these will
         * change a number of them.
         */
        save_sprs(&prev->thread);

        /* Save FPU, Altivec, VSX and SPE state */
        giveup_all(prev);

        __switch_to_tm(prev, new);

        if (!radix_enabled()) {
                /*
                 * We can't take a PMU exception inside _switch() since there
                 * is a window where the kernel stack SLB and the kernel stack
                 * are out of sync.  Hard disable here.
                 */
                hard_irq_disable();
        }

        /*
         * Call restore_sprs() before calling _switch().  If we move it after
         * _switch() then we miss out on calling it for new tasks.  The reason
         * for this is we manually create a stack frame for new tasks that
         * directly returns through ret_from_fork() or
         * ret_from_kernel_thread().  See copy_thread() for details.
         */
        restore_sprs(old_thread, new_thread);

        last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                batch = this_cpu_ptr(&ppc64_tlb_batch);
                batch->active = 1;
        }

        if (current_thread_info()->task->thread.regs) {
                restore_math(current_thread_info()->task->thread.regs);

                /*
                 * The copy-paste buffer can only store into foreign real
                 * addresses, so unprivileged processes can not see the
                 * data or use it in any way unless they have foreign real
                 * mappings.  If the new process has the foreign real address
                 * mappings, we must issue a cp_abort to clear any state and
                 * prevent snooping, corruption or a covert channel.
                 *
                 * DD1 allows paste into normal system memory, so we do an
                 * unpaired copy, rather than cp_abort, to clear the buffer,
                 * since cp_abort is quite expensive.
                 */
                if (current_thread_info()->task->thread.used_vas) {
                        asm volatile(PPC_CP_ABORT);
                } else if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
                        asm volatile(PPC_COPY(%0, %1)
                                        : : "r"(dummy_copy_buffer), "r"(0));
                }
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        pr_cont("\n");

#if !defined(CONFIG_BOOKE)
                /* If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif

                if (!__kernel_text_address(pc) ||
                     probe_kernel_address((unsigned int __user *)pc, instr)) {
                        pr_cont("XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                pr_cont("<%08x> ", instr);
                        else
                                pr_cont("%08x ", instr);
                }

                pc += sizeof(int);
        }

        pr_cont("\n");
}

struct regbit {
        unsigned long bit;
        const char *name;
};

static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
        {MSR_SF,        "SF"},
        {MSR_HV,        "HV"},
#endif
        {MSR_VEC,       "VEC"},
        {MSR_VSX,       "VSX"},
#ifdef CONFIG_BOOKE
        {MSR_CE,        "CE"},
#endif
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_ME,        "ME"},
#ifdef CONFIG_BOOKE
        {MSR_DE,        "DE"},
#else
        {MSR_SE,        "SE"},
        {MSR_BE,        "BE"},
#endif
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {MSR_PMM,       "PMM"},
#ifndef CONFIG_BOOKE
        {MSR_RI,        "RI"},
        {MSR_LE,        "LE"},
#endif
        {0,             NULL}
};

static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
        const char *s = "";

        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        pr_cont("%s%s", s, bits->name);
                        s = sep;
                }
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
        {MSR_TS_T,      "T"},
        {MSR_TS_S,      "S"},
        {MSR_TM,        "E"},
        {0,             NULL}
};

static void print_tm_bits(unsigned long val)
{
/*
 * This only prints something if at least one of the TM bits is set.
 * Inside the TM[], the output means:
 *   E: Enabled         (bit 32)
 *   S: Suspended       (bit 33)
 *   T: Transactional   (bit 34)
 */
        if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
                pr_cont(",TM[");
                print_bits(val, msr_tm_bits, "");
                pr_cont("]");
        }
}
#else
static void print_tm_bits(unsigned long val) {}
#endif

static void print_msr_bits(unsigned long val)
{
        pr_cont("<");
        print_bits(val, msr_bits, ",");
        print_tm_bits(val);
        pr_cont(">");
}

#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif

void show_regs(struct pt_regs *regs)
{
        int i, trap;

        show_regs_print_info(KERN_DEFAULT);

        printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR:  "REG" ", regs->msr);
        print_msr_bits(regs->msr);
        pr_cont("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
        trap = TRAP(regs);
        if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
                pr_cont("CFAR: "REG" ", regs->orig_gpr3);
        if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
                pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
        pr_cont("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (MSR_TM_ACTIVE(regs->msr))
                pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

        for (i = 0;  i < 32;  i++) {
                if ((i % REGS_PER_LINE) == 0)
                        pr_cont("\nGPR%02d: ", i);
                pr_cont(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        pr_cont("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Look up the NIP late so we have the best chance of getting the
         * above info out without failing.
         */
        printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
        printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}

void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

int set_thread_uses_vas(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return -EINVAL;

        current->thread.used_vas = 1;

        /*
         * Even a process that has no foreign real address mapping can use
         * an unpaired COPY instruction (to no real effect).  Issue CP_ABORT
         * to clear any pending COPY and prevent a covert channel.
         *
         * __switch_to() will issue CP_ABORT on future context switches.
         */
        asm volatile(PPC_CP_ABORT);

#endif /* CONFIG_PPC_BOOK3S_64 */
        return 0;
}
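
/*
 * Illustrative sketch (not part of the original file): a VAS driver would
 * mark the thread before handing it a paste address, so that __switch_to()
 * starts issuing CP_ABORT for it.  The function name is hypothetical.
 */
#if 0   /* example only */
static int vas_open_window_example(void)
{
        int rc = set_thread_uses_vas();

        if (rc)
                return rc;      /* no foreign address support on this CPU */
        /* ... map the paste address for the process ... */
        return 0;
}
#endif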

#ifdef CONFIG_PPC64
static DEFINE_SPINLOCK(vas_thread_id_lock);
static DEFINE_IDA(vas_thread_ida);

/*
 * We need to assign a unique thread id to each thread in a process.
 *
 * This thread id, referred to as TIDR, and separate from Linux's tgid,
 * is intended to be used to direct an ASB_Notify from the hardware to the
 * thread, when a suitable event occurs in the system.
 *
 * One such event is a "paste" instruction in the context of Fast Thread
 * Wakeup (aka Core-to-core wake up in the Virtual Accelerator Switchboard
 * (VAS) in POWER9).
 *
 * To get a unique TIDR per process we could simply reuse task_pid_nr(), but
 * the problem is that task_pid_nr() is not yet available when copy_thread()
 * is called.  Fixing that would require changing more intrusive arch-neutral
 * code in the copy_process() code path.
 *
 * Further, to assign unique TIDRs within each process, we need an atomic
 * field (or an IDR) in task_struct, which again intrudes into the arch-
 * neutral code.  So try to assign globally unique TIDRs for now.
 *
 * NOTE: TIDR 0 indicates that the thread does not need a TIDR value.
 *       For now, only threads that expect to be notified by the VAS
 *       hardware need a TIDR value and we assign values > 0 for those.
 */
#define MAX_THREAD_CONTEXT      ((1 << 16) - 1)
static int assign_thread_tidr(void)
{
        int index;
        int err;
        unsigned long flags;

again:
        if (!ida_pre_get(&vas_thread_ida, GFP_KERNEL))
                return -ENOMEM;

        spin_lock_irqsave(&vas_thread_id_lock, flags);
        err = ida_get_new_above(&vas_thread_ida, 1, &index);
        spin_unlock_irqrestore(&vas_thread_id_lock, flags);

        if (err == -EAGAIN)
                goto again;
        else if (err)
                return err;

        if (index > MAX_THREAD_CONTEXT) {
                spin_lock_irqsave(&vas_thread_id_lock, flags);
                ida_remove(&vas_thread_ida, index);
                spin_unlock_irqrestore(&vas_thread_id_lock, flags);
                return -ENOMEM;
        }

        return index;
}

static void free_thread_tidr(int id)
{
        unsigned long flags;

        spin_lock_irqsave(&vas_thread_id_lock, flags);
        ida_remove(&vas_thread_ida, id);
        spin_unlock_irqrestore(&vas_thread_id_lock, flags);
}

/*
 * Clear any TIDR value assigned to this thread.
 */
void clear_thread_tidr(struct task_struct *t)
{
        if (!t->thread.tidr)
                return;

        if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
                WARN_ON_ONCE(1);
                return;
        }

        mtspr(SPRN_TIDR, 0);
        free_thread_tidr(t->thread.tidr);
        t->thread.tidr = 0;
}

void arch_release_task_struct(struct task_struct *t)
{
        clear_thread_tidr(t);
}

/*
 * Assign a unique TIDR (thread id) for task @t and set it in the thread
 * structure.  For now, we only support setting TIDR for the 'current' task.
 */
int set_thread_tidr(struct task_struct *t)
{
        int rc;

        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return -EINVAL;

        if (t != current)
                return -EINVAL;

        if (t->thread.tidr)
                return 0;

        rc = assign_thread_tidr();
        if (rc < 0)
                return rc;

        t->thread.tidr = rc;
        mtspr(SPRN_TIDR, t->thread.tidr);

        return 0;
}
EXPORT_SYMBOL_GPL(set_thread_tidr);
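
/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * ASB_Notify delivered to the calling thread would assign it a TIDR first.
 * The function name is hypothetical.
 */
#if 0   /* example only */
static int vas_setup_notify_example(void)
{
        int rc = set_thread_tidr(current);

        if (rc)
                return rc;
        /* current->thread.tidr can now be programmed into a VAS window */
        return 0;
}
#endif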
1609
1610 #endif /* CONFIG_PPC64 */
1611
1612 void
1613 release_thread(struct task_struct *t)
1614 {
1615 }
1616
1617 /*
1618  * this gets called so that we can store coprocessor state into memory and
1619  * copy the current task into the new thread.
1620  */
1621 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1622 {
1623         flush_all_to_thread(src);
1624         /*
1625          * Flush TM state out so we can copy it.  __switch_to_tm() does this
1626          * flush but it removes the checkpointed state from the current CPU and
1627          * transitions the CPU out of TM mode.  Hence we need to call
1628          * tm_recheckpoint_new_task() (on the same task) to restore the
1629          * checkpointed state back and the TM mode.
1630          *
1631          * We can't pass dst because it isn't ready yet. That doesn't
1632          * matter, as dst is only important for __switch_to().
1633          */
1634         __switch_to_tm(src, src);
1635
1636         *dst = *src;
1637
1638         clear_task_ebb(dst);
1639
1640         return 0;
1641 }
1642
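/*
 * Editorial note: setup_ksp_vsid() below precomputes the hash-MMU VSID for
 * the new task's kernel stack, so the context-switch code can install an SLB
 * entry for the stack without recomputing it at switch time. Radix has no
 * SLB, hence the early return.
 */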
1643 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1644 {
1645 #ifdef CONFIG_PPC_BOOK3S_64
1646         unsigned long sp_vsid;
1647         unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1648
1649         if (radix_enabled())
1650                 return;
1651
1652         if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1653                 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1654                         << SLB_VSID_SHIFT_1T;
1655         else
1656                 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1657                         << SLB_VSID_SHIFT;
1658         sp_vsid |= SLB_VSID_KERNEL | llp;
1659         p->thread.ksp_vsid = sp_vsid;
1660 #endif
1661 }
1662
1663 /*
1664  * Copy a thread.
1665  */
1666
1667 /*
1668  * Copy architecture-specific thread state
1669  */
1670 int copy_thread(unsigned long clone_flags, unsigned long usp,
1671                 unsigned long kthread_arg, struct task_struct *p)
1672 {
1673         struct pt_regs *childregs, *kregs;
1674         extern void ret_from_fork(void);
1675         extern void ret_from_kernel_thread(void);
1676         void (*f)(void);
1677         unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1678         struct thread_info *ti = task_thread_info(p);
1679
1680         klp_init_thread_info(ti);
1681
1682         /* Copy registers */
1683         sp -= sizeof(struct pt_regs);
1684         childregs = (struct pt_regs *) sp;
1685         if (unlikely(p->flags & PF_KTHREAD)) {
1686                 /* kernel thread */
1687                 memset(childregs, 0, sizeof(struct pt_regs));
1688                 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1689                 /* function */
1690                 if (usp)
1691                         childregs->gpr[14] = ppc_function_entry((void *)usp);
1692 #ifdef CONFIG_PPC64
1693                 clear_tsk_thread_flag(p, TIF_32BIT);
1694                 childregs->softe = IRQS_ENABLED;
1695 #endif
1696                 childregs->gpr[15] = kthread_arg;
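                /*
                 * ret_from_kernel_thread moves gpr[15] into r3 and branches
                 * to the entry point stashed in gpr[14], so the new thread
                 * effectively calls usp(kthread_arg) in kernel mode.
                 */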
1697                 p->thread.regs = NULL;  /* no user register state */
1698                 ti->flags |= _TIF_RESTOREALL;
1699                 f = ret_from_kernel_thread;
1700         } else {
1701                 /* user thread */
1702                 struct pt_regs *regs = current_pt_regs();
1703                 CHECK_FULL_REGS(regs);
1704                 *childregs = *regs;
1705                 if (usp)
1706                         childregs->gpr[1] = usp;
1707                 p->thread.regs = childregs;
1708                 childregs->gpr[3] = 0;  /* Result from fork() */
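                /*
                 * CLONE_SETTLS: the TLS pointer passed to clone is still in
                 * gpr[6] of the copied register frame; the thread pointer
                 * lives in r13 for 64-bit tasks and r2 for 32-bit.
                 */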
1709                 if (clone_flags & CLONE_SETTLS) {
1710 #ifdef CONFIG_PPC64
1711                         if (!is_32bit_task())
1712                                 childregs->gpr[13] = childregs->gpr[6];
1713                         else
1714 #endif
1715                                 childregs->gpr[2] = childregs->gpr[6];
1716                 }
1717
1718                 f = ret_from_fork;
1719         }
1720         childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1721         sp -= STACK_FRAME_OVERHEAD;
1722
1723         /*
1724          * The way this works is that at some point in the future
1725          * some task will call _switch to switch to the new task.
1726          * That will pop off the stack frame created below and start
1727          * the new task running at ret_from_fork.  The new task will
1728  * do some housekeeping and then return from the fork or clone
1729          * system call, using the stack frame created above.
1730          */
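        /*
         * Editorial sketch of the child's kernel stack as built below, from
         * the top of the stack page downward:
         *
         *   task_stack_page(p) + THREAD_SIZE
         *     [ struct pt_regs ]        childregs (user regs / kthread setup)
         *     [ STACK_FRAME_OVERHEAD ]  dummy frame, back chain zeroed
         *     [ struct pt_regs ]        kregs (kregs->nip = ret_from_*)
         *     [ STACK_FRAME_OVERHEAD ]  frame popped by _switch()
         *   p->thread.ksp ends up pointing at the bottom frame.
         */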
1731         ((unsigned long *)sp)[0] = 0;
1732         sp -= sizeof(struct pt_regs);
1733         kregs = (struct pt_regs *) sp;
1734         sp -= STACK_FRAME_OVERHEAD;
1735         p->thread.ksp = sp;
1736 #ifdef CONFIG_PPC32
1737         p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1738                                 _ALIGN_UP(sizeof(struct thread_info), 16);
1739 #endif
1740 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1741         p->thread.ptrace_bps[0] = NULL;
1742 #endif
1743
1744         p->thread.fp_save_area = NULL;
1745 #ifdef CONFIG_ALTIVEC
1746         p->thread.vr_save_area = NULL;
1747 #endif
1748
1749         setup_ksp_vsid(p, sp);
1750
1751 #ifdef CONFIG_PPC64
1752         if (cpu_has_feature(CPU_FTR_DSCR)) {
1753                 p->thread.dscr_inherit = current->thread.dscr_inherit;
1754                 p->thread.dscr = mfspr(SPRN_DSCR);
1755         }
1756         if (cpu_has_feature(CPU_FTR_HAS_PPR))
1757                 p->thread.ppr = INIT_PPR;
1758
1759         p->thread.tidr = 0;
1760 #endif
1761         kregs->nip = ppc_function_entry(f);
1762         return 0;
1763 }
1764
1765 /*
1766  * Set up a thread for executing a new program
1767  */
1768 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1769 {
1770 #ifdef CONFIG_PPC64
1771         unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1772 #endif
1773
1774         /*
1775          * If we exec out of a kernel thread then thread.regs will not be
1776          * set.  Do it now.
1777          */
1778         if (!current->thread.regs) {
1779                 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1780                 current->thread.regs = regs - 1;
1781         }
1782
1783 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1784         /*
1785          * Clear any transactional state; we're exec()ing. The cause is
1786          * not important, as there will never be a recheckpoint, so it's
1787          * not user visible.
1788          */
1789         if (MSR_TM_SUSPENDED(mfmsr()))
1790                 tm_reclaim_current(0);
1791 #endif
1792
1793         memset(regs->gpr, 0, sizeof(regs->gpr));
1794         regs->ctr = 0;
1795         regs->link = 0;
1796         regs->xer = 0;
1797         regs->ccr = 0;
1798         regs->gpr[1] = sp;
1799
1800         /*
1801          * We have just cleared all the nonvolatile GPRs, so make
1802          * FULL_REGS(regs) return true.  This is necessary to allow
1803          * ptrace to examine the thread immediately after exec.
1804          */
1805         regs->trap &= ~1UL;
1806
1807 #ifdef CONFIG_PPC32
1808         regs->mq = 0;
1809         regs->nip = start;
1810         regs->msr = MSR_USER;
1811 #else
1812         if (!is_32bit_task()) {
1813                 unsigned long entry;
1814
1815                 if (is_elf2_task()) {
1816                         /* Look ma, no function descriptors! */
1817                         entry = start;
1818
1819                         /*
1820                          * Ulrich says:
1821                          *   The latest iteration of the ABI requires that when
1822                          *   calling a function (at its global entry point),
1823                          *   the caller must ensure r12 holds the entry point
1824                          *   address (so that the function can quickly
1825                          *   establish addressability).
1826                          */
1827                         regs->gpr[12] = start;
1828                         /* Make sure that's restored on entry to userspace. */
1829                         set_thread_flag(TIF_RESTOREALL);
1830                 } else {
1831                         unsigned long toc;
1832
1833                         /* start is a relocated pointer to the function
1834                          * descriptor for the elf _start routine.  The first
1835                          * entry in the function descriptor is the entry
1836                          * address of _start and the second entry is the TOC
1837                          * value we need to use.
1838                          */
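                        /*
                         * Editorial sketch of the ELFv1 descriptor read here
                         * (it matches func_descr_t in asm/types.h):
                         *
                         *   word 0: entry - code address of _start
                         *   word 1: toc   - TOC (r2) value for _start
                         *   word 2: env   - environment pointer, unused here
                         */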
1839                         __get_user(entry, (unsigned long __user *)start);
1840                         __get_user(toc, (unsigned long __user *)start+1);
1841
1842                         /* Check whether the e_entry function descriptor entries
1843                          * need to be relocated before we can use them.
1844                          */
1845                         if (load_addr != 0) {
1846                                 entry += load_addr;
1847                                 toc   += load_addr;
1848                         }
1849                         regs->gpr[2] = toc;
1850                 }
1851                 regs->nip = entry;
1852                 regs->msr = MSR_USER64;
1853         } else {
1854                 regs->nip = start;
1855                 regs->gpr[2] = 0;
1856                 regs->msr = MSR_USER32;
1857         }
1858 #endif
1859 #ifdef CONFIG_VSX
1860         current->thread.used_vsr = 0;
1861 #endif
1862         current->thread.load_fp = 0;
1863         memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1864         current->thread.fp_save_area = NULL;
1865 #ifdef CONFIG_ALTIVEC
1866         memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1867         current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1868         current->thread.vr_save_area = NULL;
1869         current->thread.vrsave = 0;
1870         current->thread.used_vr = 0;
1871         current->thread.load_vec = 0;
1872 #endif /* CONFIG_ALTIVEC */
1873 #ifdef CONFIG_SPE
1874         memset(current->thread.evr, 0, sizeof(current->thread.evr));
1875         current->thread.acc = 0;
1876         current->thread.spefscr = 0;
1877         current->thread.used_spe = 0;
1878 #endif /* CONFIG_SPE */
1879 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1880         current->thread.tm_tfhar = 0;
1881         current->thread.tm_texasr = 0;
1882         current->thread.tm_tfiar = 0;
1883         current->thread.load_tm = 0;
1884 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1885
1886         thread_pkey_regs_init(&current->thread);
1887 }
1888 EXPORT_SYMBOL(start_thread);
1889
1890 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1891                 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1892
1893 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1894 {
1895         struct pt_regs *regs = tsk->thread.regs;
1896
1897         /* This is a bit hairy.  If we are an SPE-enabled processor
1898          * (have embedded fp) we store the IEEE exception enable flags in
1899          * fpexc_mode.  fpexc_mode is also used for setting the FP exception
1900          * mode (async, precise, disabled) for 'Classic' FP. */
1901         if (val & PR_FP_EXC_SW_ENABLE) {
1902 #ifdef CONFIG_SPE
1903                 if (cpu_has_feature(CPU_FTR_SPE)) {
1904                         /*
1905                          * When the sticky exception bits are set
1906                          * directly by userspace, it must call prctl
1907                          * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1908                          * in the existing prctl settings) or
1909                          * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1910                          * the bits being set).  <fenv.h> functions
1911                          * saving and restoring the whole
1912                          * floating-point environment need to do so
1913                          * anyway to restore the prctl settings from
1914                          * the saved environment.
1915                          */
1916                         tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1917                         tsk->thread.fpexc_mode = val &
1918                                 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1919                         return 0;
1920                 } else {
1921                         return -EINVAL;
1922                 }
1923 #else
1924                 return -EINVAL;
1925 #endif
1926         }
1927
1928         /* On a CONFIG_SPE build this does not hurt us.  The bits that
1929          * __pack_fe01 uses do not overlap with the bits used for
1930          * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
1931          * on CONFIG_SPE implementations are reserved, so writing to
1932          * them does not change anything. */
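        /*
         * For reference, the ISA-defined meaning of the two MSR bits that
         * __pack_fe01() produces:
         *
         *   FE0 FE1  FP exception mode
         *    0   0   exceptions disabled
         *    0   1   imprecise nonrecoverable
         *    1   0   imprecise recoverable (async)
         *    1   1   precise
         */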
1933         if (val > PR_FP_EXC_PRECISE)
1934                 return -EINVAL;
1935         tsk->thread.fpexc_mode = __pack_fe01(val);
1936         if (regs != NULL && (regs->msr & MSR_FP) != 0)
1937                 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1938                         | tsk->thread.fpexc_mode;
1939         return 0;
1940 }
1941
1942 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1943 {
1944         unsigned int val;
1945
1946         if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1947 #ifdef CONFIG_SPE
1948                 if (cpu_has_feature(CPU_FTR_SPE)) {
1949                         /*
1950                          * When the sticky exception bits are set
1951                          * directly by userspace, it must call prctl
1952                          * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1953                          * in the existing prctl settings) or
1954                          * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1955                          * the bits being set).  <fenv.h> functions
1956                          * saving and restoring the whole
1957                          * floating-point environment need to do so
1958                          * anyway to restore the prctl settings from
1959                          * the saved environment.
1960                          */
1961                         tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1962                         val = tsk->thread.fpexc_mode;
1963                 } else
1964                         return -EINVAL;
1965 #else
1966                 return -EINVAL;
1967 #endif
1968         else
1969                 val = __unpack_fe01(tsk->thread.fpexc_mode);
1970         return put_user(val, (unsigned int __user *) adr);
1971 }
1972
1973 int set_endian(struct task_struct *tsk, unsigned int val)
1974 {
1975         struct pt_regs *regs = tsk->thread.regs;
1976
1977         if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1978             (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1979                 return -EINVAL;
1980
1981         if (regs == NULL)
1982                 return -EINVAL;
1983
1984         if (val == PR_ENDIAN_BIG)
1985                 regs->msr &= ~MSR_LE;
1986         else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1987                 regs->msr |= MSR_LE;
1988         else
1989                 return -EINVAL;
1990
1991         return 0;
1992 }
1993
1994 int get_endian(struct task_struct *tsk, unsigned long adr)
1995 {
1996         struct pt_regs *regs = tsk->thread.regs;
1997         unsigned int val;
1998
1999         if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
2000             !cpu_has_feature(CPU_FTR_REAL_LE))
2001                 return -EINVAL;
2002
2003         if (regs == NULL)
2004                 return -EINVAL;
2005
2006         if (regs->msr & MSR_LE) {
2007                 if (cpu_has_feature(CPU_FTR_REAL_LE))
2008                         val = PR_ENDIAN_LITTLE;
2009                 else
2010                         val = PR_ENDIAN_PPC_LITTLE;
2011         } else
2012                 val = PR_ENDIAN_BIG;
2013
2014         return put_user(val, (unsigned int __user *)adr);
2015 }
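/*
 * Editorial sketch of the userspace side of the two helpers above (they back
 * the PR_SET_ENDIAN/PR_GET_ENDIAN prctls), assuming <sys/prctl.h>:
 *
 *      int mode;
 *
 *      if (prctl(PR_GET_ENDIAN, (unsigned long)&mode, 0, 0, 0) == 0)
 *              printf("endian mode %d\n", mode);
 *      if (prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE, 0, 0, 0) != 0)
 *              perror("prctl");        / * -EINVAL without CPU_FTR_REAL_LE * /
 */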
2016
2017 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
2018 {
2019         tsk->thread.align_ctl = val;
2020         return 0;
2021 }
2022
2023 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
2024 {
2025         return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
2026 }
2027
2028 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
2029                                   unsigned long nbytes)
2030 {
2031         unsigned long stack_page;
2032         unsigned long cpu = task_cpu(p);
2033
2034         /*
2035          * Avoid crashing if the stack has overflowed and corrupted
2036          * task_cpu(p), which is in the thread_info struct.
2037          */
2038         if (cpu < NR_CPUS && cpu_possible(cpu)) {
2039                 stack_page = (unsigned long) hardirq_ctx[cpu];
2040                 if (sp >= stack_page + sizeof(struct thread_struct)
2041                     && sp <= stack_page + THREAD_SIZE - nbytes)
2042                         return 1;
2043
2044                 stack_page = (unsigned long) softirq_ctx[cpu];
2045                 if (sp >= stack_page + sizeof(struct thread_struct)
2046                     && sp <= stack_page + THREAD_SIZE - nbytes)
2047                         return 1;
2048         }
2049         return 0;
2050 }
2051
2052 int validate_sp(unsigned long sp, struct task_struct *p,
2053                        unsigned long nbytes)
2054 {
2055         unsigned long stack_page = (unsigned long)task_stack_page(p);
2056
2057         if (sp >= stack_page + sizeof(struct thread_struct)
2058             && sp <= stack_page + THREAD_SIZE - nbytes)
2059                 return 1;
2060
2061         return valid_irq_stack(sp, p, nbytes);
2062 }
2063
2064 EXPORT_SYMBOL(validate_sp);
2065
2066 unsigned long get_wchan(struct task_struct *p)
2067 {
2068         unsigned long ip, sp;
2069         int count = 0;
2070
2071         if (!p || p == current || p->state == TASK_RUNNING)
2072                 return 0;
2073
2074         sp = p->thread.ksp;
2075         if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
2076                 return 0;
2077
2078         do {
2079                 sp = *(unsigned long *)sp;
2080                 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2081                     p->state == TASK_RUNNING)
2082                         return 0;
2083                 if (count > 0) {
2084                         ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
2085                         if (!in_sched_functions(ip))
2086                                 return ip;
2087                 }
2088         } while (count++ < 16);
2089         return 0;
2090 }
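/*
 * Editorial note on the walk above: every PowerPC stack frame stores a back
 * chain at offset 0 (sp[0] points at the caller's frame) and the saved LR at
 * word STACK_FRAME_LR_SAVE (word 2 on 64-bit, word 1 on 32-bit). The first
 * iteration skips the LR slot (the count > 0 check) because the newest
 * frame's save area may not have been filled in yet.
 */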
2091
2092 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2093
2094 void show_stack(struct task_struct *tsk, unsigned long *stack)
2095 {
2096         unsigned long sp, ip, lr, newsp;
2097         int count = 0;
2098         int firstframe = 1;
2099 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2100         int curr_frame = current->curr_ret_stack;
2101         extern void return_to_handler(void);
2102         unsigned long rth = (unsigned long)return_to_handler;
2103 #endif
2104
2105         sp = (unsigned long) stack;
2106         if (tsk == NULL)
2107                 tsk = current;
2108         if (sp == 0) {
2109                 if (tsk == current)
2110                         sp = current_stack_pointer();
2111                 else
2112                         sp = tsk->thread.ksp;
2113         }
2114
2115         lr = 0;
2116         printk("Call Trace:\n");
2117         do {
2118                 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
2119                         return;
2120
2121                 stack = (unsigned long *) sp;
2122                 newsp = stack[0];
2123                 ip = stack[STACK_FRAME_LR_SAVE];
2124                 if (!firstframe || ip != lr) {
2125                         printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2126 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2127                         if ((ip == rth) && curr_frame >= 0) {
2128                                 pr_cont(" (%pS)",
2129                                        (void *)current->ret_stack[curr_frame].ret);
2130                                 curr_frame--;
2131                         }
2132 #endif
2133                         if (firstframe)
2134                                 pr_cont(" (unreliable)");
2135                         pr_cont("\n");
2136                 }
2137                 firstframe = 0;
2138
2139                 /*
2140                  * See if this is an exception frame.
2141                  * We look for the "regshere" marker in the current frame.
2142                  */
2143                 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
2144                     && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
2145                         struct pt_regs *regs = (struct pt_regs *)
2146                                 (sp + STACK_FRAME_OVERHEAD);
2147                         lr = regs->link;
2148                         printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
2149                                regs->trap, (void *)regs->nip, (void *)lr);
2150                         firstframe = 1;
2151                 }
2152
2153                 sp = newsp;
2154         } while (count++ < kstack_depth_to_print);
2155 }
2156
2157 #ifdef CONFIG_PPC64
2158 /* Called with hard IRQs off */
2159 void notrace __ppc64_runlatch_on(void)
2160 {
2161         struct thread_info *ti = current_thread_info();
2162
2163         if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2164                 /*
2165                  * Least significant bit (RUN) is the only writable bit of
2166                  * the CTRL register, so we can avoid mfspr. 2.06 is not the
2167                  * earliest ISA where this is the case, but it's convenient.
2168                  */
2169                 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2170         } else {
2171                 unsigned long ctrl;
2172
2173                 /*
2174                  * Some processors (e.g., Cell) have writable fields other
2175                  * than RUN, so do the read-modify-write.
2176                  */
2177                 ctrl = mfspr(SPRN_CTRLF);
2178                 ctrl |= CTRL_RUNLATCH;
2179                 mtspr(SPRN_CTRLT, ctrl);
2180         }
2181
2182         ti->local_flags |= _TLF_RUNLATCH;
2183 }
2184
2185 /* Called with hard IRQs off */
2186 void notrace __ppc64_runlatch_off(void)
2187 {
2188         struct thread_info *ti = current_thread_info();
2189
2190         ti->local_flags &= ~_TLF_RUNLATCH;
2191
2192         if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2193                 mtspr(SPRN_CTRLT, 0);
2194         } else {
2195                 unsigned long ctrl;
2196
2197                 ctrl = mfspr(SPRN_CTRLF);
2198                 ctrl &= ~CTRL_RUNLATCH;
2199                 mtspr(SPRN_CTRLT, ctrl);
2200         }
2201 }
2202 #endif /* CONFIG_PPC64 */
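/*
 * Editorial note: most callers use the ppc64_runlatch_on()/off() wrappers
 * from <asm/runlatch.h>, which test _TLF_RUNLATCH first to avoid redundant
 * SPR writes; the idle loop clears RUN to tell the hardware that this
 * thread is doing no useful work.
 */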
2203
2204 unsigned long arch_align_stack(unsigned long sp)
2205 {
2206         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2207                 sp -= get_random_int() & ~PAGE_MASK;
2208         return sp & ~0xf;
2209 }
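/*
 * Above, get_random_int() & ~PAGE_MASK yields a random offset within one
 * page, and the final "& ~0xf" preserves the ABI-required 16-byte stack
 * alignment.
 */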
2210
2211 static inline unsigned long brk_rnd(void)
2212 {
2213         unsigned long rnd = 0;
2214
2215         /* 8MB for 32bit, 1GB for 64bit */
2216         if (is_32bit_task())
2217                 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2218         else
2219                 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2220
2221         return rnd << PAGE_SHIFT;
2222 }
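/*
 * Worked example for the comment above: with 4K pages (PAGE_SHIFT = 12),
 * the 32-bit case draws from 2^(23-12) = 2048 pages, i.e. up to 8MB once
 * shifted back by PAGE_SHIFT, and the 64-bit case from 2^(30-12) pages,
 * i.e. up to 1GB. Larger page sizes give the same byte ranges in coarser
 * steps.
 */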
2223
2224 unsigned long arch_randomize_brk(struct mm_struct *mm)
2225 {
2226         unsigned long base = mm->brk;
2227         unsigned long ret;
2228
2229 #ifdef CONFIG_PPC_BOOK3S_64
2230         /*
2231          * If we are using 1TB segments and we are allowed to randomise
2232          * the heap, we can put it above 1TB so it is backed by a 1TB
2233          * segment. Otherwise the heap will be in the bottom 1TB
2234          * which always uses 256MB segments and this may result in a
2235          * performance penalty. We don't need to worry about radix: under
2236          * radix, mmu_highuser_ssize remains unchanged at 256MB.
2237          */
2238         if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2239                 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2240 #endif
2241
2242         ret = PAGE_ALIGN(base + brk_rnd());
2243
2244         if (ret < mm->brk)
2245                 return mm->brk;
2246
2247         return ret;
2248 }
2249