arch/powerpc/kernel/ptrace.c
1 /*
2  *  PowerPC version
3  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4  *
5  *  Derived from "arch/m68k/kernel/ptrace.c"
6  *  Copyright (C) 1994 by Hamish Macdonald
7  *  Taken from linux/kernel/ptrace.c and modified for M680x0.
8  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9  *
10  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11  * and Paul Mackerras (paulus@samba.org).
12  *
13  * This file is subject to the terms and conditions of the GNU General
14  * Public License.  See the file README.legal in the main directory of
15  * this archive for more details.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 #include <linux/context_tracking.h>
36
37 #include <linux/uaccess.h>
38 #include <linux/pkeys.h>
39 #include <asm/page.h>
40 #include <asm/pgtable.h>
41 #include <asm/switch_to.h>
42 #include <asm/tm.h>
43 #include <asm/asm-prototypes.h>
44
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/syscalls.h>
47
48 /*
49  * The parameter save area on the stack is used to store arguments being passed
50  * to the callee function and is located at a fixed offset from the stack pointer.
51  */
52 #ifdef CONFIG_PPC32
53 #define PARAMETER_SAVE_AREA_OFFSET      24  /* bytes */
54 #else /* CONFIG_PPC32 */
55 #define PARAMETER_SAVE_AREA_OFFSET      48  /* bytes */
56 #endif
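
/*
 * Illustrative sketch (not part of this file): with a pt_regs captured at
 * function entry, an argument that the caller has spilled to its parameter
 * save area could be read relative to the stack pointer (gpr[1]).  The
 * helper name is hypothetical and it assumes the slot was actually written
 * by the caller.
 *
 *	static unsigned long example_stack_arg(struct pt_regs *regs, unsigned int n)
 *	{
 *		unsigned long addr = regs->gpr[1] + PARAMETER_SAVE_AREA_OFFSET +
 *				     n * sizeof(unsigned long);
 *
 *		return *(unsigned long *)addr;
 *	}
 */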
57
58 struct pt_regs_offset {
59         const char *name;
60         int offset;
61 };
62
63 #define STR(s)  #s                      /* convert to string */
64 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
65 #define GPR_OFFSET_NAME(num)    \
66         {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
67         {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
68 #define REG_OFFSET_END {.name = NULL, .offset = 0}
69
70 #define TVSO(f) (offsetof(struct thread_vr_state, f))
71 #define TFSO(f) (offsetof(struct thread_fp_state, f))
72 #define TSO(f)  (offsetof(struct thread_struct, f))
73
74 static const struct pt_regs_offset regoffset_table[] = {
75         GPR_OFFSET_NAME(0),
76         GPR_OFFSET_NAME(1),
77         GPR_OFFSET_NAME(2),
78         GPR_OFFSET_NAME(3),
79         GPR_OFFSET_NAME(4),
80         GPR_OFFSET_NAME(5),
81         GPR_OFFSET_NAME(6),
82         GPR_OFFSET_NAME(7),
83         GPR_OFFSET_NAME(8),
84         GPR_OFFSET_NAME(9),
85         GPR_OFFSET_NAME(10),
86         GPR_OFFSET_NAME(11),
87         GPR_OFFSET_NAME(12),
88         GPR_OFFSET_NAME(13),
89         GPR_OFFSET_NAME(14),
90         GPR_OFFSET_NAME(15),
91         GPR_OFFSET_NAME(16),
92         GPR_OFFSET_NAME(17),
93         GPR_OFFSET_NAME(18),
94         GPR_OFFSET_NAME(19),
95         GPR_OFFSET_NAME(20),
96         GPR_OFFSET_NAME(21),
97         GPR_OFFSET_NAME(22),
98         GPR_OFFSET_NAME(23),
99         GPR_OFFSET_NAME(24),
100         GPR_OFFSET_NAME(25),
101         GPR_OFFSET_NAME(26),
102         GPR_OFFSET_NAME(27),
103         GPR_OFFSET_NAME(28),
104         GPR_OFFSET_NAME(29),
105         GPR_OFFSET_NAME(30),
106         GPR_OFFSET_NAME(31),
107         REG_OFFSET_NAME(nip),
108         REG_OFFSET_NAME(msr),
109         REG_OFFSET_NAME(ctr),
110         REG_OFFSET_NAME(link),
111         REG_OFFSET_NAME(xer),
112         REG_OFFSET_NAME(ccr),
113 #ifdef CONFIG_PPC64
114         REG_OFFSET_NAME(softe),
115 #else
116         REG_OFFSET_NAME(mq),
117 #endif
118         REG_OFFSET_NAME(trap),
119         REG_OFFSET_NAME(dar),
120         REG_OFFSET_NAME(dsisr),
121         REG_OFFSET_END,
122 };
123
124 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
125 static void flush_tmregs_to_thread(struct task_struct *tsk)
126 {
127         /*
128          * If task is not current, it will have been flushed already to
129          * its thread_struct during __switch_to().
130          *
131          * A reclaim flushes ALL of the state; if the task is not in a
132          * transaction, only the live TM SPRs are saved to its thread_struct.
133          */
134
135         if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
136                 return;
137
138         if (MSR_TM_SUSPENDED(mfmsr())) {
139                 tm_reclaim_current(TM_CAUSE_SIGNAL);
140         } else {
141                 tm_enable();
142                 tm_save_sprs(&(tsk->thread));
143         }
144 }
145 #else
146 static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
147 #endif
148
149 /**
150  * regs_query_register_offset() - query register offset from its name
151  * @name:       the name of a register
152  *
153  * regs_query_register_offset() returns the offset of a register in struct
154  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
155  */
156 int regs_query_register_offset(const char *name)
157 {
158         const struct pt_regs_offset *roff;
159         for (roff = regoffset_table; roff->name != NULL; roff++)
160                 if (!strcmp(roff->name, name))
161                         return roff->offset;
162         return -EINVAL;
163 }
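
/*
 * Illustrative sketch (not part of this file): a typical consumer, e.g. a
 * kprobes handler, resolves a register by name once and then reads it from
 * pt_regs.  regs_get_register() is assumed from <asm/ptrace.h>; it returns
 * the value stored at the given byte offset within struct pt_regs.
 *
 *	int offs = regs_query_register_offset("gpr3");
 *
 *	if (offs >= 0)
 *		pr_info("r3 = 0x%lx\n", regs_get_register(regs, offs));
 */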
164
165 /**
166  * regs_query_register_name() - query register name from its offset
167  * @offset:     the offset of a register in struct pt_regs.
168  *
169  * regs_query_register_name() returns the name of a register from its
170  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
171  */
172 const char *regs_query_register_name(unsigned int offset)
173 {
174         const struct pt_regs_offset *roff;
175         for (roff = regoffset_table; roff->name != NULL; roff++)
176                 if (roff->offset == offset)
177                         return roff->name;
178         return NULL;
179 }
180
181 /*
182  * Does not yet catch signals sent when the child dies; that would have to
183  * be done in exit.c or in signal.c.
184  */
185
186 /*
187  * Set of msr bits that gdb can change on behalf of a process.
188  */
189 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
190 #define MSR_DEBUGCHANGE 0
191 #else
192 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
193 #endif
194
195 /*
196  * Maximum register writable via put_reg
197  */
198 #ifdef CONFIG_PPC32
199 #define PT_MAX_PUT_REG  PT_MQ
200 #else
201 #define PT_MAX_PUT_REG  PT_CCR
202 #endif
203
204 static unsigned long get_user_msr(struct task_struct *task)
205 {
206         return task->thread.regs->msr | task->thread.fpexc_mode;
207 }
208
209 static int set_user_msr(struct task_struct *task, unsigned long msr)
210 {
211         task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
212         task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
213         return 0;
214 }
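
/*
 * Illustrative sketch (not part of this file): only the MSR_DEBUGCHANGE bits
 * (MSR_SE and MSR_BE here, nothing at all on CONFIG_PPC_ADV_DEBUG_REGS parts)
 * can be flipped through this interface; any other bit written by the tracer
 * is silently discarded.
 *
 *	set_user_msr(task, get_user_msr(task) | MSR_SE);	// request single-step
 *	set_user_msr(task, get_user_msr(task) | MSR_FP);	// MSR_FP is masked off, no effect
 */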
215
216 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
217 static unsigned long get_user_ckpt_msr(struct task_struct *task)
218 {
219         return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
220 }
221
222 static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
223 {
224         task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
225         task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
226         return 0;
227 }
228
229 static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
230 {
231         task->thread.ckpt_regs.trap = trap & 0xfff0;
232         return 0;
233 }
234 #endif
235
236 #ifdef CONFIG_PPC64
237 static int get_user_dscr(struct task_struct *task, unsigned long *data)
238 {
239         *data = task->thread.dscr;
240         return 0;
241 }
242
243 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
244 {
245         task->thread.dscr = dscr;
246         task->thread.dscr_inherit = 1;
247         return 0;
248 }
249 #else
250 static int get_user_dscr(struct task_struct *task, unsigned long *data)
251 {
252         return -EIO;
253 }
254
255 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
256 {
257         return -EIO;
258 }
259 #endif
260
261 /*
262  * We prevent mucking around with the reserved area of the trap word,
263  * which is used internally by the kernel.
264  */
265 static int set_user_trap(struct task_struct *task, unsigned long trap)
266 {
267         task->thread.regs->trap = trap & 0xfff0;
268         return 0;
269 }
270
271 /*
272  * Get contents of register REGNO in task TASK.
273  */
274 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
275 {
276         if ((task->thread.regs == NULL) || !data)
277                 return -EIO;
278
279         if (regno == PT_MSR) {
280                 *data = get_user_msr(task);
281                 return 0;
282         }
283
284         if (regno == PT_DSCR)
285                 return get_user_dscr(task, data);
286
287 #ifdef CONFIG_PPC64
288         /*
289          * softe copies the paca->irq_soft_mask variable state. Since irq_soft_mask
290          * is no longer used as a flag, let's force the user to always see the softe
291          * value as 1, which means interrupts are not soft-disabled.
292          */
293         if (regno == PT_SOFTE) {
294                 *data = 1;
295                 return 0;
296         }
297 #endif
298
299         if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
300                 *data = ((unsigned long *)task->thread.regs)[regno];
301                 return 0;
302         }
303
304         return -EIO;
305 }
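
/*
 * Illustrative userspace sketch (not part of this file), assuming the tracee
 * is already ptrace-stopped and the usual powerpc convention that the
 * PTRACE_PEEKUSER address is the PT_* register index scaled by sizeof(long):
 *
 *	#include <stdio.h>
 *	#include <errno.h>
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>		// PT_NIP
 *
 *	errno = 0;
 *	long nip = ptrace(PTRACE_PEEKUSER, pid,
 *			  (void *)(PT_NIP * sizeof(long)), NULL);
 *	if (nip == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 */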
306
307 /*
308  * Write contents of register REGNO in task TASK.
309  */
310 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
311 {
312         if (task->thread.regs == NULL)
313                 return -EIO;
314
315         if (regno == PT_MSR)
316                 return set_user_msr(task, data);
317         if (regno == PT_TRAP)
318                 return set_user_trap(task, data);
319         if (regno == PT_DSCR)
320                 return set_user_dscr(task, data);
321
322         if (regno <= PT_MAX_PUT_REG) {
323                 ((unsigned long *)task->thread.regs)[regno] = data;
324                 return 0;
325         }
326         return -EIO;
327 }
328
329 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
330                    unsigned int pos, unsigned int count,
331                    void *kbuf, void __user *ubuf)
332 {
333         int i, ret;
334
335         if (target->thread.regs == NULL)
336                 return -EIO;
337
338         if (!FULL_REGS(target->thread.regs)) {
339                 /* We have a partial register set.  Fill 14-31 with bogus values */
340                 for (i = 14; i < 32; i++)
341                         target->thread.regs->gpr[i] = NV_REG_POISON;
342         }
343
344         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
345                                   target->thread.regs,
346                                   0, offsetof(struct pt_regs, msr));
347         if (!ret) {
348                 unsigned long msr = get_user_msr(target);
349                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
350                                           offsetof(struct pt_regs, msr),
351                                           offsetof(struct pt_regs, msr) +
352                                           sizeof(msr));
353         }
354
355         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
356                      offsetof(struct pt_regs, msr) + sizeof(long));
357
358         if (!ret)
359                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
360                                           &target->thread.regs->orig_gpr3,
361                                           offsetof(struct pt_regs, orig_gpr3),
362                                           sizeof(struct pt_regs));
363         if (!ret)
364                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
365                                                sizeof(struct pt_regs), -1);
366
367         return ret;
368 }
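
/*
 * Illustrative userspace sketch (not part of this file): the whole GPR
 * regset served by gpr_get() can be fetched in one call with
 * PTRACE_GETREGSET and NT_PRSTATUS while the tracee is ptrace-stopped.
 *
 *	#include <stdio.h>
 *	#include <elf.h>		// NT_PRSTATUS
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <asm/ptrace.h>		// userspace view of struct pt_regs
 *
 *	struct pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *		printf("nip=0x%lx msr=0x%lx\n", uregs.nip, uregs.msr);
 */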
369
370 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
371                    unsigned int pos, unsigned int count,
372                    const void *kbuf, const void __user *ubuf)
373 {
374         unsigned long reg;
375         int ret;
376
377         if (target->thread.regs == NULL)
378                 return -EIO;
379
380         CHECK_FULL_REGS(target->thread.regs);
381
382         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
383                                  target->thread.regs,
384                                  0, PT_MSR * sizeof(reg));
385
386         if (!ret && count > 0) {
387                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
388                                          PT_MSR * sizeof(reg),
389                                          (PT_MSR + 1) * sizeof(reg));
390                 if (!ret)
391                         ret = set_user_msr(target, reg);
392         }
393
394         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
395                      offsetof(struct pt_regs, msr) + sizeof(long));
396
397         if (!ret)
398                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
399                                          &target->thread.regs->orig_gpr3,
400                                          PT_ORIG_R3 * sizeof(reg),
401                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
402
403         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
404                 ret = user_regset_copyin_ignore(
405                         &pos, &count, &kbuf, &ubuf,
406                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
407                         PT_TRAP * sizeof(reg));
408
409         if (!ret && count > 0) {
410                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
411                                          PT_TRAP * sizeof(reg),
412                                          (PT_TRAP + 1) * sizeof(reg));
413                 if (!ret)
414                         ret = set_user_trap(target, reg);
415         }
416
417         if (!ret)
418                 ret = user_regset_copyin_ignore(
419                         &pos, &count, &kbuf, &ubuf,
420                         (PT_TRAP + 1) * sizeof(reg), -1);
421
422         return ret;
423 }
424
425 /*
426  * Regardless of transactions, 'fp_state' holds the current running
427  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
428  * value of all FPR registers for the current transaction.
429  *
430  * Userspace interface buffer layout:
431  *
432  * struct data {
433  *      u64     fpr[32];
434  *      u64     fpscr;
435  * };
436  */
437 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
438                    unsigned int pos, unsigned int count,
439                    void *kbuf, void __user *ubuf)
440 {
441 #ifdef CONFIG_VSX
442         u64 buf[33];
443         int i;
444
445         flush_fp_to_thread(target);
446
447         /* copy to local buffer then write that out */
448         for (i = 0; i < 32 ; i++)
449                 buf[i] = target->thread.TS_FPR(i);
450         buf[32] = target->thread.fp_state.fpscr;
451         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
452 #else
453         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
454                      offsetof(struct thread_fp_state, fpr[32]));
455
456         flush_fp_to_thread(target);
457
458         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
459                                    &target->thread.fp_state, 0, -1);
460 #endif
461 }
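
/*
 * Illustrative sketch (not part of this file): a tracer-side mirror of the
 * FP regset layout documented above; it is what PTRACE_GETREGSET with
 * NT_PRFPREG (from <elf.h>) fills in.  The struct name is hypothetical.
 *
 *	struct ppc_fp_regs {
 *		__u64 fpr[32];
 *		__u64 fpscr;
 *	};
 */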
462
463 /*
464  * Regardless of transactions, 'fp_state' holds the current running
465  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
466  * value of all FPR registers for the current transaction.
467  *
468  * Userspace interface buffer layout:
469  *
470  * struct data {
471  *      u64     fpr[32];
472  *      u64     fpscr;
473  * };
474  *
475  */
476 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
477                    unsigned int pos, unsigned int count,
478                    const void *kbuf, const void __user *ubuf)
479 {
480 #ifdef CONFIG_VSX
481         u64 buf[33];
482         int i;
483
484         flush_fp_to_thread(target);
485
486         for (i = 0; i < 32 ; i++)
487                 buf[i] = target->thread.TS_FPR(i);
488         buf[32] = target->thread.fp_state.fpscr;
489
490         /* copy to local buffer then write that out */
491         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
492         if (i)
493                 return i;
494
495         for (i = 0; i < 32 ; i++)
496                 target->thread.TS_FPR(i) = buf[i];
497         target->thread.fp_state.fpscr = buf[32];
498         return 0;
499 #else
500         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
501                      offsetof(struct thread_fp_state, fpr[32]));
502
503         flush_fp_to_thread(target);
504
505         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
506                                   &target->thread.fp_state, 0, -1);
507 #endif
508 }
509
510 #ifdef CONFIG_ALTIVEC
511 /*
512  * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
513  * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
514  * corresponding vector registers.  Quadword 32 contains the vscr as the
515  * last word (offset 12) within that quadword.  Quadword 33 contains the
516  * vrsave as the first word (offset 0) within the quadword.
517  *
518  * This definition of the VMX state is compatible with the current PPC32
519  * ptrace interface.  This allows signal handling and ptrace to use the
520  * same structures.  This also simplifies the implementation of a bi-arch
521  * (combined 32- and 64-bit) gdb.
522  */
523
524 static int vr_active(struct task_struct *target,
525                      const struct user_regset *regset)
526 {
527         flush_altivec_to_thread(target);
528         return target->thread.used_vr ? regset->n : 0;
529 }
530
531 /*
532  * Regardless of transactions, 'vr_state' holds the current running
533  * value of all the VMX registers and 'ckvr_state' holds the last
534  * checkpointed value of all the VMX registers for the current
535  * transaction to fall back on in case it aborts.
536  *
537  * Userspace interface buffer layout:
538  *
539  * struct data {
540  *      vector128       vr[32];
541  *      vector128       vscr;
542  *      vector128       vrsave;
543  * };
544  */
545 static int vr_get(struct task_struct *target, const struct user_regset *regset,
546                   unsigned int pos, unsigned int count,
547                   void *kbuf, void __user *ubuf)
548 {
549         int ret;
550
551         flush_altivec_to_thread(target);
552
553         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
554                      offsetof(struct thread_vr_state, vr[32]));
555
556         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
557                                   &target->thread.vr_state, 0,
558                                   33 * sizeof(vector128));
559         if (!ret) {
560                 /*
561                  * Copy out only the low-order word of vrsave.
562                  */
563                 union {
564                         elf_vrreg_t reg;
565                         u32 word;
566                 } vrsave;
567                 memset(&vrsave, 0, sizeof(vrsave));
568
569                 vrsave.word = target->thread.vrsave;
570
571                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
572                                           33 * sizeof(vector128), -1);
573         }
574
575         return ret;
576 }
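
/*
 * Illustrative sketch (not part of this file): a tracer-side mirror of the
 * 34-quadword layout described above, selected with PTRACE_GETREGSET and
 * NT_PPC_VMX (assumed from <elf.h>).  The struct name is hypothetical.
 *
 *	struct ppc_vmx_regs {
 *		__u8 vr[32][16];	// quadwords 0-31: vr0..vr31
 *		__u8 vscr[16];		// quadword 32: vscr in the last word
 *		__u8 vrsave[16];	// quadword 33: vrsave in the first word
 *	};
 */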
577
578 /*
579  * Regardless of transactions, 'vr_state' holds the current running
580  * value of all the VMX registers and 'ckvr_state' holds the last
581  * checkpointed value of all the VMX registers for the current
582  * transaction to fall back on in case it aborts.
583  *
584  * Userspace interface buffer layout:
585  *
586  * struct data {
587  *      vector128       vr[32];
588  *      vector128       vscr;
589  *      vector128       vrsave;
590  * };
591  */
592 static int vr_set(struct task_struct *target, const struct user_regset *regset,
593                   unsigned int pos, unsigned int count,
594                   const void *kbuf, const void __user *ubuf)
595 {
596         int ret;
597
598         flush_altivec_to_thread(target);
599
600         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
601                      offsetof(struct thread_vr_state, vr[32]));
602
603         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
604                                  &target->thread.vr_state, 0,
605                                  33 * sizeof(vector128));
606         if (!ret && count > 0) {
607                 /*
608                  * We use only the first word of vrsave.
609                  */
610                 union {
611                         elf_vrreg_t reg;
612                         u32 word;
613                 } vrsave;
614                 memset(&vrsave, 0, sizeof(vrsave));
615
616                 vrsave.word = target->thread.vrsave;
617
618                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
619                                          33 * sizeof(vector128), -1);
620                 if (!ret)
621                         target->thread.vrsave = vrsave.word;
622         }
623
624         return ret;
625 }
626 #endif /* CONFIG_ALTIVEC */
627
628 #ifdef CONFIG_VSX
629 /*
630  * Currently, to set and get all the VSX state, you need to call
631  * the FP and VMX calls as well.  This only gets/sets the lower 32
632  * 128-bit VSX registers.
633  */
634
635 static int vsr_active(struct task_struct *target,
636                       const struct user_regset *regset)
637 {
638         flush_vsx_to_thread(target);
639         return target->thread.used_vsr ? regset->n : 0;
640 }
641
642 /*
643  * Regardless of transactions, 'fp_state' holds the current running
644  * value of all FPR registers and 'ckfp_state' holds the last
645  * checkpointed value of all FPR registers for the current
646  * transaction.
647  *
648  * Userspace interface buffer layout:
649  *
650  * struct data {
651  *      u64     vsx[32];
652  * };
653  */
654 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
655                    unsigned int pos, unsigned int count,
656                    void *kbuf, void __user *ubuf)
657 {
658         u64 buf[32];
659         int ret, i;
660
661         flush_tmregs_to_thread(target);
662         flush_fp_to_thread(target);
663         flush_altivec_to_thread(target);
664         flush_vsx_to_thread(target);
665
666         for (i = 0; i < 32 ; i++)
667                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
668
669         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
670                                   buf, 0, 32 * sizeof(double));
671
672         return ret;
673 }
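
/*
 * Illustrative sketch (not part of this file): this regset (NT_PPC_VSX)
 * carries only the low doubleword of vsr0..vsr31; the high doubleword of
 * each of those VSRs aliases the corresponding FPR, which is exported via
 * the FP regset.  A tracer could therefore rebuild a full 128-bit register
 * like this (variable names are hypothetical):
 *
 *	__u64 hi = fpregs.fpr[i];	// doubleword 0, from NT_PRFPREG
 *	__u64 lo = vsxregs[i];		// doubleword 1, from NT_PPC_VSX
 */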
674
675 /*
676  * Regardless of transactions, 'fp_state' holds the current running
677  * value of all FPR registers and 'ckfp_state' holds the last
678  * checkpointed value of all FPR registers for the current
679  * transaction.
680  *
681  * Userspace interface buffer layout:
682  *
683  * struct data {
684  *      u64     vsx[32];
685  * };
686  */
687 static int vsr_set(struct task_struct *target, const struct user_regset *regset,
688                    unsigned int pos, unsigned int count,
689                    const void *kbuf, const void __user *ubuf)
690 {
691         u64 buf[32];
692         int ret, i;
693
694         flush_tmregs_to_thread(target);
695         flush_fp_to_thread(target);
696         flush_altivec_to_thread(target);
697         flush_vsx_to_thread(target);
698
699         for (i = 0; i < 32 ; i++)
700                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
701
702         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
703                                  buf, 0, 32 * sizeof(double));
704         if (!ret)
705                 for (i = 0; i < 32 ; i++)
706                         target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
707
708         return ret;
709 }
710 #endif /* CONFIG_VSX */
711
712 #ifdef CONFIG_SPE
713
714 /*
715  * For get_evrregs/set_evrregs functions 'data' has the following layout:
716  *
717  * struct {
718  *   u32 evr[32];
719  *   u64 acc;
720  *   u32 spefscr;
721  * }
722  */
723
724 static int evr_active(struct task_struct *target,
725                       const struct user_regset *regset)
726 {
727         flush_spe_to_thread(target);
728         return target->thread.used_spe ? regset->n : 0;
729 }
730
731 static int evr_get(struct task_struct *target, const struct user_regset *regset,
732                    unsigned int pos, unsigned int count,
733                    void *kbuf, void __user *ubuf)
734 {
735         int ret;
736
737         flush_spe_to_thread(target);
738
739         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
740                                   &target->thread.evr,
741                                   0, sizeof(target->thread.evr));
742
743         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
744                      offsetof(struct thread_struct, spefscr));
745
746         if (!ret)
747                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
748                                           &target->thread.acc,
749                                           sizeof(target->thread.evr), -1);
750
751         return ret;
752 }
753
754 static int evr_set(struct task_struct *target, const struct user_regset *regset,
755                    unsigned int pos, unsigned int count,
756                    const void *kbuf, const void __user *ubuf)
757 {
758         int ret;
759
760         flush_spe_to_thread(target);
761
762         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
763                                  &target->thread.evr,
764                                  0, sizeof(target->thread.evr));
765
766         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
767                      offsetof(struct thread_struct, spefscr));
768
769         if (!ret)
770                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
771                                          &target->thread.acc,
772                                          sizeof(target->thread.evr), -1);
773
774         return ret;
775 }
776 #endif /* CONFIG_SPE */
777
778 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
779 /**
780  * tm_cgpr_active - get active number of registers in CGPR
781  * @target:     The target task.
782  * @regset:     The user regset structure.
783  *
784  * This function checks for the active number of available
785  * registers in the transaction checkpointed GPR category.
786  */
787 static int tm_cgpr_active(struct task_struct *target,
788                           const struct user_regset *regset)
789 {
790         if (!cpu_has_feature(CPU_FTR_TM))
791                 return -ENODEV;
792
793         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
794                 return 0;
795
796         return regset->n;
797 }
798
799 /**
800  * tm_cgpr_get - get CGPR registers
801  * @target:     The target task.
802  * @regset:     The user regset structure.
803  * @pos:        The buffer position.
804  * @count:      Number of bytes to copy.
805  * @kbuf:       Kernel buffer to copy from.
806  * @ubuf:       User buffer to copy into.
807  *
808  * This function gets transaction checkpointed GPR registers.
809  *
810  * When the transaction is active, 'ckpt_regs' holds all the checkpointed
811  * GPR register values for the current transaction to fall back on if it
812  * aborts in between. This function gets those checkpointed GPR registers.
813  * The userspace interface buffer layout is as follows.
814  *
815  * struct data {
816  *      struct pt_regs ckpt_regs;
817  * };
818  */
819 static int tm_cgpr_get(struct task_struct *target,
820                         const struct user_regset *regset,
821                         unsigned int pos, unsigned int count,
822                         void *kbuf, void __user *ubuf)
823 {
824         int ret;
825
826         if (!cpu_has_feature(CPU_FTR_TM))
827                 return -ENODEV;
828
829         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
830                 return -ENODATA;
831
832         flush_tmregs_to_thread(target);
833         flush_fp_to_thread(target);
834         flush_altivec_to_thread(target);
835
836         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
837                                   &target->thread.ckpt_regs,
838                                   0, offsetof(struct pt_regs, msr));
839         if (!ret) {
840                 unsigned long msr = get_user_ckpt_msr(target);
841
842                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
843                                           offsetof(struct pt_regs, msr),
844                                           offsetof(struct pt_regs, msr) +
845                                           sizeof(msr));
846         }
847
848         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
849                      offsetof(struct pt_regs, msr) + sizeof(long));
850
851         if (!ret)
852                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
853                                           &target->thread.ckpt_regs.orig_gpr3,
854                                           offsetof(struct pt_regs, orig_gpr3),
855                                           sizeof(struct pt_regs));
856         if (!ret)
857                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
858                                                sizeof(struct pt_regs), -1);
859
860         return ret;
861 }
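
/*
 * Illustrative userspace sketch (not part of this file): the checkpointed
 * GPRs only exist while the tracee has a transaction active, so a tracer
 * should be prepared for ENODATA.  NT_PPC_TM_CGPR is assumed from
 * <linux/elf.h>.
 *
 *	struct pt_regs ckpt;
 *	struct iovec iov = { .iov_base = &ckpt, .iov_len = sizeof(ckpt) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CGPR, &iov) == -1 &&
 *	    errno == ENODATA)
 *		;	// tracee is not in a transaction right now
 */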
862
863 /**
864  * tm_cgpr_set - set the CGPR registers
865  * @target:     The target task.
866  * @regset:     The user regset structure.
867  * @pos:        The buffer position.
868  * @count:      Number of bytes to copy.
869  * @kbuf:       Kernel buffer to copy into.
870  * @ubuf:       User buffer to copy from.
871  *
872  * This function sets the in-transaction checkpointed GPR registers.
873  *
874  * When the transaction is active, 'ckpt_regs' holds the checkpointed
875  * GPR register values for the current transaction to fall back on if it
876  * aborts in between. This function sets those checkpointed GPR registers.
877  * The userspace interface buffer layout is as follows.
878  *
879  * struct data {
880  *      struct pt_regs ckpt_regs;
881  * };
882  */
883 static int tm_cgpr_set(struct task_struct *target,
884                         const struct user_regset *regset,
885                         unsigned int pos, unsigned int count,
886                         const void *kbuf, const void __user *ubuf)
887 {
888         unsigned long reg;
889         int ret;
890
891         if (!cpu_has_feature(CPU_FTR_TM))
892                 return -ENODEV;
893
894         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
895                 return -ENODATA;
896
897         flush_tmregs_to_thread(target);
898         flush_fp_to_thread(target);
899         flush_altivec_to_thread(target);
900
901         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
902                                  &target->thread.ckpt_regs,
903                                  0, PT_MSR * sizeof(reg));
904
905         if (!ret && count > 0) {
906                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
907                                          PT_MSR * sizeof(reg),
908                                          (PT_MSR + 1) * sizeof(reg));
909                 if (!ret)
910                         ret = set_user_ckpt_msr(target, reg);
911         }
912
913         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
914                      offsetof(struct pt_regs, msr) + sizeof(long));
915
916         if (!ret)
917                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
918                                          &target->thread.ckpt_regs.orig_gpr3,
919                                          PT_ORIG_R3 * sizeof(reg),
920                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
921
922         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
923                 ret = user_regset_copyin_ignore(
924                         &pos, &count, &kbuf, &ubuf,
925                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
926                         PT_TRAP * sizeof(reg));
927
928         if (!ret && count > 0) {
929                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
930                                          PT_TRAP * sizeof(reg),
931                                          (PT_TRAP + 1) * sizeof(reg));
932                 if (!ret)
933                         ret = set_user_ckpt_trap(target, reg);
934         }
935
936         if (!ret)
937                 ret = user_regset_copyin_ignore(
938                         &pos, &count, &kbuf, &ubuf,
939                         (PT_TRAP + 1) * sizeof(reg), -1);
940
941         return ret;
942 }
943
944 /**
945  * tm_cfpr_active - get active number of registers in CFPR
946  * @target:     The target task.
947  * @regset:     The user regset structure.
948  *
949  * This function checks for the active number of available
950  * registers in the transaction checkpointed FPR category.
951  */
952 static int tm_cfpr_active(struct task_struct *target,
953                                 const struct user_regset *regset)
954 {
955         if (!cpu_has_feature(CPU_FTR_TM))
956                 return -ENODEV;
957
958         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
959                 return 0;
960
961         return regset->n;
962 }
963
964 /**
965  * tm_cfpr_get - get CFPR registers
966  * @target:     The target task.
967  * @regset:     The user regset structure.
968  * @pos:        The buffer position.
969  * @count:      Number of bytes to copy.
970  * @kbuf:       Kernel buffer to copy from.
971  * @ubuf:       User buffer to copy into.
972  *
973  * This function gets the in-transaction checkpointed FPR registers.
974  *
975  * When the transaction is active 'ckfp_state' holds the checkpointed
976  * values for the current transaction to fall back on if it aborts
977  * in between. This function gets those checkpointed FPR registers.
978  * The userspace interface buffer layout is as follows.
979  *
980  * struct data {
981  *      u64     fpr[32];
982  *      u64     fpscr;
983  *};
984  */
985 static int tm_cfpr_get(struct task_struct *target,
986                         const struct user_regset *regset,
987                         unsigned int pos, unsigned int count,
988                         void *kbuf, void __user *ubuf)
989 {
990         u64 buf[33];
991         int i;
992
993         if (!cpu_has_feature(CPU_FTR_TM))
994                 return -ENODEV;
995
996         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
997                 return -ENODATA;
998
999         flush_tmregs_to_thread(target);
1000         flush_fp_to_thread(target);
1001         flush_altivec_to_thread(target);
1002
1003         /* copy to local buffer then write that out */
1004         for (i = 0; i < 32 ; i++)
1005                 buf[i] = target->thread.TS_CKFPR(i);
1006         buf[32] = target->thread.ckfp_state.fpscr;
1007         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1008 }
1009
1010 /**
1011  * tm_cfpr_set - set CFPR registers
1012  * @target:     The target task.
1013  * @regset:     The user regset structure.
1014  * @pos:        The buffer position.
1015  * @count:      Number of bytes to copy.
1016  * @kbuf:       Kernel buffer to copy into.
1017  * @ubuf:       User buffer to copy from.
1018  *
1019  * This function sets the in-transaction checkpointed FPR registers.
1020  *
1021  * When the transaction is active 'ckfp_state' holds the checkpointed
1022  * FPR register values for the current transaction to fall back on
1023  * if it aborts in between. This function sets these checkpointed
1024  * FPR registers. The userspace interface buffer layout is as follows.
1025  *
1026  * struct data {
1027  *      u64     fpr[32];
1028  *      u64     fpscr;
1029  *};
1030  */
1031 static int tm_cfpr_set(struct task_struct *target,
1032                         const struct user_regset *regset,
1033                         unsigned int pos, unsigned int count,
1034                         const void *kbuf, const void __user *ubuf)
1035 {
1036         u64 buf[33];
1037         int i;
1038
1039         if (!cpu_has_feature(CPU_FTR_TM))
1040                 return -ENODEV;
1041
1042         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1043                 return -ENODATA;
1044
1045         flush_tmregs_to_thread(target);
1046         flush_fp_to_thread(target);
1047         flush_altivec_to_thread(target);
1048
1049         for (i = 0; i < 32; i++)
1050                 buf[i] = target->thread.TS_CKFPR(i);
1051         buf[32] = target->thread.ckfp_state.fpscr;
1052
1053         /* copy to local buffer then write that out */
1054         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1055         if (i)
1056                 return i;
1057         for (i = 0; i < 32 ; i++)
1058                 target->thread.TS_CKFPR(i) = buf[i];
1059         target->thread.ckfp_state.fpscr = buf[32];
1060         return 0;
1061 }
1062
1063 /**
1064  * tm_cvmx_active - get active number of registers in CVMX
1065  * @target:     The target task.
1066  * @regset:     The user regset structure.
1067  *
1068  * This function checks for the active number of available
1069  * registers in the checkpointed VMX category.
1070  */
1071 static int tm_cvmx_active(struct task_struct *target,
1072                                 const struct user_regset *regset)
1073 {
1074         if (!cpu_has_feature(CPU_FTR_TM))
1075                 return -ENODEV;
1076
1077         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1078                 return 0;
1079
1080         return regset->n;
1081 }
1082
1083 /**
1084  * tm_cvmx_get - get CVMX registers
1085  * @target:     The target task.
1086  * @regset:     The user regset structure.
1087  * @pos:        The buffer position.
1088  * @count:      Number of bytes to copy.
1089  * @kbuf:       Kernel buffer to copy from.
1090  * @ubuf:       User buffer to copy into.
1091  *
1092  * This function gets the in-transaction checkpointed VMX registers.
1093  *
1094  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1095  * the checkpointed values for the current transaction to fall
1096  * back on if it aborts in between. The userspace interface buffer
1097  * layout is as follows.
1098  *
1099  * struct data {
1100  *      vector128       vr[32];
1101  *      vector128       vscr;
1102  *      vector128       vrsave;
1103  *};
1104  */
1105 static int tm_cvmx_get(struct task_struct *target,
1106                         const struct user_regset *regset,
1107                         unsigned int pos, unsigned int count,
1108                         void *kbuf, void __user *ubuf)
1109 {
1110         int ret;
1111
1112         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1113
1114         if (!cpu_has_feature(CPU_FTR_TM))
1115                 return -ENODEV;
1116
1117         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1118                 return -ENODATA;
1119
1120         /* Flush the state */
1121         flush_tmregs_to_thread(target);
1122         flush_fp_to_thread(target);
1123         flush_altivec_to_thread(target);
1124
1125         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1126                                         &target->thread.ckvr_state, 0,
1127                                         33 * sizeof(vector128));
1128         if (!ret) {
1129                 /*
1130                  * Copy out only the low-order word of vrsave.
1131                  */
1132                 union {
1133                         elf_vrreg_t reg;
1134                         u32 word;
1135                 } vrsave;
1136                 memset(&vrsave, 0, sizeof(vrsave));
1137                 vrsave.word = target->thread.ckvrsave;
1138                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1139                                                 33 * sizeof(vector128), -1);
1140         }
1141
1142         return ret;
1143 }
1144
1145 /**
1146  * tm_cvmx_set - set CVMX registers
1147  * @target:     The target task.
1148  * @regset:     The user regset structure.
1149  * @pos:        The buffer position.
1150  * @count:      Number of bytes to copy.
1151  * @kbuf:       Kernel buffer to copy into.
1152  * @ubuf:       User buffer to copy from.
1153  *
1154  * This function sets the in-transaction checkpointed VMX registers.
1155  *
1156  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1157  * the checkpointed values for the current transaction to fall
1158  * back on if it aborts in between. The userspace interface buffer
1159  * layout is as follows.
1160  *
1161  * struct data {
1162  *      vector128       vr[32];
1163  *      vector128       vscr;
1164  *      vector128       vrsave;
1165  *};
1166  */
1167 static int tm_cvmx_set(struct task_struct *target,
1168                         const struct user_regset *regset,
1169                         unsigned int pos, unsigned int count,
1170                         const void *kbuf, const void __user *ubuf)
1171 {
1172         int ret;
1173
1174         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1175
1176         if (!cpu_has_feature(CPU_FTR_TM))
1177                 return -ENODEV;
1178
1179         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1180                 return -ENODATA;
1181
1182         flush_tmregs_to_thread(target);
1183         flush_fp_to_thread(target);
1184         flush_altivec_to_thread(target);
1185
1186         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1187                                         &target->thread.ckvr_state, 0,
1188                                         33 * sizeof(vector128));
1189         if (!ret && count > 0) {
1190                 /*
1191                  * We use only the low-order word of vrsave.
1192                  */
1193                 union {
1194                         elf_vrreg_t reg;
1195                         u32 word;
1196                 } vrsave;
1197                 memset(&vrsave, 0, sizeof(vrsave));
1198                 vrsave.word = target->thread.ckvrsave;
1199                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1200                                                 33 * sizeof(vector128), -1);
1201                 if (!ret)
1202                         target->thread.ckvrsave = vrsave.word;
1203         }
1204
1205         return ret;
1206 }
1207
1208 /**
1209  * tm_cvsx_active - get active number of registers in CVSX
1210  * @target:     The target task.
1211  * @regset:     The user regset structure.
1212  *
1213  * This function checks for the active number of available
1214  * registers in the transaction checkpointed VSX category.
1215  */
1216 static int tm_cvsx_active(struct task_struct *target,
1217                                 const struct user_regset *regset)
1218 {
1219         if (!cpu_has_feature(CPU_FTR_TM))
1220                 return -ENODEV;
1221
1222         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1223                 return 0;
1224
1225         flush_vsx_to_thread(target);
1226         return target->thread.used_vsr ? regset->n : 0;
1227 }
1228
1229 /**
1230  * tm_cvsx_get - get CVSX registers
1231  * @target:     The target task.
1232  * @regset:     The user regset structure.
1233  * @pos:        The buffer position.
1234  * @count:      Number of bytes to copy.
1235  * @kbuf:       Kernel buffer to copy from.
1236  * @ubuf:       User buffer to copy into.
1237  *
1238  * This function gets the in-transaction checkpointed VSX registers.
1239  *
1240  * When the transaction is active 'ckfp_state' holds the checkpointed
1241  * values for the current transaction to fall back on if it aborts
1242  * in between. This function gets those checkpointed VSX registers.
1243  * The userspace interface buffer layout is as follows.
1244  *
1245  * struct data {
1246  *      u64     vsx[32];
1247  *};
1248  */
1249 static int tm_cvsx_get(struct task_struct *target,
1250                         const struct user_regset *regset,
1251                         unsigned int pos, unsigned int count,
1252                         void *kbuf, void __user *ubuf)
1253 {
1254         u64 buf[32];
1255         int ret, i;
1256
1257         if (!cpu_has_feature(CPU_FTR_TM))
1258                 return -ENODEV;
1259
1260         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1261                 return -ENODATA;
1262
1263         /* Flush the state */
1264         flush_tmregs_to_thread(target);
1265         flush_fp_to_thread(target);
1266         flush_altivec_to_thread(target);
1267         flush_vsx_to_thread(target);
1268
1269         for (i = 0; i < 32 ; i++)
1270                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1271         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1272                                   buf, 0, 32 * sizeof(double));
1273
1274         return ret;
1275 }
1276
1277 /**
1278  * tm_cvsx_set - set CVSX registers
1279  * @target:     The target task.
1280  * @regset:     The user regset structure.
1281  * @pos:        The buffer position.
1282  * @count:      Number of bytes to copy.
1283  * @kbuf:       Kernel buffer to copy into.
1284  * @ubuf:       User buffer to copy from.
1285  *
1286  * This function sets the in-transaction checkpointed VSX registers.
1287  *
1288  * When the transaction is active 'ckfp_state' holds the checkpointed
1289  * VSX register values for the current transaction to fall back on
1290  * if it aborts in between. This function sets these checkpointed
1291  * VSX registers. The userspace interface buffer layout is as follows.
1292  *
1293  * struct data {
1294  *      u64     vsx[32];
1295  *};
1296  */
1297 static int tm_cvsx_set(struct task_struct *target,
1298                         const struct user_regset *regset,
1299                         unsigned int pos, unsigned int count,
1300                         const void *kbuf, const void __user *ubuf)
1301 {
1302         u64 buf[32];
1303         int ret, i;
1304
1305         if (!cpu_has_feature(CPU_FTR_TM))
1306                 return -ENODEV;
1307
1308         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1309                 return -ENODATA;
1310
1311         /* Flush the state */
1312         flush_tmregs_to_thread(target);
1313         flush_fp_to_thread(target);
1314         flush_altivec_to_thread(target);
1315         flush_vsx_to_thread(target);
1316
1317         for (i = 0; i < 32 ; i++)
1318                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1319
1320         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1321                                  buf, 0, 32 * sizeof(double));
1322         if (!ret)
1323                 for (i = 0; i < 32 ; i++)
1324                         target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1325
1326         return ret;
1327 }
1328
1329 /**
1330  * tm_spr_active - get active number of registers in TM SPR
1331  * @target:     The target task.
1332  * @regset:     The user regset structure.
1333  *
1334  * This function checks the active number of available
1335  * registers in the transactional memory SPR category.
1336  */
1337 static int tm_spr_active(struct task_struct *target,
1338                          const struct user_regset *regset)
1339 {
1340         if (!cpu_has_feature(CPU_FTR_TM))
1341                 return -ENODEV;
1342
1343         return regset->n;
1344 }
1345
1346 /**
1347  * tm_spr_get - get the TM related SPR registers
1348  * @target:     The target task.
1349  * @regset:     The user regset structure.
1350  * @pos:        The buffer position.
1351  * @count:      Number of bytes to copy.
1352  * @kbuf:       Kernel buffer to copy from.
1353  * @ubuf:       User buffer to copy into.
1354  *
1355  * This function gets transactional memory related SPR registers.
1356  * The userspace interface buffer layout is as follows.
1357  *
1358  * struct {
1359  *      u64             tm_tfhar;
1360  *      u64             tm_texasr;
1361  *      u64             tm_tfiar;
1362  * };
1363  */
1364 static int tm_spr_get(struct task_struct *target,
1365                       const struct user_regset *regset,
1366                       unsigned int pos, unsigned int count,
1367                       void *kbuf, void __user *ubuf)
1368 {
1369         int ret;
1370
1371         /* Build tests */
1372         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1373         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1374         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1375
1376         if (!cpu_has_feature(CPU_FTR_TM))
1377                 return -ENODEV;
1378
1379         /* Flush the states */
1380         flush_tmregs_to_thread(target);
1381         flush_fp_to_thread(target);
1382         flush_altivec_to_thread(target);
1383
1384         /* TFHAR register */
1385         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1386                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1387
1388         /* TEXASR register */
1389         if (!ret)
1390                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1391                                 &target->thread.tm_texasr, sizeof(u64),
1392                                 2 * sizeof(u64));
1393
1394         /* TFIAR register */
1395         if (!ret)
1396                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1397                                 &target->thread.tm_tfiar,
1398                                 2 * sizeof(u64), 3 * sizeof(u64));
1399         return ret;
1400 }
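
/*
 * Illustrative sketch (not part of this file): a tracer-side mirror of the
 * TM SPR regset documented above, selected with NT_PPC_TM_SPR (assumed from
 * <linux/elf.h>).  The struct name is hypothetical.
 *
 *	struct ppc_tm_sprs {
 *		__u64 tm_tfhar;		// transaction failure handler address
 *		__u64 tm_texasr;	// transaction exception and summary register
 *		__u64 tm_tfiar;		// transaction failure instruction address
 *	};
 */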
1401
1402 /**
1403  * tm_spr_set - set the TM related SPR registers
1404  * @target:     The target task.
1405  * @regset:     The user regset structure.
1406  * @pos:        The buffer position.
1407  * @count:      Number of bytes to copy.
1408  * @kbuf:       Kernel buffer to copy into.
1409  * @ubuf:       User buffer to copy from.
1410  *
1411  * This function sets transactional memory related SPR registers.
1412  * The userspace interface buffer layout is as follows.
1413  *
1414  * struct {
1415  *      u64             tm_tfhar;
1416  *      u64             tm_texasr;
1417  *      u64             tm_tfiar;
1418  * };
1419  */
1420 static int tm_spr_set(struct task_struct *target,
1421                       const struct user_regset *regset,
1422                       unsigned int pos, unsigned int count,
1423                       const void *kbuf, const void __user *ubuf)
1424 {
1425         int ret;
1426
1427         /* Build tests */
1428         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1429         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1430         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1431
1432         if (!cpu_has_feature(CPU_FTR_TM))
1433                 return -ENODEV;
1434
1435         /* Flush the states */
1436         flush_tmregs_to_thread(target);
1437         flush_fp_to_thread(target);
1438         flush_altivec_to_thread(target);
1439
1440         /* TFHAR register */
1441         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1442                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1443
1444         /* TEXASR register */
1445         if (!ret)
1446                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1447                                 &target->thread.tm_texasr, sizeof(u64),
1448                                 2 * sizeof(u64));
1449
1450         /* TFIAR register */
1451         if (!ret)
1452                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1453                                 &target->thread.tm_tfiar,
1454                                  2 * sizeof(u64), 3 * sizeof(u64));
1455         return ret;
1456 }
1457
1458 static int tm_tar_active(struct task_struct *target,
1459                          const struct user_regset *regset)
1460 {
1461         if (!cpu_has_feature(CPU_FTR_TM))
1462                 return -ENODEV;
1463
1464         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1465                 return regset->n;
1466
1467         return 0;
1468 }
1469
1470 static int tm_tar_get(struct task_struct *target,
1471                       const struct user_regset *regset,
1472                       unsigned int pos, unsigned int count,
1473                       void *kbuf, void __user *ubuf)
1474 {
1475         int ret;
1476
1477         if (!cpu_has_feature(CPU_FTR_TM))
1478                 return -ENODEV;
1479
1480         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1481                 return -ENODATA;
1482
1483         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1484                                 &target->thread.tm_tar, 0, sizeof(u64));
1485         return ret;
1486 }
1487
1488 static int tm_tar_set(struct task_struct *target,
1489                       const struct user_regset *regset,
1490                       unsigned int pos, unsigned int count,
1491                       const void *kbuf, const void __user *ubuf)
1492 {
1493         int ret;
1494
1495         if (!cpu_has_feature(CPU_FTR_TM))
1496                 return -ENODEV;
1497
1498         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1499                 return -ENODATA;
1500
1501         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1502                                 &target->thread.tm_tar, 0, sizeof(u64));
1503         return ret;
1504 }
1505
1506 static int tm_ppr_active(struct task_struct *target,
1507                          const struct user_regset *regset)
1508 {
1509         if (!cpu_has_feature(CPU_FTR_TM))
1510                 return -ENODEV;
1511
1512         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1513                 return regset->n;
1514
1515         return 0;
1516 }
1517
1518
1519 static int tm_ppr_get(struct task_struct *target,
1520                       const struct user_regset *regset,
1521                       unsigned int pos, unsigned int count,
1522                       void *kbuf, void __user *ubuf)
1523 {
1524         int ret;
1525
1526         if (!cpu_has_feature(CPU_FTR_TM))
1527                 return -ENODEV;
1528
1529         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1530                 return -ENODATA;
1531
1532         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1533                                 &target->thread.tm_ppr, 0, sizeof(u64));
1534         return ret;
1535 }
1536
1537 static int tm_ppr_set(struct task_struct *target,
1538                       const struct user_regset *regset,
1539                       unsigned int pos, unsigned int count,
1540                       const void *kbuf, const void __user *ubuf)
1541 {
1542         int ret;
1543
1544         if (!cpu_has_feature(CPU_FTR_TM))
1545                 return -ENODEV;
1546
1547         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1548                 return -ENODATA;
1549
1550         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1551                                 &target->thread.tm_ppr, 0, sizeof(u64));
1552         return ret;
1553 }
1554
1555 static int tm_dscr_active(struct task_struct *target,
1556                          const struct user_regset *regset)
1557 {
1558         if (!cpu_has_feature(CPU_FTR_TM))
1559                 return -ENODEV;
1560
1561         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1562                 return regset->n;
1563
1564         return 0;
1565 }
1566
1567 static int tm_dscr_get(struct task_struct *target,
1568                       const struct user_regset *regset,
1569                       unsigned int pos, unsigned int count,
1570                       void *kbuf, void __user *ubuf)
1571 {
1572         int ret;
1573
1574         if (!cpu_has_feature(CPU_FTR_TM))
1575                 return -ENODEV;
1576
1577         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1578                 return -ENODATA;
1579
1580         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1581                                 &target->thread.tm_dscr, 0, sizeof(u64));
1582         return ret;
1583 }
1584
1585 static int tm_dscr_set(struct task_struct *target,
1586                       const struct user_regset *regset,
1587                       unsigned int pos, unsigned int count,
1588                       const void *kbuf, const void __user *ubuf)
1589 {
1590         int ret;
1591
1592         if (!cpu_has_feature(CPU_FTR_TM))
1593                 return -ENODEV;
1594
1595         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1596                 return -ENODATA;
1597
1598         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1599                                 &target->thread.tm_dscr, 0, sizeof(u64));
1600         return ret;
1601 }
1602 #endif  /* CONFIG_PPC_TRANSACTIONAL_MEM */
1603
1604 #ifdef CONFIG_PPC64
1605 static int ppr_get(struct task_struct *target,
1606                       const struct user_regset *regset,
1607                       unsigned int pos, unsigned int count,
1608                       void *kbuf, void __user *ubuf)
1609 {
1610         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1611                                    &target->thread.ppr, 0, sizeof(u64));
1612 }
1613
1614 static int ppr_set(struct task_struct *target,
1615                       const struct user_regset *regset,
1616                       unsigned int pos, unsigned int count,
1617                       const void *kbuf, const void __user *ubuf)
1618 {
1619         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1620                                   &target->thread.ppr, 0, sizeof(u64));
1621 }
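     /*
      * For illustration only: these single-register regsets reach userspace
      * through the generic regset interface, so a debugger would typically
      * read the PPR with something along these lines (error handling
      * omitted, 'pid' being the traced thread):
      *
      *   u64 ppr;
      *   struct iovec iov = { .iov_base = &ppr, .iov_len = sizeof(ppr) };
      *   ptrace(PTRACE_GETREGSET, pid, NT_PPC_PPR, &iov);
      */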
1622
1623 static int dscr_get(struct task_struct *target,
1624                       const struct user_regset *regset,
1625                       unsigned int pos, unsigned int count,
1626                       void *kbuf, void __user *ubuf)
1627 {
1628         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1629                                    &target->thread.dscr, 0, sizeof(u64));
1630 }
1631 static int dscr_set(struct task_struct *target,
1632                       const struct user_regset *regset,
1633                       unsigned int pos, unsigned int count,
1634                       const void *kbuf, const void __user *ubuf)
1635 {
1636         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1637                                   &target->thread.dscr, 0, sizeof(u64));
1638 }
1639 #endif
1640 #ifdef CONFIG_PPC_BOOK3S_64
1641 static int tar_get(struct task_struct *target,
1642                       const struct user_regset *regset,
1643                       unsigned int pos, unsigned int count,
1644                       void *kbuf, void __user *ubuf)
1645 {
1646         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1647                                    &target->thread.tar, 0, sizeof(u64));
1648 }
1649 static int tar_set(struct task_struct *target,
1650                       const struct user_regset *regset,
1651                       unsigned int pos, unsigned int count,
1652                       const void *kbuf, const void __user *ubuf)
1653 {
1654         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1655                                   &target->thread.tar, 0, sizeof(u64));
1656 }
1657
1658 static int ebb_active(struct task_struct *target,
1659                          const struct user_regset *regset)
1660 {
1661         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1662                 return -ENODEV;
1663
1664         if (target->thread.used_ebb)
1665                 return regset->n;
1666
1667         return 0;
1668 }
1669
1670 static int ebb_get(struct task_struct *target,
1671                       const struct user_regset *regset,
1672                       unsigned int pos, unsigned int count,
1673                       void *kbuf, void __user *ubuf)
1674 {
1675         /* Build tests */
1676         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1677         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1678
1679         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1680                 return -ENODEV;
1681
1682         if (!target->thread.used_ebb)
1683                 return -ENODATA;
1684
1685         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1686                         &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1687 }
1688
1689 static int ebb_set(struct task_struct *target,
1690                       const struct user_regset *regset,
1691                       unsigned int pos, unsigned int count,
1692                       const void *kbuf, const void __user *ubuf)
1693 {
1694         int ret = 0;
1695
1696         /* Build tests */
1697         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1698         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1699
1700         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1701                 return -ENODEV;
1702
1703         if (target->thread.used_ebb)
1704                 return -ENODATA;
1705
1706         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1707                         &target->thread.ebbrr, 0, sizeof(unsigned long));
1708
1709         if (!ret)
1710                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1711                         &target->thread.ebbhr, sizeof(unsigned long),
1712                         2 * sizeof(unsigned long));
1713
1714         if (!ret)
1715                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1716                         &target->thread.bescr,
1717                         2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1718
1719         return ret;
1720 }
1721 static int pmu_active(struct task_struct *target,
1722                          const struct user_regset *regset)
1723 {
1724         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1725                 return -ENODEV;
1726
1727         return regset->n;
1728 }
1729
1730 static int pmu_get(struct task_struct *target,
1731                       const struct user_regset *regset,
1732                       unsigned int pos, unsigned int count,
1733                       void *kbuf, void __user *ubuf)
1734 {
1735         /* Build tests */
1736         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1737         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1738         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1739         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1740
1741         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1742                 return -ENODEV;
1743
1744         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1745                         &target->thread.siar, 0,
1746                         5 * sizeof(unsigned long));
1747 }
1748
1749 static int pmu_set(struct task_struct *target,
1750                       const struct user_regset *regset,
1751                       unsigned int pos, unsigned int count,
1752                       const void *kbuf, const void __user *ubuf)
1753 {
1754         int ret = 0;
1755
1756         /* Build tests */
1757         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1758         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1759         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1760         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1761
1762         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1763                 return -ENODEV;
1764
1765         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1766                         &target->thread.siar, 0,
1767                         sizeof(unsigned long));
1768
1769         if (!ret)
1770                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1771                         &target->thread.sdar, sizeof(unsigned long),
1772                         2 * sizeof(unsigned long));
1773
1774         if (!ret)
1775                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1776                         &target->thread.sier, 2 * sizeof(unsigned long),
1777                         3 * sizeof(unsigned long));
1778
1779         if (!ret)
1780                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1781                         &target->thread.mmcr2, 3 * sizeof(unsigned long),
1782                         4 * sizeof(unsigned long));
1783
1784         if (!ret)
1785                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1786                         &target->thread.mmcr0, 4 * sizeof(unsigned long),
1787                         5 * sizeof(unsigned long));
1788         return ret;
1789 }
1790 #endif
1791
1792 #ifdef CONFIG_PPC_MEM_KEYS
1793 static int pkey_active(struct task_struct *target,
1794                        const struct user_regset *regset)
1795 {
1796         if (!arch_pkeys_enabled())
1797                 return -ENODEV;
1798
1799         return regset->n;
1800 }
1801
1802 static int pkey_get(struct task_struct *target,
1803                     const struct user_regset *regset,
1804                     unsigned int pos, unsigned int count,
1805                     void *kbuf, void __user *ubuf)
1806 {
1807         BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
1808         BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
1809
1810         if (!arch_pkeys_enabled())
1811                 return -ENODEV;
1812
1813         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1814                                    &target->thread.amr, 0,
1815                                    ELF_NPKEY * sizeof(unsigned long));
1816 }
1817
1818 static int pkey_set(struct task_struct *target,
1819                       const struct user_regset *regset,
1820                       unsigned int pos, unsigned int count,
1821                       const void *kbuf, const void __user *ubuf)
1822 {
1823         u64 new_amr;
1824         int ret;
1825
1826         if (!arch_pkeys_enabled())
1827                 return -ENODEV;
1828
1829         /* Only the AMR can be set from userspace */
1830         if (pos != 0 || count != sizeof(new_amr))
1831                 return -EINVAL;
1832
1833         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1834                                  &new_amr, 0, sizeof(new_amr));
1835         if (ret)
1836                 return ret;
1837
1838         /* UAMOR determines which bits of the AMR can be set from userspace. */
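             /*
              * Illustrative example: with uamor == 0x3 and a current amr of
              * 0x5, a request of new_amr == 0xa yields (0xa & 0x3) |
              * (0x5 & ~0x3) == 0x2 | 0x4 == 0x6, i.e. only the
              * UAMOR-permitted bits are updated.
              */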
1839         target->thread.amr = (new_amr & target->thread.uamor) |
1840                 (target->thread.amr & ~target->thread.uamor);
1841
1842         return 0;
1843 }
1844 #endif /* CONFIG_PPC_MEM_KEYS */
1845
1846 /*
1847  * These are our native regset flavors.
1848  */
1849 enum powerpc_regset {
1850         REGSET_GPR,
1851         REGSET_FPR,
1852 #ifdef CONFIG_ALTIVEC
1853         REGSET_VMX,
1854 #endif
1855 #ifdef CONFIG_VSX
1856         REGSET_VSX,
1857 #endif
1858 #ifdef CONFIG_SPE
1859         REGSET_SPE,
1860 #endif
1861 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1862         REGSET_TM_CGPR,         /* TM checkpointed GPR registers */
1863         REGSET_TM_CFPR,         /* TM checkpointed FPR registers */
1864         REGSET_TM_CVMX,         /* TM checkpointed VMX registers */
1865         REGSET_TM_CVSX,         /* TM checkpointed VSX registers */
1866         REGSET_TM_SPR,          /* TM specific SPR registers */
1867         REGSET_TM_CTAR,         /* TM checkpointed TAR register */
1868         REGSET_TM_CPPR,         /* TM checkpointed PPR register */
1869         REGSET_TM_CDSCR,        /* TM checkpointed DSCR register */
1870 #endif
1871 #ifdef CONFIG_PPC64
1872         REGSET_PPR,             /* PPR register */
1873         REGSET_DSCR,            /* DSCR register */
1874 #endif
1875 #ifdef CONFIG_PPC_BOOK3S_64
1876         REGSET_TAR,             /* TAR register */
1877         REGSET_EBB,             /* EBB registers */
1878         REGSET_PMR,             /* Performance Monitor Registers */
1879 #endif
1880 #ifdef CONFIG_PPC_MEM_KEYS
1881         REGSET_PKEY,            /* AMR register */
1882 #endif
1883 };
1884
1885 static const struct user_regset native_regsets[] = {
1886         [REGSET_GPR] = {
1887                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1888                 .size = sizeof(long), .align = sizeof(long),
1889                 .get = gpr_get, .set = gpr_set
1890         },
1891         [REGSET_FPR] = {
1892                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1893                 .size = sizeof(double), .align = sizeof(double),
1894                 .get = fpr_get, .set = fpr_set
1895         },
1896 #ifdef CONFIG_ALTIVEC
1897         [REGSET_VMX] = {
1898                 .core_note_type = NT_PPC_VMX, .n = 34,
1899                 .size = sizeof(vector128), .align = sizeof(vector128),
1900                 .active = vr_active, .get = vr_get, .set = vr_set
1901         },
1902 #endif
1903 #ifdef CONFIG_VSX
1904         [REGSET_VSX] = {
1905                 .core_note_type = NT_PPC_VSX, .n = 32,
1906                 .size = sizeof(double), .align = sizeof(double),
1907                 .active = vsr_active, .get = vsr_get, .set = vsr_set
1908         },
1909 #endif
1910 #ifdef CONFIG_SPE
1911         [REGSET_SPE] = {
1912                 .core_note_type = NT_PPC_SPE, .n = 35,
1913                 .size = sizeof(u32), .align = sizeof(u32),
1914                 .active = evr_active, .get = evr_get, .set = evr_set
1915         },
1916 #endif
1917 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1918         [REGSET_TM_CGPR] = {
1919                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1920                 .size = sizeof(long), .align = sizeof(long),
1921                 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1922         },
1923         [REGSET_TM_CFPR] = {
1924                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1925                 .size = sizeof(double), .align = sizeof(double),
1926                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1927         },
1928         [REGSET_TM_CVMX] = {
1929                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1930                 .size = sizeof(vector128), .align = sizeof(vector128),
1931                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1932         },
1933         [REGSET_TM_CVSX] = {
1934                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1935                 .size = sizeof(double), .align = sizeof(double),
1936                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1937         },
1938         [REGSET_TM_SPR] = {
1939                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1940                 .size = sizeof(u64), .align = sizeof(u64),
1941                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1942         },
1943         [REGSET_TM_CTAR] = {
1944                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1945                 .size = sizeof(u64), .align = sizeof(u64),
1946                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1947         },
1948         [REGSET_TM_CPPR] = {
1949                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1950                 .size = sizeof(u64), .align = sizeof(u64),
1951                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1952         },
1953         [REGSET_TM_CDSCR] = {
1954                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1955                 .size = sizeof(u64), .align = sizeof(u64),
1956                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1957         },
1958 #endif
1959 #ifdef CONFIG_PPC64
1960         [REGSET_PPR] = {
1961                 .core_note_type = NT_PPC_PPR, .n = 1,
1962                 .size = sizeof(u64), .align = sizeof(u64),
1963                 .get = ppr_get, .set = ppr_set
1964         },
1965         [REGSET_DSCR] = {
1966                 .core_note_type = NT_PPC_DSCR, .n = 1,
1967                 .size = sizeof(u64), .align = sizeof(u64),
1968                 .get = dscr_get, .set = dscr_set
1969         },
1970 #endif
1971 #ifdef CONFIG_PPC_BOOK3S_64
1972         [REGSET_TAR] = {
1973                 .core_note_type = NT_PPC_TAR, .n = 1,
1974                 .size = sizeof(u64), .align = sizeof(u64),
1975                 .get = tar_get, .set = tar_set
1976         },
1977         [REGSET_EBB] = {
1978                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1979                 .size = sizeof(u64), .align = sizeof(u64),
1980                 .active = ebb_active, .get = ebb_get, .set = ebb_set
1981         },
1982         [REGSET_PMR] = {
1983                 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1984                 .size = sizeof(u64), .align = sizeof(u64),
1985                 .active = pmu_active, .get = pmu_get, .set = pmu_set
1986         },
1987 #endif
1988 #ifdef CONFIG_PPC_MEM_KEYS
1989         [REGSET_PKEY] = {
1990                 .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
1991                 .size = sizeof(u64), .align = sizeof(u64),
1992                 .active = pkey_active, .get = pkey_get, .set = pkey_set
1993         },
1994 #endif
1995 };
1996
1997 static const struct user_regset_view user_ppc_native_view = {
1998         .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
1999         .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2000 };
2001
2002 #ifdef CONFIG_PPC64
2003 #include <linux/compat.h>
2004
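     /*
      * The two helpers below implement the 32-bit (compat) view of the
      * 64-bit GPR file: each register is truncated to / widened from a
      * compat_ulong_t, the MSR is read through get_user_msr(), and on the
      * store side only the MSR and TRAP words are filtered through
      * set_user_msr()/set_user_trap(); the rest of the user area is
      * zero-filled or ignored as appropriate.
      */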
2005 static int gpr32_get_common(struct task_struct *target,
2006                      const struct user_regset *regset,
2007                      unsigned int pos, unsigned int count,
2008                             void *kbuf, void __user *ubuf,
2009                             unsigned long *regs)
2010 {
2011         compat_ulong_t *k = kbuf;
2012         compat_ulong_t __user *u = ubuf;
2013         compat_ulong_t reg;
2014
2015         pos /= sizeof(reg);
2016         count /= sizeof(reg);
2017
2018         if (kbuf)
2019                 for (; count > 0 && pos < PT_MSR; --count)
2020                         *k++ = regs[pos++];
2021         else
2022                 for (; count > 0 && pos < PT_MSR; --count)
2023                         if (__put_user((compat_ulong_t) regs[pos++], u++))
2024                                 return -EFAULT;
2025
2026         if (count > 0 && pos == PT_MSR) {
2027                 reg = get_user_msr(target);
2028                 if (kbuf)
2029                         *k++ = reg;
2030                 else if (__put_user(reg, u++))
2031                         return -EFAULT;
2032                 ++pos;
2033                 --count;
2034         }
2035
2036         if (kbuf)
2037                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2038                         *k++ = regs[pos++];
2039         else
2040                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2041                         if (__put_user((compat_ulong_t) regs[pos++], u++))
2042                                 return -EFAULT;
2043
2044         kbuf = k;
2045         ubuf = u;
2046         pos *= sizeof(reg);
2047         count *= sizeof(reg);
2048         return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
2049                                         PT_REGS_COUNT * sizeof(reg), -1);
2050 }
2051
2052 static int gpr32_set_common(struct task_struct *target,
2053                      const struct user_regset *regset,
2054                      unsigned int pos, unsigned int count,
2055                      const void *kbuf, const void __user *ubuf,
2056                      unsigned long *regs)
2057 {
2058         const compat_ulong_t *k = kbuf;
2059         const compat_ulong_t __user *u = ubuf;
2060         compat_ulong_t reg;
2061
2062         pos /= sizeof(reg);
2063         count /= sizeof(reg);
2064
2065         if (kbuf)
2066                 for (; count > 0 && pos < PT_MSR; --count)
2067                         regs[pos++] = *k++;
2068         else
2069                 for (; count > 0 && pos < PT_MSR; --count) {
2070                         if (__get_user(reg, u++))
2071                                 return -EFAULT;
2072                         regs[pos++] = reg;
2073                 }
2074
2076         if (count > 0 && pos == PT_MSR) {
2077                 if (kbuf)
2078                         reg = *k++;
2079                 else if (__get_user(reg, u++))
2080                         return -EFAULT;
2081                 set_user_msr(target, reg);
2082                 ++pos;
2083                 --count;
2084         }
2085
2086         if (kbuf) {
2087                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2088                         regs[pos++] = *k++;
2089                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2090                         ++k;
2091         } else {
2092                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2093                         if (__get_user(reg, u++))
2094                                 return -EFAULT;
2095                         regs[pos++] = reg;
2096                 }
2097                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2098                         if (__get_user(reg, u++))
2099                                 return -EFAULT;
2100         }
2101
2102         if (count > 0 && pos == PT_TRAP) {
2103                 if (kbuf)
2104                         reg = *k++;
2105                 else if (__get_user(reg, u++))
2106                         return -EFAULT;
2107                 set_user_trap(target, reg);
2108                 ++pos;
2109                 --count;
2110         }
2111
2112         kbuf = k;
2113         ubuf = u;
2114         pos *= sizeof(reg);
2115         count *= sizeof(reg);
2116         return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2117                                          (PT_TRAP + 1) * sizeof(reg), -1);
2118 }
2119
2120 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2121 static int tm_cgpr32_get(struct task_struct *target,
2122                      const struct user_regset *regset,
2123                      unsigned int pos, unsigned int count,
2124                      void *kbuf, void __user *ubuf)
2125 {
2126         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2127                         &target->thread.ckpt_regs.gpr[0]);
2128 }
2129
2130 static int tm_cgpr32_set(struct task_struct *target,
2131                      const struct user_regset *regset,
2132                      unsigned int pos, unsigned int count,
2133                      const void *kbuf, const void __user *ubuf)
2134 {
2135         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2136                         &target->thread.ckpt_regs.gpr[0]);
2137 }
2138 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2139
2140 static int gpr32_get(struct task_struct *target,
2141                      const struct user_regset *regset,
2142                      unsigned int pos, unsigned int count,
2143                      void *kbuf, void __user *ubuf)
2144 {
2145         int i;
2146
2147         if (target->thread.regs == NULL)
2148                 return -EIO;
2149
2150         if (!FULL_REGS(target->thread.regs)) {
2151                 /*
2152                  * We have a partial register set.
2153                  * Fill 14-31 with bogus values.
2154                  */
2155                 for (i = 14; i < 32; i++)
2156                         target->thread.regs->gpr[i] = NV_REG_POISON;
2157         }
2158         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2159                         &target->thread.regs->gpr[0]);
2160 }
2161
2162 static int gpr32_set(struct task_struct *target,
2163                      const struct user_regset *regset,
2164                      unsigned int pos, unsigned int count,
2165                      const void *kbuf, const void __user *ubuf)
2166 {
2167         if (target->thread.regs == NULL)
2168                 return -EIO;
2169
2170         CHECK_FULL_REGS(target->thread.regs);
2171         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2172                         &target->thread.regs->gpr[0]);
2173 }
2174
2175 /*
2176  * These are the regset flavors matching the CONFIG_PPC32 native set.
2177  */
2178 static const struct user_regset compat_regsets[] = {
2179         [REGSET_GPR] = {
2180                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2181                 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2182                 .get = gpr32_get, .set = gpr32_set
2183         },
2184         [REGSET_FPR] = {
2185                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2186                 .size = sizeof(double), .align = sizeof(double),
2187                 .get = fpr_get, .set = fpr_set
2188         },
2189 #ifdef CONFIG_ALTIVEC
2190         [REGSET_VMX] = {
2191                 .core_note_type = NT_PPC_VMX, .n = 34,
2192                 .size = sizeof(vector128), .align = sizeof(vector128),
2193                 .active = vr_active, .get = vr_get, .set = vr_set
2194         },
2195 #endif
2196 #ifdef CONFIG_SPE
2197         [REGSET_SPE] = {
2198                 .core_note_type = NT_PPC_SPE, .n = 35,
2199                 .size = sizeof(u32), .align = sizeof(u32),
2200                 .active = evr_active, .get = evr_get, .set = evr_set
2201         },
2202 #endif
2203 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2204         [REGSET_TM_CGPR] = {
2205                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2206                 .size = sizeof(long), .align = sizeof(long),
2207                 .active = tm_cgpr_active,
2208                 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2209         },
2210         [REGSET_TM_CFPR] = {
2211                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2212                 .size = sizeof(double), .align = sizeof(double),
2213                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2214         },
2215         [REGSET_TM_CVMX] = {
2216                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2217                 .size = sizeof(vector128), .align = sizeof(vector128),
2218                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2219         },
2220         [REGSET_TM_CVSX] = {
2221                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2222                 .size = sizeof(double), .align = sizeof(double),
2223                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2224         },
2225         [REGSET_TM_SPR] = {
2226                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2227                 .size = sizeof(u64), .align = sizeof(u64),
2228                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2229         },
2230         [REGSET_TM_CTAR] = {
2231                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2232                 .size = sizeof(u64), .align = sizeof(u64),
2233                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2234         },
2235         [REGSET_TM_CPPR] = {
2236                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2237                 .size = sizeof(u64), .align = sizeof(u64),
2238                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2239         },
2240         [REGSET_TM_CDSCR] = {
2241                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2242                 .size = sizeof(u64), .align = sizeof(u64),
2243                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2244         },
2245 #endif
2246 #ifdef CONFIG_PPC64
2247         [REGSET_PPR] = {
2248                 .core_note_type = NT_PPC_PPR, .n = 1,
2249                 .size = sizeof(u64), .align = sizeof(u64),
2250                 .get = ppr_get, .set = ppr_set
2251         },
2252         [REGSET_DSCR] = {
2253                 .core_note_type = NT_PPC_DSCR, .n = 1,
2254                 .size = sizeof(u64), .align = sizeof(u64),
2255                 .get = dscr_get, .set = dscr_set
2256         },
2257 #endif
2258 #ifdef CONFIG_PPC_BOOK3S_64
2259         [REGSET_TAR] = {
2260                 .core_note_type = NT_PPC_TAR, .n = 1,
2261                 .size = sizeof(u64), .align = sizeof(u64),
2262                 .get = tar_get, .set = tar_set
2263         },
2264         [REGSET_EBB] = {
2265                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2266                 .size = sizeof(u64), .align = sizeof(u64),
2267                 .active = ebb_active, .get = ebb_get, .set = ebb_set
2268         },
2269 #endif
2270 };
2271
2272 static const struct user_regset_view user_ppc_compat_view = {
2273         .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2274         .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2275 };
2276 #endif  /* CONFIG_PPC64 */
2277
2278 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2279 {
2280 #ifdef CONFIG_PPC64
2281         if (test_tsk_thread_flag(task, TIF_32BIT))
2282                 return &user_ppc_compat_view;
2283 #endif
2284         return &user_ppc_native_view;
2285 }
2286
2288 void user_enable_single_step(struct task_struct *task)
2289 {
2290         struct pt_regs *regs = task->thread.regs;
2291
2292         if (regs != NULL) {
2293 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2294                 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2295                 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2296                 regs->msr |= MSR_DE;
2297 #else
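                     /* Classic MSR trace bits: branch trace (BE) off, single step (SE) on. */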
2298                 regs->msr &= ~MSR_BE;
2299                 regs->msr |= MSR_SE;
2300 #endif
2301         }
2302         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2303 }
2304
2305 void user_enable_block_step(struct task_struct *task)
2306 {
2307         struct pt_regs *regs = task->thread.regs;
2308
2309         if (regs != NULL) {
2310 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2311                 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2312                 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2313                 regs->msr |= MSR_DE;
2314 #else
2315                 regs->msr &= ~MSR_SE;
2316                 regs->msr |= MSR_BE;
2317 #endif
2318         }
2319         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2320 }
2321
2322 void user_disable_single_step(struct task_struct *task)
2323 {
2324         struct pt_regs *regs = task->thread.regs;
2325
2326         if (regs != NULL) {
2327 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2328                 /*
2329                  * The logic to disable single stepping should be as
2330                  * simple as turning off the Instruction Complete flag.
2331                  * And, after doing so, if all debug flags are off, turn
2332                  * off DBCR0(IDM) and MSR(DE) .... Torez
2333                  */
2334                 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
2335                 /*
2336                  * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2337                  */
2338                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2339                                         task->thread.debug.dbcr1)) {
2340                         /*
2341                          * All debug events were off.....
2342                          */
2343                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2344                         regs->msr &= ~MSR_DE;
2345                 }
2346 #else
2347                 regs->msr &= ~(MSR_SE | MSR_BE);
2348 #endif
2349         }
2350         clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2351 }
2352
2353 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2354 void ptrace_triggered(struct perf_event *bp,
2355                       struct perf_sample_data *data, struct pt_regs *regs)
2356 {
2357         struct perf_event_attr attr;
2358
2359         /*
2360          * Disable the breakpoint request here since ptrace has defined a
2361          * one-shot behaviour for breakpoint exceptions in PPC64.
2362          * The SIGTRAP signal is generated automatically for us in do_dabr().
2363          * We don't have to do anything about that here.
2364          */
2365         attr = bp->attr;
2366         attr.disabled = true;
2367         modify_user_hw_breakpoint(bp, &attr);
2368 }
2369 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2370
2371 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
2372                                unsigned long data)
2373 {
2374 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2375         int ret;
2376         struct thread_struct *thread = &(task->thread);
2377         struct perf_event *bp;
2378         struct perf_event_attr attr;
2379 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2380 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2381         struct arch_hw_breakpoint hw_brk;
2382 #endif
2383
2384         /* For ppc64 we support one DABR and no IABRs at the moment.
2385          *  For embedded processors we support one DAC and no IACs at the
2386          *  moment.
2387          */
2388         if (addr > 0)
2389                 return -EINVAL;
2390
2391         /* The bottom 3 bits in dabr are flags */
2392         if ((data & ~0x7UL) >= TASK_SIZE)
2393                 return -EIO;
2394
2395 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2396         /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2397          *  It was assumed, on previous implementations, that 3 bits were
2398          *  passed together with the data address, fitting the design of the
2399          *  DABR register, as follows:
2400          *
2401          *  bit 0: Read flag
2402          *  bit 1: Write flag
2403          *  bit 2: Breakpoint translation
2404          *
2405          *  Thus, we use them here as so.
2406          *  Thus, we use them here as described above.
2407
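             /*
              * Put differently, a tracer wanting a read+write watchpoint at
              * 'watch_addr' on such a processor would issue something like
              * (illustrative only):
              *
              *   ptrace(PTRACE_SET_DEBUGREG, pid, 0, watch_addr | 0x7);
              *
              * with 0x7 combining the read, write and translation flags
              * described above.
              */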
2408         /* Ensure breakpoint translation bit is set */
2409         if (data && !(data & HW_BRK_TYPE_TRANSLATE))
2410                 return -EIO;
2411         hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2412         hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2413         hw_brk.len = 8;
2414 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2415         bp = thread->ptrace_bps[0];
2416         if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
2417                 if (bp) {
2418                         unregister_hw_breakpoint(bp);
2419                         thread->ptrace_bps[0] = NULL;
2420                 }
2421                 return 0;
2422         }
2423         if (bp) {
2424                 attr = bp->attr;
2425                 attr.bp_addr = hw_brk.address;
2426                 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
2427
2428                 /* Enable breakpoint */
2429                 attr.disabled = false;
2430
2431                 ret = modify_user_hw_breakpoint(bp, &attr);
2432                 if (ret) {
2433                         return ret;
2434                 }
2435                 thread->ptrace_bps[0] = bp;
2436                 thread->hw_brk = hw_brk;
2437                 return 0;
2438         }
2439
2440         /* Create a new breakpoint request if one doesn't exist already */
2441         hw_breakpoint_init(&attr);
2442         attr.bp_addr = hw_brk.address;
2443         arch_bp_generic_fields(hw_brk.type,
2444                                &attr.bp_type);
2445
2446         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2447                                                ptrace_triggered, NULL, task);
2448         if (IS_ERR(bp)) {
2449                 thread->ptrace_bps[0] = NULL;
2450                 return PTR_ERR(bp);
2451         }
2452
2453 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2454         task->thread.hw_brk = hw_brk;
2455 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2456         /* As described above, it was assumed 3 bits were passed with the data
2457          *  address, but we will assume only the mode bits will be passed
2458          *  so as not to cause alignment restrictions for DAC-based processors.
2459          */
2460
2461         /* DAC's hold the whole address without any mode flags */
2462         task->thread.debug.dac1 = data & ~0x3UL;
2463
2464         if (task->thread.debug.dac1 == 0) {
2465                 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2466                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2467                                         task->thread.debug.dbcr1)) {
2468                         task->thread.regs->msr &= ~MSR_DE;
2469                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2470                 }
2471                 return 0;
2472         }
2473
2474         /* Read or Write bits must be set */
2475
2476         if (!(data & 0x3UL))
2477                 return -EINVAL;
2478
2479         /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2480            register */
2481         task->thread.debug.dbcr0 |= DBCR0_IDM;
2482
2483         /* Check for write and read flags and set DBCR0
2484            accordingly */
2485         dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
2486         if (data & 0x1UL)
2487                 dbcr_dac(task) |= DBCR_DAC1R;
2488         if (data & 0x2UL)
2489                 dbcr_dac(task) |= DBCR_DAC1W;
2490         task->thread.regs->msr |= MSR_DE;
2491 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2492         return 0;
2493 }
2494
2495 /*
2496  * Called by kernel/ptrace.c when detaching..
2497  *
2498  * Make sure single step bits etc are not set.
2499  */
2500 void ptrace_disable(struct task_struct *child)
2501 {
2502         /* make sure the single step bit is not set. */
2503         user_disable_single_step(child);
2504 }
2505
2506 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
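     /*
      * Background note: on these (BookE/40x) parts the hardware exposes
      * Instruction Address Compare (IACn), Data Address Compare (DACn) and
      * Data Value Compare (DVCn) registers, all driven through the DBCR0-2
      * debug control registers manipulated below.
      */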
2507 static long set_instruction_bp(struct task_struct *child,
2508                               struct ppc_hw_breakpoint *bp_info)
2509 {
2510         int slot;
2511         int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2512         int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2513         int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2514         int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
2515
2516         if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2517                 slot2_in_use = 1;
2518         if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2519                 slot4_in_use = 1;
2520
2521         if (bp_info->addr >= TASK_SIZE)
2522                 return -EIO;
2523
2524         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2525
2526                 /* Make sure range is valid. */
2527                 if (bp_info->addr2 >= TASK_SIZE)
2528                         return -EIO;
2529
2530                 /* We need a pair of IAC registers */
2531                 if ((!slot1_in_use) && (!slot2_in_use)) {
2532                         slot = 1;
2533                         child->thread.debug.iac1 = bp_info->addr;
2534                         child->thread.debug.iac2 = bp_info->addr2;
2535                         child->thread.debug.dbcr0 |= DBCR0_IAC1;
2536                         if (bp_info->addr_mode ==
2537                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2538                                 dbcr_iac_range(child) |= DBCR_IAC12X;
2539                         else
2540                                 dbcr_iac_range(child) |= DBCR_IAC12I;
2541 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2542                 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2543                         slot = 3;
2544                         child->thread.debug.iac3 = bp_info->addr;
2545                         child->thread.debug.iac4 = bp_info->addr2;
2546                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2547                         if (bp_info->addr_mode ==
2548                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2549                                 dbcr_iac_range(child) |= DBCR_IAC34X;
2550                         else
2551                                 dbcr_iac_range(child) |= DBCR_IAC34I;
2552 #endif
2553                 } else
2554                         return -ENOSPC;
2555         } else {
2556                 /* We only need one.  If possible leave a pair free in
2557                  * case a range is needed later
2558                  */
2559                 if (!slot1_in_use) {
2560                         /*
2561                          * Don't use iac1 if iac1-iac2 are free and either
2562                          * iac3 or iac4 (but not both) are free
2563                          */
2564                         if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2565                                 slot = 1;
2566                                 child->thread.debug.iac1 = bp_info->addr;
2567                                 child->thread.debug.dbcr0 |= DBCR0_IAC1;
2568                                 goto out;
2569                         }
2570                 }
2571                 if (!slot2_in_use) {
2572                         slot = 2;
2573                         child->thread.debug.iac2 = bp_info->addr;
2574                         child->thread.debug.dbcr0 |= DBCR0_IAC2;
2575 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2576                 } else if (!slot3_in_use) {
2577                         slot = 3;
2578                         child->thread.debug.iac3 = bp_info->addr;
2579                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2580                 } else if (!slot4_in_use) {
2581                         slot = 4;
2582                         child->thread.debug.iac4 = bp_info->addr;
2583                         child->thread.debug.dbcr0 |= DBCR0_IAC4;
2584 #endif
2585                 } else
2586                         return -ENOSPC;
2587         }
2588 out:
2589         child->thread.debug.dbcr0 |= DBCR0_IDM;
2590         child->thread.regs->msr |= MSR_DE;
2591
2592         return slot;
2593 }
2594
2595 static int del_instruction_bp(struct task_struct *child, int slot)
2596 {
2597         switch (slot) {
2598         case 1:
2599                 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
2600                         return -ENOENT;
2601
2602                 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2603                         /* address range - clear slots 1 & 2 */
2604                         child->thread.debug.iac2 = 0;
2605                         dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2606                 }
2607                 child->thread.debug.iac1 = 0;
2608                 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
2609                 break;
2610         case 2:
2611                 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
2612                         return -ENOENT;
2613
2614                 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2615                         /* used in a range */
2616                         return -EINVAL;
2617                 child->thread.debug.iac2 = 0;
2618                 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
2619                 break;
2620 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2621         case 3:
2622                 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
2623                         return -ENOENT;
2624
2625                 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2626                         /* address range - clear slots 3 & 4 */
2627                         child->thread.debug.iac4 = 0;
2628                         dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2629                 }
2630                 child->thread.debug.iac3 = 0;
2631                 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
2632                 break;
2633         case 4:
2634                 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
2635                         return -ENOENT;
2636
2637                 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2638                         /* Used in a range */
2639                         return -EINVAL;
2640                 child->thread.debug.iac4 = 0;
2641                 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
2642                 break;
2643 #endif
2644         default:
2645                 return -EINVAL;
2646         }
2647         return 0;
2648 }
2649
2650 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2651 {
2652         int byte_enable =
2653                 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2654                 & 0xf;
2655         int condition_mode =
2656                 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2657         int slot;
2658
2659         if (byte_enable && (condition_mode == 0))
2660                 return -EINVAL;
2661
2662         if (bp_info->addr >= TASK_SIZE)
2663                 return -EIO;
2664
2665         if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2666                 slot = 1;
2667                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2668                         dbcr_dac(child) |= DBCR_DAC1R;
2669                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2670                         dbcr_dac(child) |= DBCR_DAC1W;
2671                 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
2672 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2673                 if (byte_enable) {
2674                         child->thread.debug.dvc1 =
2675                                 (unsigned long)bp_info->condition_value;
2676                         child->thread.debug.dbcr2 |=
2677                                 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2678                                  (condition_mode << DBCR2_DVC1M_SHIFT));
2679                 }
2680 #endif
2681 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2682         } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2683                 /* Both dac1 and dac2 are part of a range */
2684                 return -ENOSPC;
2685 #endif
2686         } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2687                 slot = 2;
2688                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2689                         dbcr_dac(child) |= DBCR_DAC2R;
2690                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2691                         dbcr_dac(child) |= DBCR_DAC2W;
2692                 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
2693 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2694                 if (byte_enable) {
2695                         child->thread.debug.dvc2 =
2696                                 (unsigned long)bp_info->condition_value;
2697                         child->thread.debug.dbcr2 |=
2698                                 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2699                                  (condition_mode << DBCR2_DVC2M_SHIFT));
2700                 }
2701 #endif
2702         } else
2703                 return -ENOSPC;
2704         child->thread.debug.dbcr0 |= DBCR0_IDM;
2705         child->thread.regs->msr |= MSR_DE;
2706
2707         return slot + 4;
2708 }
2709
2710 static int del_dac(struct task_struct *child, int slot)
2711 {
2712         if (slot == 1) {
2713                 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
2714                         return -ENOENT;
2715
2716                 child->thread.debug.dac1 = 0;
2717                 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2718 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2719                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2720                         child->thread.debug.dac2 = 0;
2721                         child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
2722                 }
2723                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
2724 #endif
2725 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2726                 child->thread.debug.dvc1 = 0;
2727 #endif
2728         } else if (slot == 2) {
2729                 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
2730                         return -ENOENT;
2731
2732 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2733                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
2734                         /* Part of a range */
2735                         return -EINVAL;
2736                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
2737 #endif
2738 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2739                 child->thread.debug.dvc2 = 0;
2740 #endif
2741                 child->thread.debug.dac2 = 0;
2742                 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2743         } else
2744                 return -EINVAL;
2745
2746         return 0;
2747 }
2748 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2749
2750 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2751 static int set_dac_range(struct task_struct *child,
2752                          struct ppc_hw_breakpoint *bp_info)
2753 {
2754         int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2755
2756         /* We don't allow range watchpoints to be used with DVC */
2757         if (bp_info->condition_mode)
2758                 return -EINVAL;
2759
2760         /*
2761          * Best effort to verify the address range.  The user/supervisor bits
2762          * prevent trapping in kernel space, but let's fail on an obvious bad
2763          * range.  The simple test on the mask is not fool-proof, and any
2764          * exclusive range will spill over into kernel space.
2765          */
2766         if (bp_info->addr >= TASK_SIZE)
2767                 return -EIO;
2768         if (mode == PPC_BREAKPOINT_MODE_MASK) {
2769                 /*
2770                  * dac2 is a bitmask.  Don't allow a mask that makes a
2771                  * kernel space address from a valid dac1 value
2772                  */
2773                 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2774                         return -EIO;
2775         } else {
2776                 /*
2777                  * For range breakpoints, addr2 must also be a valid address
2778                  */
2779                 if (bp_info->addr2 >= TASK_SIZE)
2780                         return -EIO;
2781         }
2782
2783         if (child->thread.debug.dbcr0 &
2784             (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2785                 return -ENOSPC;
2786
2787         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2788                 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
2789         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2790                 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2791         child->thread.debug.dac1 = bp_info->addr;
2792         child->thread.debug.dac2 = bp_info->addr2;
2793         if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2794                 child->thread.debug.dbcr2  |= DBCR2_DAC12M;
2795         else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2796                 child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
2797         else    /* PPC_BREAKPOINT_MODE_MASK */
2798                 child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
2799         child->thread.regs->msr |= MSR_DE;
2800
2801         return 5;
2802 }
2803 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2804
2805 static long ppc_set_hwdebug(struct task_struct *child,
2806                      struct ppc_hw_breakpoint *bp_info)
2807 {
2808 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2809         int len = 0;
2810         struct thread_struct *thread = &(child->thread);
2811         struct perf_event *bp;
2812         struct perf_event_attr attr;
2813 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2814 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2815         struct arch_hw_breakpoint brk;
2816 #endif
2817
2818         if (bp_info->version != 1)
2819                 return -ENOTSUPP;
2820 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2821         /*
2822          * Check for invalid flags and combinations
2823          */
2824         if ((bp_info->trigger_type == 0) ||
2825             (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2826                                        PPC_BREAKPOINT_TRIGGER_RW)) ||
2827             (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2828             (bp_info->condition_mode &
2829              ~(PPC_BREAKPOINT_CONDITION_MODE |
2830                PPC_BREAKPOINT_CONDITION_BE_ALL)))
2831                 return -EINVAL;
2832 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2833         if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2834                 return -EINVAL;
2835 #endif
2836
2837         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2838                 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2839                     (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2840                         return -EINVAL;
2841                 return set_instruction_bp(child, bp_info);
2842         }
2843         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2844                 return set_dac(child, bp_info);
2845
2846 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2847         return set_dac_range(child, bp_info);
2848 #else
2849         return -EINVAL;
2850 #endif
2851 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
2852         /*
2853          * We only support one data breakpoint
2854          */
2855         if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2856             (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
2857             bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2858                 return -EINVAL;
2859
2860         if ((unsigned long)bp_info->addr >= TASK_SIZE)
2861                 return -EIO;
2862
2863         brk.address = bp_info->addr & ~7UL;
2864         brk.type = HW_BRK_TYPE_TRANSLATE;
2865         brk.len = 8;
2866         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2867                 brk.type |= HW_BRK_TYPE_READ;
2868         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2869                 brk.type |= HW_BRK_TYPE_WRITE;
2870 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2871         /*
2872          * Check if the request is for 'range' breakpoints. We can
2873          * support it if range < 8 bytes.
2874          */
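             /*
              * For illustration, a tracer asking for such an inclusive-range
              * write watchpoint might fill the request roughly as follows
              * (field names from the ppc_hw_breakpoint uapi):
              *
              *   struct ppc_hw_breakpoint bp = {
              *           .version        = 1,
              *           .trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
              *           .addr_mode      = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE,
              *           .condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
              *           .addr           = start,
              *           .addr2          = start + 4,
              *   };
              *   ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
              */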
2875         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2876                 len = bp_info->addr2 - bp_info->addr;
2877         else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2878                 len = 1;
2879         else
2880                 return -EINVAL;
2881         bp = thread->ptrace_bps[0];
2882         if (bp)
2883                 return -ENOSPC;
2884
2885         /* Create a new breakpoint request if one doesn't exist already */
2886         hw_breakpoint_init(&attr);
2887         attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2888         attr.bp_len = len;
2889         arch_bp_generic_fields(brk.type, &attr.bp_type);
2890
2891         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2892                                                ptrace_triggered, NULL, child);
2893         if (IS_ERR(bp)) {
2894                 thread->ptrace_bps[0] = NULL;
2895                 return PTR_ERR(bp);
2896         }
2897
2898         return 1;
2899 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2900
2901         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2902                 return -EINVAL;
2903
2904         if (child->thread.hw_brk.address)
2905                 return -ENOSPC;
2906
2907         child->thread.hw_brk = brk;
2908
2909         return 1;
2910 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2911 }
2912
2913 static long ppc_del_hwdebug(struct task_struct *child, long data)
2914 {
2915 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2916         int ret = 0;
2917         struct thread_struct *thread = &(child->thread);
2918         struct perf_event *bp;
2919 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2920 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2921         int rc;
2922
2923         if (data <= 4)
2924                 rc = del_instruction_bp(child, (int)data);
2925         else
2926                 rc = del_dac(child, (int)data - 4);
2927
2928         if (!rc) {
2929                 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2930                                         child->thread.debug.dbcr1)) {
2931                         child->thread.debug.dbcr0 &= ~DBCR0_IDM;
2932                         child->thread.regs->msr &= ~MSR_DE;
2933                 }
2934         }
2935         return rc;
2936 #else
2937         if (data != 1)
2938                 return -EINVAL;
2939
2940 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2941         bp = thread->ptrace_bps[0];
2942         if (bp) {
2943                 unregister_hw_breakpoint(bp);
2944                 thread->ptrace_bps[0] = NULL;
2945         } else
2946                 ret = -ENOENT;
2947         return ret;
2948 #else /* CONFIG_HAVE_HW_BREAKPOINT */
2949         if (child->thread.hw_brk.address == 0)
2950                 return -ENOENT;
2951
2952         child->thread.hw_brk.address = 0;
2953         child->thread.hw_brk.type = 0;
2954 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2955
2956         return 0;
2957 #endif
2958 }
2959
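/*
 * Handle the powerpc-specific ptrace requests; anything not recognised
 * here falls through to the generic ptrace_request().
 */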
2960 long arch_ptrace(struct task_struct *child, long request,
2961                  unsigned long addr, unsigned long data)
2962 {
2963         int ret = -EPERM;
2964         void __user *datavp = (void __user *) data;
2965         unsigned long __user *datalp = datavp;
2966
2967         switch (request) {
2968         /* read the word at location addr in the USER area. */
2969         case PTRACE_PEEKUSR: {
2970                 unsigned long index, tmp;
2971
2972                 ret = -EIO;
2973                 /* convert to index and check */
2974 #ifdef CONFIG_PPC32
2975                 index = addr >> 2;
2976                 if ((addr & 3) || (index > PT_FPSCR)
2977                     || (child->thread.regs == NULL))
2978 #else
2979                 index = addr >> 3;
2980                 if ((addr & 7) || (index > PT_FPSCR))
2981 #endif
2982                         break;
2983
2984                 CHECK_FULL_REGS(child->thread.regs);
2985                 if (index < PT_FPR0) {
2986                         ret = ptrace_get_reg(child, (int) index, &tmp);
2987                         if (ret)
2988                                 break;
2989                 } else {
2990                         unsigned int fpidx = index - PT_FPR0;
2991
2992                         flush_fp_to_thread(child);
2993                         if (fpidx < (PT_FPSCR - PT_FPR0))
2994                                 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
2995                                        sizeof(long));
2996                         else
2997                                 tmp = child->thread.fp_state.fpscr;
2998                 }
2999                 ret = put_user(tmp, datalp);
3000                 break;
3001         }
3002
3003         /* write the word at location addr in the USER area */
3004         case PTRACE_POKEUSR: {
3005                 unsigned long index;
3006
3007                 ret = -EIO;
3008                 /* convert to index and check */
3009 #ifdef CONFIG_PPC32
3010                 index = addr >> 2;
3011                 if ((addr & 3) || (index > PT_FPSCR)
3012                     || (child->thread.regs == NULL))
3013 #else
3014                 index = addr >> 3;
3015                 if ((addr & 7) || (index > PT_FPSCR))
3016 #endif
3017                         break;
3018
3019                 CHECK_FULL_REGS(child->thread.regs);
3020                 if (index < PT_FPR0) {
3021                         ret = ptrace_put_reg(child, index, data);
3022                 } else {
3023                         unsigned int fpidx = index - PT_FPR0;
3024
3025                         flush_fp_to_thread(child);
3026                         if (fpidx < (PT_FPSCR - PT_FPR0))
3027                                 memcpy(&child->thread.TS_FPR(fpidx), &data,
3028                                        sizeof(long));
3029                         else
3030                                 child->thread.fp_state.fpscr = data;
3031                         ret = 0;
3032                 }
3033                 break;
3034         }
3035
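        /*
         * Describe the hardware debug facilities (number of breakpoints,
         * alignment, feature flags) to the tracer via ppc_debug_info.
         */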
3036         case PPC_PTRACE_GETHWDBGINFO: {
3037                 struct ppc_debug_info dbginfo;
3038
3039                 dbginfo.version = 1;
3040 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3041                 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
3042                 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
3043                 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
3044                 dbginfo.data_bp_alignment = 4;
3045                 dbginfo.sizeof_condition = 4;
3046                 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
3047                                    PPC_DEBUG_FEATURE_INSN_BP_MASK;
3048 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
3049                 dbginfo.features |=
3050                                    PPC_DEBUG_FEATURE_DATA_BP_RANGE |
3051                                    PPC_DEBUG_FEATURE_DATA_BP_MASK;
3052 #endif
3053 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3054                 dbginfo.num_instruction_bps = 0;
3055                 dbginfo.num_data_bps = 1;
3056                 dbginfo.num_condition_regs = 0;
3057 #ifdef CONFIG_PPC64
3058                 dbginfo.data_bp_alignment = 8;
3059 #else
3060                 dbginfo.data_bp_alignment = 4;
3061 #endif
3062                 dbginfo.sizeof_condition = 0;
3063 #ifdef CONFIG_HAVE_HW_BREAKPOINT
3064                 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
3065                 if (cpu_has_feature(CPU_FTR_DAWR))
3066                         dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
3067 #else
3068                 dbginfo.features = 0;
3069 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3070 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3071
3072                 if (!access_ok(VERIFY_WRITE, datavp,
3073                                sizeof(struct ppc_debug_info)))
3074                         return -EFAULT;
3075                 ret = __copy_to_user(datavp, &dbginfo,
3076                                      sizeof(struct ppc_debug_info)) ?
3077                       -EFAULT : 0;
3078                 break;
3079         }
3080
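        /*
         * Install a hardware breakpoint/watchpoint described by a
         * ppc_hw_breakpoint read from the tracer.  A minimal userspace
         * sketch (hedged; 'pid' and 'addr' are placeholders) of arming a
         * write watchpoint might look like:
         *
         *	struct ppc_hw_breakpoint bp = {
         *		.version        = PPC_DEBUG_CURRENT_VERSION,
         *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
         *		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
         *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
         *		.addr           = addr,
         *	};
         *	int slot = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
         *
         * The returned slot is what PPC_PTRACE_DELHWDEBUG expects.
         */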
3081         case PPC_PTRACE_SETHWDEBUG: {
3082                 struct ppc_hw_breakpoint bp_info;
3083
3084                 if (!access_ok(VERIFY_READ, datavp,
3085                                sizeof(struct ppc_hw_breakpoint)))
3086                         return -EFAULT;
3087                 ret = __copy_from_user(&bp_info, datavp,
3088                                        sizeof(struct ppc_hw_breakpoint)) ?
3089                       -EFAULT : 0;
3090                 if (!ret)
3091                         ret = ppc_set_hwdebug(child, &bp_info);
3092                 break;
3093         }
3094
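        /* Tear down a breakpoint by the slot handle returned above. */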
3095         case PPC_PTRACE_DELHWDEBUG: {
3096                 ret = ppc_del_hwdebug(child, data);
3097                 break;
3098         }
3099
3100         case PTRACE_GET_DEBUGREG: {
3101 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
3102                 unsigned long dabr_fake;
3103 #endif
3104                 ret = -EINVAL;
3105                 /* We only support one DABR and no IABRs at the moment */
3106                 if (addr > 0)
3107                         break;
3108 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3109                 ret = put_user(child->thread.debug.dac1, datalp);
3110 #else
3111                 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3112                              (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3113                 ret = put_user(dabr_fake, datalp);
3114 #endif
3115                 break;
3116         }
3117
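        /* Older DABR-style interface: write the single debug register. */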
3118         case PTRACE_SET_DEBUGREG:
3119                 ret = ptrace_set_debugreg(child, addr, data);
3120                 break;
3121
3122 #ifdef CONFIG_PPC64
3123         case PTRACE_GETREGS64:
3124 #endif
3125         case PTRACE_GETREGS:    /* Get all pt_regs from the child. */
3126                 return copy_regset_to_user(child, &user_ppc_native_view,
3127                                            REGSET_GPR,
3128                                            0, sizeof(struct pt_regs),
3129                                            datavp);
3130
3131 #ifdef CONFIG_PPC64
3132         case PTRACE_SETREGS64:
3133 #endif
3134         case PTRACE_SETREGS:    /* Set all gp regs in the child. */
3135                 return copy_regset_from_user(child, &user_ppc_native_view,
3136                                              REGSET_GPR,
3137                                              0, sizeof(struct pt_regs),
3138                                              datavp);
3139
3140         case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3141                 return copy_regset_to_user(child, &user_ppc_native_view,
3142                                            REGSET_FPR,
3143                                            0, sizeof(elf_fpregset_t),
3144                                            datavp);
3145
3146         case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3147                 return copy_regset_from_user(child, &user_ppc_native_view,
3148                                              REGSET_FPR,
3149                                              0, sizeof(elf_fpregset_t),
3150                                              datavp);
3151
3152 #ifdef CONFIG_ALTIVEC
3153         case PTRACE_GETVRREGS:
3154                 return copy_regset_to_user(child, &user_ppc_native_view,
3155                                            REGSET_VMX,
3156                                            0, (33 * sizeof(vector128) +
3157                                                sizeof(u32)),
3158                                            datavp);
3159
3160         case PTRACE_SETVRREGS:
3161                 return copy_regset_from_user(child, &user_ppc_native_view,
3162                                              REGSET_VMX,
3163                                              0, (33 * sizeof(vector128) +
3164                                                  sizeof(u32)),
3165                                              datavp);
3166 #endif
3167 #ifdef CONFIG_VSX
3168         case PTRACE_GETVSRREGS:
3169                 return copy_regset_to_user(child, &user_ppc_native_view,
3170                                            REGSET_VSX,
3171                                            0, 32 * sizeof(double),
3172                                            datavp);
3173
3174         case PTRACE_SETVSRREGS:
3175                 return copy_regset_from_user(child, &user_ppc_native_view,
3176                                              REGSET_VSX,
3177                                              0, 32 * sizeof(double),
3178                                              datavp);
3179 #endif
3180 #ifdef CONFIG_SPE
3181         case PTRACE_GETEVRREGS:
3182                 /* Get the child spe register state. */
3183                 return copy_regset_to_user(child, &user_ppc_native_view,
3184                                            REGSET_SPE, 0, 35 * sizeof(u32),
3185                                            datavp);
3186
3187         case PTRACE_SETEVRREGS:
3188                 /* Set the child spe register state. */
3189                 return copy_regset_from_user(child, &user_ppc_native_view,
3190                                              REGSET_SPE, 0, 35 * sizeof(u32),
3191                                              datavp);
3192 #endif
3193
3194         default:
3195                 ret = ptrace_request(child, request, addr, data);
3196                 break;
3197         }
3198         return ret;
3199 }
3200
3201 #ifdef CONFIG_SECCOMP
3202 static int do_seccomp(struct pt_regs *regs)
3203 {
3204         if (!test_thread_flag(TIF_SECCOMP))
3205                 return 0;
3206
3207         /*
3208          * The ABI we present to seccomp tracers is that r3 contains
3209          * the syscall return value and orig_gpr3 contains the first
3210          * syscall parameter. This is different to the ptrace ABI where
3211          * syscall parameter. This is different from the ptrace ABI, where
3212          */
3213         regs->gpr[3] = -ENOSYS;
3214
3215         /*
3216          * We use the __ version here because we have already checked
3217          * TIF_SECCOMP. If this fails, there is nothing left to do, we
3218          * have already loaded -ENOSYS into r3, or seccomp has put
3219          * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3220          */
3221         if (__secure_computing(NULL))
3222                 return -1;
3223
3224         /*
3225          * The syscall was allowed by seccomp, restore the register
3226          * state to what audit expects.
3227          * Note that we use orig_gpr3, which means a seccomp tracer can
3228          * modify the first syscall parameter (in orig_gpr3) and also
3229          * allow the syscall to proceed.
3230          */
3231         regs->gpr[3] = regs->orig_gpr3;
3232
3233         return 0;
3234 }
3235 #else
3236 static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3237 #endif /* CONFIG_SECCOMP */
3238
3239 /**
3240  * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3241  * @regs: the pt_regs of the task to trace (current)
3242  *
3243  * Performs various types of tracing on syscall entry. This includes seccomp,
3244  * ptrace, syscall tracepoints and audit.
3245  *
3246  * The pt_regs are potentially visible to userspace via ptrace, so their
3247  * contents are ABI.
3248  *
3249  * One or more of the tracers may modify the contents of pt_regs, in particular
3250  * to modify arguments or even the syscall number itself.
3251  *
3252  * It's also possible that a tracer can choose to reject the system call. In
3253  * that case this function will return an illegal syscall number, and will put
3254  * an appropriate return value in regs->gpr[3].
3255  *
3256  * Return: the (possibly changed) syscall number.
3257  */
3258 long do_syscall_trace_enter(struct pt_regs *regs)
3259 {
3260         user_exit();
3261
3262         /*
3263          * The tracer may decide to abort the syscall; if so, tracehook
3264          * will return !0. Note that the tracer may also just change
3265          * regs->gpr[0] to an invalid syscall number, that is handled
3266          * below on the exit path.
3267          */
3268         if (test_thread_flag(TIF_SYSCALL_TRACE) &&
3269             tracehook_report_syscall_entry(regs))
3270                 goto skip;
3271
3272         /* Run seccomp after ptrace; allow it to set gpr[3]. */
3273         if (do_seccomp(regs))
3274                 return -1;
3275
3276         /* Avoid trace and audit when syscall is invalid. */
3277         if (regs->gpr[0] >= NR_syscalls)
3278                 goto skip;
3279
3280         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3281                 trace_sys_enter(regs, regs->gpr[0]);
3282
3283 #ifdef CONFIG_PPC64
3284         if (!is_32bit_task())
3285                 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
3286                                     regs->gpr[5], regs->gpr[6]);
3287         else
3288 #endif
3289                 audit_syscall_entry(regs->gpr[0],
3290                                     regs->gpr[3] & 0xffffffff,
3291                                     regs->gpr[4] & 0xffffffff,
3292                                     regs->gpr[5] & 0xffffffff,
3293                                     regs->gpr[6] & 0xffffffff);
3294
3295         /* Return the possibly modified but valid syscall number */
3296         return regs->gpr[0];
3297
3298 skip:
3299         /*
3300          * If we are aborting explicitly, or if the syscall number is
3301          * now invalid, set the return value to -ENOSYS.
3302          */
3303         regs->gpr[3] = -ENOSYS;
3304         return -1;
3305 }
3306
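/**
 * do_syscall_trace_leave() - Do syscall tracing on kernel exit.
 * @regs: the pt_regs of the task being traced (current)
 *
 * Mirrors do_syscall_trace_enter(): emits the audit and tracepoint exit
 * events and reports the exit (including single-step) to the ptracer.
 */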
3307 void do_syscall_trace_leave(struct pt_regs *regs)
3308 {
3309         int step;
3310
3311         audit_syscall_exit(regs);
3312
3313         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3314                 trace_sys_exit(regs, regs->result);
3315
3316         step = test_thread_flag(TIF_SINGLESTEP);
3317         if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3318                 tracehook_report_syscall_exit(regs, step);
3319
3320         user_enter();
3321 }