#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
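
/*
 * Illustrative sketch, not part of this header: a consumer of the
 * preregistration API above (e.g. the VFIO SPAPR TCE driver) pins a
 * region of userspace memory once, then translates userspace addresses
 * to host physical addresses while DMA mappings are live. The error
 * handling and the use_hpa_for_tce() helper are hypothetical:
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (mm_iommu_get(mm, ua, entries, &mem))
 *		return -EFAULT;
 *	if (!mm_iommu_ua_to_hpa(mem, ua, &hpa))
 *		use_hpa_for_tce(hpa);
 *	mm_iommu_put(mm, mem);
 */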
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif
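
/*
 * A minimal sketch, assuming a hash MMU host: a context id pairs
 * hash__alloc_context_id() with __destroy_context() over the lifetime
 * of an mm; the locking done by the real callers in
 * mmu_context_book3s64.c is omitted here:
 *
 *	int id = hash__alloc_context_id();
 *
 *	if (id < 0)
 *		return id;	// allocation failed
 *	...
 *	__destroy_context(id);
 */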

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched/core.c
 */
static inline void switch_mm_irqs_off(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	/* Mark that this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now.
	 */
	switch_mmu_context(prev, next, tsk);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off
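
/*
 * Illustrative sketch, not from this header: the scheduler's
 * context_switch() in kernel/sched/core.c reaches switch_mm_irqs_off()
 * with interrupts already hard disabled, roughly:
 *
 *	// irqs are off across a task switch
 *	switch_mm_irqs_off(prev->active_mm, next->mm, next);
 *
 * Callers that cannot guarantee the interrupt state use switch_mm()
 * instead, which brackets the same path with local_irq_save() and
 * local_irq_restore().
 */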

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}
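
/*
 * A minimal usage sketch, assuming the exec path: exec_mmap() in
 * fs/exec.c installs the fresh mm and then activates it on the current
 * CPU, roughly:
 *
 *	tsk->mm = mm;
 *	tsk->active_mm = mm;
 *	activate_mm(active_mm, mm);	// active_mm holds the old mm
 */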

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}
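
/*
 * Rationale, with an illustrative trigger: if userspace unmaps the VDSO
 * (checkpoint/restore tools such as CRIU do this), the cached base must
 * be forgotten so the signal trampoline is not resolved through unmapped
 * memory. The names below are hypothetical:
 *
 *	munmap((void *)vdso_base, vdso_len);
 */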

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */