/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
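
/*
 * Example (illustrative sketch, not part of the original file): the typical
 * lifecycle as a KVM-like caller would drive it. The 16TB limit is an
 * arbitrary example value.
 *
 *	struct gmap *g = gmap_create(current->mm, (1UL << 44) - 1);
 *	if (!g)
 *		return -ENOMEM;
 *	use gmap_map_segment()/gmap_fault() on g, then:
 *	gmap_remove(g);	unlinks g from the mm, tears down shadows and
 *			drops the initial reference (frees g once unused)
 */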

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
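
/*
 * Worked example (illustrative, made-up addresses): a segment table has
 * PTRS_PER_PMD == 2048 eight-byte entries, 16K-aligned, and the first page
 * of each table stores the mapped guest base address in page->index (see
 * gmap_alloc_table()). For an @entry 17 slots into a table whose first page
 * has page->index == 0x80000000, offset is 17 * PMD_SIZE == 0x01100000 and
 * the function returns 0x81100000.
 */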

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
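
/*
 * Example (illustrative sketch, not part of the original file): make 16MB
 * of the host mapping at "from" appear at guest real address 0; all three
 * values must be 1MB segment aligned.
 *
 *	rc = gmap_map_segment(g, from, 0x0UL, 16UL << 20);
 *	if (rc)
 *		return rc;
 */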

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
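
/*
 * Worked example for gmap_translate() (illustrative, made-up addresses):
 * after mapping from == 0x40000000 at to == 0x0, a lookup of gaddr
 * 0x123456 hits segment index 1 with stored vmaddr 0x40100000 and ors in
 * the intra-segment offset 0x23456, returning 0x40123456; an unmapped
 * gaddr returns -EFAULT.
 */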

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * fault-in, redo __gmap_translate to not race with a map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
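
/*
 * Example (illustrative sketch): resolving a guest page fault intercept,
 * mirroring what the KVM fault path does.
 *
 *	rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *	if (rc == -EFAULT)
 *		the guest address is not backed by a gmap segment
 */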

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr >> 53) & 0x7ff;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr >> 42) & 0x7ff;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr >> 31) & 0x7ff;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr >> 20) & 0x7ff;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr >> 12) & 0xff;
	}
	return table;
}
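
/*
 * Worked note for gmap_table_walk() (illustrative): the guest address is
 * decomposed into the classic s390 DAT indices, 11 bits per region/segment
 * level and 8 bits for the page level:
 *
 *	bits 53-63: region-1 index	(stop at @level=4)
 *	bits 42-52: region-2 index	(stop at @level=3)
 *	bits 31-41: region-3 index	(stop at @level=2)
 *	bits 20-30: segment index	(stop at @level=1)
 *	bits 12-19: page index		(@level=0, 256 entries per 2K table)
 *
 * The walk starts at the level given by the ASCE type of the gmap and
 * descends until it reaches @level or hits an invalid entry.
 */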

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr)
{
	struct mm_struct *mm = gmap->mm;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	if (fixup_user_fault(current, mm, vmaddr, FAULT_FLAG_WRITE, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}

/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *                        call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
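
/*
 * Example (illustrative sketch, not part of the original file): arming the
 * pte invalidation notifier for one guest page, similar to what KVM does
 * for guest prefix pages. The callback and notifier names are made up.
 *
 *	static void my_notifier_call(struct gmap *gmap, unsigned long start,
 *				     unsigned long end) { ... }
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_notifier_call,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 *	rc = gmap_mprotect_notify(g, gaddr & PAGE_MASK, PAGE_SIZE, PROT_READ);
 *
 * my_notifier_call() then runs once the protected pte is changed.
 */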

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *                   absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
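
/*
 * The low three bits of a gmap_rmap->raddr encode which shadow table level
 * the rmap protects; the upper bits are the (naturally aligned) shadow
 * address. For example (illustrative), raddr == 0x80000003 is a region-3
 * rmap for shadow address 0x80000000 and makes gmap_shadow_notify() tear
 * down the segment table shadowed there via gmap_unshadow_sgt().
 */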

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < 256; i++, raddr += 1UL << 12)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
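
/*
 * Example (illustrative sketch, not part of the original file): how a
 * VSIE-like caller would obtain and populate a shadow gmap; names are made
 * up and error handling is elided.
 *
 *	sg = gmap_shadow(parent_gmap, guest_asce, edat_level);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);	retry later on -EAGAIN
 *	on a shadow fault: walk the guest's own tables, mirror them with
 *	gmap_shadow_r2t/r3t/sgt/pgt() and finally gmap_shadow_page();
 *	drop the reference with gmap_put(sg) when done.
 */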

/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r2t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r3t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_sgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);

/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else  {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);

/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with gmap->mm->mmap_sem in read
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
			      (unsigned long) s_pgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);

/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);

/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long offset, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long gaddr, start, end, bits, raddr;
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->parent->guest_table_lock);
	table = radix_tree_lookup(&sg->parent->host_to_guest,
				  vmaddr >> PMD_SHIFT);
	gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
	spin_unlock(&sg->parent->guest_table_lock);
	if (!table)
		return;

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree for one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}

/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, offset, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (!(bits & PGSTE_IN_BIT))
			continue;
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (table)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
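
/*
 * Note on the offset arithmetic in ptep_notify() (illustrative numbers):
 * a 2K s390 page table holds 256 ptes, so "pte & (255 * sizeof(pte_t))" is
 * the byte offset of the entry within its page table; multiplying by
 * 4096 / sizeof(pte_t) == 512 converts it into the byte offset of the 4K
 * page within its 1MB segment. E.g. the 17th pte (byte offset 0x88) yields
 * offset 0x11000.
 */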

static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
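
/*
 * Usage note (an assumption about the caller, not verified here): KVM
 * invokes s390_enable_sie() while creating a VM. It only succeeds if the
 * process was set up to allocate full 4K page tables with pgstes (see the
 * mm_alloc_pgste() check above), hence the -EINVAL return otherwise.
 */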

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/*
	 * Remove all zero page mappings; once a policy forbidding zero page
	 * mappings is established, subsequent faults for those pages will
	 * get fresh anonymous pages.
	 */
	if (is_zero_pfn(pte_pfn(*pte)))
		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);