/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
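
/*
 * Worked example: the per-level index bits plus the page offset must sum
 * to the 52-bit tree size.  With 4k pages that is 13 + 9 + 9 + 9 + 12 = 52;
 * with 64k pages it is 13 + 9 + 9 + 5 + 16 = 52, which is why only 5 or 9
 * bits are accepted at the lowest level.
 */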

int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                           struct kvmppc_pte *gpte, bool data, bool iswrite)
{
        struct kvm *kvm = vcpu->kvm;
        u32 pid;
        int ret, level, ps;
        __be64 prte, rpte;
        unsigned long ptbl;
        unsigned long root, pte, index;
        unsigned long rts, bits, offset;
        unsigned long gpa;
        unsigned long proc_tbl_size;

        /* Work out effective PID */
        switch (eaddr >> 62) {
        case 0:
                pid = vcpu->arch.pid;
                break;
        case 3:
                pid = 0;
                break;
        default:
                return -EINVAL;
        }
        proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
        if (pid * 16 >= proc_tbl_size)
                return -EINVAL;

        /* Read partition table to find root of tree for effective PID */
        ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
        ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
        if (ret)
                return ret;

        root = be64_to_cpu(prte);
        rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
                ((root & RTS2_MASK) >> RTS2_SHIFT);
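        /*
         * RTS is split across two fields of the process-table entry and
         * encodes (tree size - 31); since only a 52-bit space is accepted
         * below, a valid entry has rts == 52 - 31 == 21 at this point.
         */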
        bits = root & RPDS_MASK;
        root = root & RPDB_MASK;

        /* P9 DD1 interprets RTS (radix tree size) differently */
        offset = rts + 31;
        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
                offset -= 3;

        /* current implementations only support 52-bit space */
        if (offset != 52)
                return -EINVAL;

        for (level = 3; level >= 0; --level) {
                if (level && bits != p9_supported_radix_bits[level])
                        return -EINVAL;
                if (level == 0 && !(bits == 5 || bits == 9))
                        return -EINVAL;
                offset -= bits;
                index = (eaddr >> offset) & ((1UL << bits) - 1);
                /* check that low bits of page table base are zero */
                if (root & ((1UL << (bits + 3)) - 1))
                        return -EINVAL;
                ret = kvm_read_guest(kvm, root + index * 8,
                                     &rpte, sizeof(rpte));
                if (ret)
                        return ret;
                pte = __be64_to_cpu(rpte);
                if (!(pte & _PAGE_PRESENT))
                        return -ENOENT;
                /* stop at a leaf, otherwise follow the link down a level */
                if (pte & _PAGE_PTE)
                        break;
                bits = pte & 0x1f;
                root = pte & 0x0fffffffffffff00ul;
        }
        /* need a leaf at lowest level; 512GB pages not supported */
        if (level < 0 || level == 3)
                return -EINVAL;

        /* offset is now log base 2 of the page size */
        gpa = pte & 0x01fffffffffff000ul;
        if (gpa & ((1ul << offset) - 1))
                return -EINVAL;
        gpa += eaddr & ((1ul << offset) - 1);
        for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
                if (offset == mmu_psize_defs[ps].shift)
                        break;
        gpte->page_size = ps;

        gpte->eaddr = eaddr;
        gpte->raddr = gpa;

        /* Work out permissions */
        gpte->may_read = !!(pte & _PAGE_READ);
        gpte->may_write = !!(pte & _PAGE_WRITE);
        gpte->may_execute = !!(pte & _PAGE_EXEC);
        if (kvmppc_get_msr(vcpu) & MSR_PR) {
                if (pte & _PAGE_PRIVILEGED) {
                        gpte->may_read = 0;
                        gpte->may_write = 0;
                        gpte->may_execute = 0;
                }
        } else {
                if (!(pte & _PAGE_PRIVILEGED)) {
                        /* Check AMR/IAMR to see if strict mode is in force */
                        if (vcpu->arch.amr & (1ul << 62))
                                gpte->may_read = 0;
                        if (vcpu->arch.amr & (1ul << 63))
                                gpte->may_write = 0;
                        if (vcpu->arch.iamr & (1ul << 62))
                                gpte->may_execute = 0;
                }
        }

        return 0;
}
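
/*
 * TLB management helpers. Partition-scoped translations are tagged with
 * the guest's LPID, so the invalidations below are done by LPID (and by
 * address, for a single page) rather than by flushing everything.
 */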
static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
                                    unsigned int pshift)
{
        unsigned long psize = PAGE_SIZE;

        if (pshift)
                psize = 1UL << pshift;

        addr &= ~(psize - 1);
        radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
}

static void kvmppc_radix_flush_pwc(struct kvm *kvm)
{
        radix__flush_pwc_lpid(kvm->arch.lpid);
}

static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
                                unsigned long clr, unsigned long set,
                                unsigned long addr, unsigned int shift)
{
        unsigned long old = 0;

        if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
            pte_present(*ptep)) {
                /* have to invalidate it first */
                old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
                kvmppc_radix_tlbie_page(kvm, addr, shift);
                set |= _PAGE_PRESENT;
                old &= _PAGE_PRESENT;
        }
        return __radix_pte_update(ptep, clr, set) | old;
}
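
/*
 * Note on the DD1 workaround above: the PTE is invalidated and flushed
 * before its other bits are changed, and _PAGE_PRESENT is then set again
 * as part of the update; ORing the saved _PAGE_PRESENT bit back into the
 * return value lets callers see the pre-update present state.
 */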

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
                             pte_t *ptep, pte_t pte)
{
        radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
        return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
        kmem_cache_free(kvm_pte_cache, ptep);
}

/* Like pmd_huge() and pmd_large(), but works regardless of config options */
static inline int pmd_is_leaf(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PTE);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
        return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
        kmem_cache_free(kvm_pmd_cache, pmdp);
}

static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
                             unsigned long gpa, unsigned int shift)
{
        unsigned long old;
        unsigned long page_size = 1ul << shift;

        old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
        kvmppc_radix_tlbie_page(kvm, gpa, shift);
        if (old & _PAGE_DIRTY) {
                unsigned long gfn = gpa >> PAGE_SHIFT;
                struct kvm_memory_slot *memslot;

                memslot = gfn_to_memslot(kvm, gfn);
                if (memslot && memslot->dirty_bitmap)
                        kvmppc_update_dirty_map(memslot, gfn, page_size);
        }
}
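
/*
 * Note that kvmppc_unmap_pte() transfers any dirty state to the memslot
 * dirty bitmap before the translation disappears, so a dirty-log harvest
 * that runs after the unmap still sees the page as dirty.
 */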

/*
 * kvmppc_free_p?d are used to free existing page tables, and recursively
 * descend and clear and free children.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of the page fault
 * path (full == false), ptes are not expected. There is code to unmap
 * them and emit a warning if encountered, but there may already be data
 * corruption due to the unexpected mappings.
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
{
        if (full) {
                memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
        } else {
                pte_t *p = pte;
                unsigned long it;

                for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
                        if (pte_val(*p) == 0)
                                continue;
                        WARN_ON_ONCE(1);
                        kvmppc_unmap_pte(kvm, p,
                                         pte_pfn(*p) << PAGE_SHIFT,
                                         PAGE_SHIFT);
                }
        }

        kvmppc_pte_free(pte);
}

static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
{
        unsigned long im;
        pmd_t *p = pmd;

        for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
                if (!pmd_present(*p))
                        continue;
                if (pmd_is_leaf(*p)) {
                        if (full) {
                                pmd_clear(p);
                        } else {
                                WARN_ON_ONCE(1);
                                kvmppc_unmap_pte(kvm, (pte_t *)p,
                                         pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
                                         PMD_SHIFT);
                        }
                } else {
                        pte_t *pte;

                        pte = pte_offset_map(p, 0);
                        kvmppc_unmap_free_pte(kvm, pte, full);
                        pmd_clear(p);
                }
        }
        kvmppc_pmd_free(pmd);
}

static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
{
        unsigned long iu;
        pud_t *p = pud;

        for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
                if (!pud_present(*p))
                        continue;
                if (pud_huge(*p)) {
                        pud_clear(p);
                } else {
                        pmd_t *pmd;

                        pmd = pmd_offset(p, 0);
                        kvmppc_unmap_free_pmd(kvm, pmd, true);
                        pud_clear(p);
                }
        }
        pud_free(kvm->mm, pud);
}

void kvmppc_free_radix(struct kvm *kvm)
{
        unsigned long ig;
        pgd_t *pgd;

        if (!kvm->arch.pgtable)
                return;
        pgd = kvm->arch.pgtable;
        for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
                pud_t *pud;

                if (!pgd_present(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                kvmppc_unmap_free_pud(kvm, pud);
                pgd_clear(pgd);
        }
        pgd_free(kvm->mm, kvm->arch.pgtable);
        kvm->arch.pgtable = NULL;
}

static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
                                              unsigned long gpa)
{
        pte_t *pte = pte_offset_kernel(pmd, 0);

        /*
         * Clearing the pmd entry then flushing the PWC ensures that the pte
         * page will no longer be cached by the MMU, so it can be freed
         * without flushing the PWC again.
         */
        pmd_clear(pmd);
        kvmppc_radix_flush_pwc(kvm);

        kvmppc_unmap_free_pte(kvm, pte, false);
}

static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
                                              unsigned long gpa)
{
        pmd_t *pmd = pmd_offset(pud, 0);

        /*
         * Clearing the pud entry then flushing the PWC ensures that the pmd
         * page and any children pte pages will no longer be cached by the
         * MMU, so they can be freed without flushing the PWC again.
         */
        pud_clear(pud);
        kvmppc_radix_flush_pwc(kvm);

        kvmppc_unmap_free_pmd(kvm, pmd, false);
}

/*
 * There are a number of bits which may differ between different faults to
 * the same partition-scope entry: the R and C bits change in the course
 * of cleaning and aging, and the write bit can change, either because the
 * access was upgraded or because a read fault raced with a write fault
 * that set those bits first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
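
/*
 * The mask is used as WARN_ON_ONCE((old ^ new) & PTE_BITS_MUST_MATCH) in
 * kvmppc_create_pte() below: two faults on the same entry may disagree
 * only in _PAGE_WRITE, _PAGE_DIRTY and _PAGE_ACCESSED; a mismatch in any
 * other bit indicates a bug.
 */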

static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
                             unsigned int level, unsigned long mmu_seq)
{
        pgd_t *pgd;
        pud_t *pud, *new_pud = NULL;
        pmd_t *pmd, *new_pmd = NULL;
        pte_t *ptep, *new_ptep = NULL;
        int ret;

        /* Traverse the guest's 2nd-level tree, allocate new levels needed */
        pgd = kvm->arch.pgtable + pgd_index(gpa);
        pud = NULL;
        if (pgd_present(*pgd))
                pud = pud_offset(pgd, gpa);
        else
                new_pud = pud_alloc_one(kvm->mm, gpa);

        pmd = NULL;
        if (pud && pud_present(*pud) && !pud_huge(*pud))
                pmd = pmd_offset(pud, gpa);
        else if (level <= 1)
                new_pmd = kvmppc_pmd_alloc();

        if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
                new_ptep = kvmppc_pte_alloc();

        /* Check if we might have been invalidated; let the guest retry if so */
        spin_lock(&kvm->mmu_lock);
        ret = -EAGAIN;
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;

        /* Now traverse again under the lock and change the tree */
        ret = -ENOMEM;
        if (pgd_none(*pgd)) {
                if (!new_pud)
                        goto out_unlock;
                pgd_populate(kvm->mm, pgd, new_pud);
                new_pud = NULL;
        }
        pud = pud_offset(pgd, gpa);
        if (pud_huge(*pud)) {
                unsigned long hgpa = gpa & PUD_MASK;

                /* Check if we raced and someone else has set the same thing */
                if (level == 2) {
                        if (pud_raw(*pud) == pte_raw(pte)) {
                                ret = 0;
                                goto out_unlock;
                        }
                        /* Valid 1GB page here already, add our extra bits */
                        WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
                                     PTE_BITS_MUST_MATCH);
                        kvmppc_radix_update_pte(kvm, (pte_t *)pud,
                                        0, pte_val(pte), hgpa, PUD_SHIFT);
                        ret = 0;
                        goto out_unlock;
                }
                /*
                 * If we raced with another CPU which has just put
                 * a 1GB pte in after we saw a pmd page, try again.
                 */
                if (!new_pmd) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
                /* Valid 1GB page here already, remove it */
                kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
        }
        if (level == 2) {
                if (!pud_none(*pud)) {
                        /*
                         * There's a page table page here, but we wanted to
                         * install a large page, so remove and free the page
                         * table page.
                         */
                        kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa);
                }
                kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
                ret = 0;
                goto out_unlock;
        }
        if (pud_none(*pud)) {
                if (!new_pmd)
                        goto out_unlock;
                pud_populate(kvm->mm, pud, new_pmd);
                new_pmd = NULL;
        }
        pmd = pmd_offset(pud, gpa);
        if (pmd_is_leaf(*pmd)) {
                unsigned long lgpa = gpa & PMD_MASK;

                /* Check if we raced and someone else has set the same thing */
                if (level == 1) {
                        if (pmd_raw(*pmd) == pte_raw(pte)) {
                                ret = 0;
                                goto out_unlock;
                        }
                        /* Valid 2MB page here already, add our extra bits */
                        WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
                                     PTE_BITS_MUST_MATCH);
                        kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
                                        0, pte_val(pte), lgpa, PMD_SHIFT);
                        ret = 0;
                        goto out_unlock;
                }
                /*
                 * If we raced with another CPU which has just put
                 * a 2MB pte in after we saw a pte page, try again.
                 */
                if (!new_ptep) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
                /* Valid 2MB page here already, remove it */
                kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
        }
        if (level == 1) {
                if (!pmd_none(*pmd)) {
                        /*
                         * There's a page table page here, but we wanted to
                         * install a large page, so remove and free the page
                         * table page.
                         */
                        kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa);
                }
                kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
                ret = 0;
                goto out_unlock;
        }
        if (pmd_none(*pmd)) {
                if (!new_ptep)
                        goto out_unlock;
                pmd_populate(kvm->mm, pmd, new_ptep);
                new_ptep = NULL;
        }
        ptep = pte_offset_kernel(pmd, gpa);
        if (pte_present(*ptep)) {
                /* Check if someone else set the same thing */
                if (pte_raw(*ptep) == pte_raw(pte)) {
                        ret = 0;
                        goto out_unlock;
                }
                /* Valid page here already, add our extra bits */
                WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
                             PTE_BITS_MUST_MATCH);
                kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
                ret = 0;
                goto out_unlock;
        }
        kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
        ret = 0;

 out_unlock:
        spin_unlock(&kvm->mmu_lock);
        if (new_pud)
                pud_free(kvm->mm, new_pud);
        if (new_pmd)
                kvmppc_pmd_free(new_pmd);
        if (new_ptep)
                kvmppc_pte_free(new_ptep);
        return ret;
}
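
/*
 * Handle a hypervisor data storage fault for a radix guest: unusual
 * errors are reflected to the guest or handed to MMIO emulation, pure
 * R/C updates are applied in place, and otherwise the host page is
 * looked up and a partition-scope PTE inserted at the largest level
 * (4k/64k, 2MB or 1GB) that the host mapping and gpa/hva alignment allow.
 */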
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned long ea, unsigned long dsisr)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long mmu_seq, pte_size;
        unsigned long gpa, gfn, hva, pfn;
        struct kvm_memory_slot *memslot;
        struct page *page = NULL;
        long ret;
        bool writing;
        bool upgrade_write = false;
        bool *upgrade_p = &upgrade_write;
        pte_t pte, *ptep;
        unsigned long pgflags;
        unsigned int shift, level;

        /* Check for unusual errors */
        if (dsisr & DSISR_UNSUPP_MMU) {
                pr_err("KVM: Got unsupported MMU fault\n");
                return -EFAULT;
        }
        if (dsisr & DSISR_BADACCESS) {
                /* Reflect to the guest as DSI */
                pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
                kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                return RESUME_GUEST;
        }

        /* Translate the logical address and get the page */
        gpa = vcpu->arch.fault_gpa & ~0xfffUL;
        gpa &= ~0xF000000000000000ul;
        gfn = gpa >> PAGE_SHIFT;
        if (!(dsisr & DSISR_PRTABLE_FAULT))
                gpa |= ea & 0xfff;
        memslot = gfn_to_memslot(kvm, gfn);

        /* No memslot means it's an emulated MMIO region */
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
                             DSISR_SET_RC)) {
                        /*
                         * Bad address in guest page table tree, or other
                         * unusual error - reflect it to the guest as DSI.
                         */
                        kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                        return RESUME_GUEST;
                }
                return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
                                              dsisr & DSISR_ISSTORE);
        }

        writing = (dsisr & DSISR_ISSTORE) != 0;
        if (memslot->flags & KVM_MEM_READONLY) {
                if (writing) {
                        /* give the guest a DSI */
                        dsisr = DSISR_ISSTORE | DSISR_PROTFAULT;
                        kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                        return RESUME_GUEST;
                }
                upgrade_p = NULL;
        }

        if (dsisr & DSISR_SET_RC) {
                /*
                 * Need to set an R or C bit in the 2nd-level tables;
                 * since we are just helping out the hardware here,
                 * it is sufficient to do what the hardware does.
                 */
                pgflags = _PAGE_ACCESSED;
                if (writing)
                        pgflags |= _PAGE_DIRTY;
                /*
                 * We are walking the secondary page table here. We can do this
                 * without disabling irq.
                 */
                spin_lock(&kvm->mmu_lock);
                ptep = __find_linux_pte(kvm->arch.pgtable,
                                        gpa, NULL, &shift);
                if (ptep && pte_present(*ptep) &&
                    (!writing || pte_write(*ptep))) {
                        kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
                                                gpa, shift);
                        dsisr &= ~DSISR_SET_RC;
                }
                spin_unlock(&kvm->mmu_lock);
                if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
                               DSISR_PROTFAULT | DSISR_SET_RC)))
                        return RESUME_GUEST;
        }
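
        /*
         * Note: the mmu_notifier_seq snapshot below pairs with the
         * mmu_notifier_retry() check taken under kvm->mmu_lock in
         * kvmppc_create_pte(); if an invalidation runs in between, the
         * newly computed PTE is dropped and the guest simply retries.
         */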
        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /*
         * Do a fast check first, since __gfn_to_pfn_memslot doesn't
         * do it with !atomic && !async, which is how we call it.
         * We always ask for write permission since the common case
         * is that the page is writable.
         */
        hva = gfn_to_hva_memslot(memslot, gfn);
        if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
                pfn = page_to_pfn(page);
                upgrade_write = true;
        } else {
                /* Call KVM generic code to do the slow-path check */
                pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
                                           writing, upgrade_p);
                if (is_error_noslot_pfn(pfn))
                        return -EFAULT;
                page = NULL;
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageReserved(page))
                                page = NULL;
                }
        }

        /* See if we can insert a 1GB or 2MB large PTE here */
        level = 0;
        if (page && PageCompound(page)) {
                pte_size = PAGE_SIZE << compound_order(compound_head(page));
                if (pte_size >= PUD_SIZE &&
                    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
                    (hva & (PUD_SIZE - PAGE_SIZE))) {
                        level = 2;
                        pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
                } else if (pte_size >= PMD_SIZE &&
                           (gpa & (PMD_SIZE - PAGE_SIZE)) ==
                           (hva & (PMD_SIZE - PAGE_SIZE))) {
                        level = 1;
                        pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
                }
        }
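
        /*
         * The masks above test that gpa and hva are congruent modulo the
         * large page size: a 2MB or 1GB mapping can only be used when the
         * guest-real and host-virtual addresses sit at the same offset
         * within the large page, and pfn is then rounded down to the
         * start of the corresponding host page range.
         */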
        /*
         * Compute the PTE value that we need to insert.
         */
        if (page) {
                pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE |
                        _PAGE_ACCESSED;
                if (writing || upgrade_write)
                        pgflags |= _PAGE_WRITE | _PAGE_DIRTY;
                pte = pfn_pte(pfn, __pgprot(pgflags));
        } else {
                /*
                 * Read the PTE from the process' radix tree and use that
                 * so we get the attribute bits.
                 */
                local_irq_disable();
                ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
                pte = *ptep;
                local_irq_enable();
                if (shift == PUD_SHIFT &&
                    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
                    (hva & (PUD_SIZE - PAGE_SIZE))) {
                        level = 2;
                } else if (shift == PMD_SHIFT &&
                           (gpa & (PMD_SIZE - PAGE_SIZE)) ==
                           (hva & (PMD_SIZE - PAGE_SIZE))) {
                        level = 1;
                } else if (shift && shift != PAGE_SHIFT) {
                        /* fold the sub-page offset within the host page
                           into the PTE's real page number */
                        unsigned long mask = (1ul << shift) - PAGE_SIZE;
                        pte = __pte(pte_val(pte) | (hva & mask));
                }
                pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
                if (writing || upgrade_write) {
                        if (pte_val(pte) & _PAGE_WRITE)
                                pte = __pte(pte_val(pte) | _PAGE_DIRTY);
                } else {
                        pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
                }
        }

        /* Allocate space in the tree and write the PTE */
        ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);

        if (page) {
                if (!ret && (pte_val(pte) & _PAGE_WRITE))
                        set_page_dirty_lock(page);
                put_page(page);
        }

        if (ret == 0 || ret == -EAGAIN)
                ret = RESUME_GUEST;
        return ret;
}

/* Called with kvm->lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                    unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        unsigned long old;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep)) {
                old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
                                              gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
                if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
                        unsigned long npages = 1;

                        if (shift > PAGE_SHIFT)
                                npages = 1ul << (shift - PAGE_SHIFT);
                        kvmppc_update_dirty_map(memslot, gfn, npages);
                }
        }
        return 0;
}
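
/*
 * kvm_unmap_radix() above and the age/test-age hooks below back the MMU
 * notifier callbacks: they keep the partition-scoped tree in step with
 * host-side unmaps and aging, and feed referenced/dirty state from the
 * guest mappings back to the host.
 */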

/* Called with kvm->lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                  unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        int ref = 0;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
                kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
                                        gpa, shift);
                /* XXX need to flush tlb here? */
                ref = 1;
        }
        return ref;
}

/* Called with kvm->lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                       unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        int ref = 0;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep))
                ref = 1;
        return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
                                struct kvm_memory_slot *memslot, int pagenum)
{
        unsigned long gfn = memslot->base_gfn + pagenum;
        unsigned long gpa = gfn << PAGE_SHIFT;
        pte_t *ptep;
        unsigned int shift;
        int ret = 0;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
                ret = 1;
                if (shift)
                        ret = 1 << (shift - PAGE_SHIFT);
                kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
                                        gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
        }
        return ret;
}

long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
                        struct kvm_memory_slot *memslot, unsigned long *map)
{
        unsigned long i, j;
        int npages;

        for (i = 0; i < memslot->npages; i = j) {
                npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

                /*
                 * Note that if npages > 0 then i must be a multiple of npages,
                 * since huge pages are only used to back the guest at guest
                 * real addresses that are a multiple of their size.
                 * Since we have at most one PTE covering any given guest
                 * real address, if npages > 1 we can skip to i + npages.
                 */
                j = i + 1;
                if (npages) {
                        set_dirty_bits(map, i, npages);
                        j = i + npages;
                }
        }
        return 0;
}

static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
                                 int psize, int *indexp)
{
        if (!mmu_psize_defs[psize].shift)
                return;
        info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
                (mmu_psize_defs[psize].ap << 29);
        ++(*indexp);
}
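
/*
 * The encoding packs the page shift into the low bits and the radix AP
 * (actual page size) field at bit 29, which is the layout userspace
 * expects from the KVM_PPC_GET_RMMU_INFO ioctl.
 */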
int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
        int i;

        if (!radix_enabled())
                return -EINVAL;
        memset(info, 0, sizeof(*info));

        /* 4k page size */
        info->geometries[0].page_shift = 12;
        info->geometries[0].level_bits[0] = 9;
        for (i = 1; i < 4; ++i)
                info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
        /* 64k page size */
        info->geometries[1].page_shift = 16;
        for (i = 0; i < 4; ++i)
                info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

        i = 0;
        add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

        return 0;
}

int kvmppc_init_vm_radix(struct kvm *kvm)
{
        kvm->arch.pgtable = pgd_alloc(kvm->mm);
        if (!kvm->arch.pgtable)
                return -ENOMEM;
        return 0;
}

static void pte_ctor(void *addr)
{
        memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}

int kvmppc_radix_init(void)
{
        unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

        kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
        if (!kvm_pte_cache)
                return -ENOMEM;

        size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

        kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
        if (!kvm_pmd_cache) {
                kmem_cache_destroy(kvm_pte_cache);
                return -ENOMEM;
        }

        return 0;
}
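
/*
 * Both caches use align == size so that each page-table fragment is
 * naturally aligned: the hardware expects a radix table's base address
 * to be a multiple of its size, which is also what the low-bits check
 * in kvmppc_mmu_radix_xlate() enforces on the guest's own tables.
 */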

void kvmppc_radix_exit(void)
{
        kmem_cache_destroy(kvm_pte_cache);
        kmem_cache_destroy(kvm_pmd_cache);
}