virt/kvm/arm/mmu.c (linux.git, Linux 5.6-rc7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5  */
6
7 #include <linux/mman.h>
8 #include <linux/kvm_host.h>
9 #include <linux/io.h>
10 #include <linux/hugetlb.h>
11 #include <linux/sched/signal.h>
12 #include <trace/events/kvm.h>
13 #include <asm/pgalloc.h>
14 #include <asm/cacheflush.h>
15 #include <asm/kvm_arm.h>
16 #include <asm/kvm_mmu.h>
17 #include <asm/kvm_ras.h>
18 #include <asm/kvm_asm.h>
19 #include <asm/kvm_emulate.h>
20 #include <asm/virt.h>
21
22 #include "trace.h"
23
24 static pgd_t *boot_hyp_pgd;
25 static pgd_t *hyp_pgd;
26 static pgd_t *merged_hyp_pgd;
27 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
28
29 static unsigned long hyp_idmap_start;
30 static unsigned long hyp_idmap_end;
31 static phys_addr_t hyp_idmap_vector;
32
33 static unsigned long io_map_base;
34
35 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
36
37 #define KVM_S2PTE_FLAG_IS_IOMAP         (1UL << 0)
38 #define KVM_S2_FLAG_LOGGING_ACTIVE      (1UL << 1)
39
40 static bool is_iomap(unsigned long flags)
41 {
42         return flags & KVM_S2PTE_FLAG_IS_IOMAP;
43 }
44
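/*
 * Dirty-page logging is only tracked for writable memslots: a read-only
 * slot can never be dirtied, so it needs no write protection.
 */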
45 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
46 {
47         return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
48 }
49
50 /**
51  * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
52  * @kvm:        pointer to kvm structure.
53  *
54  * Interface to HYP function to flush all VM TLB entries
55  */
56 void kvm_flush_remote_tlbs(struct kvm *kvm)
57 {
58         kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
59 }
60
61 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
62 {
63         kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
64 }
65
66 /*
67  * D-Cache management functions. They take the page table entries by
68  * value, as they are flushing the cache using the kernel mapping (or
69  * kmap on 32bit).
70  */
71 static void kvm_flush_dcache_pte(pte_t pte)
72 {
73         __kvm_flush_dcache_pte(pte);
74 }
75
76 static void kvm_flush_dcache_pmd(pmd_t pmd)
77 {
78         __kvm_flush_dcache_pmd(pmd);
79 }
80
81 static void kvm_flush_dcache_pud(pud_t pud)
82 {
83         __kvm_flush_dcache_pud(pud);
84 }
85
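/*
 * Anything without a valid struct page backing it (!pfn_valid) is treated
 * as a device mapping, and D-cache maintenance is skipped for it on
 * unmap/flush.
 */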
86 static bool kvm_is_device_pfn(unsigned long pfn)
87 {
88         return !pfn_valid(pfn);
89 }
90
91 /**
92  * stage2_dissolve_pmd() - clear and flush huge PMD entry
93  * @kvm:        pointer to kvm structure.
94  * @addr:       IPA
95  * @pmd:        pmd pointer for IPA
96  *
97  * Clears a PMD entry and flushes the 1st and 2nd stage TLBs for @addr.
98  */
99 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
100 {
101         if (!pmd_thp_or_huge(*pmd))
102                 return;
103
104         pmd_clear(pmd);
105         kvm_tlb_flush_vmid_ipa(kvm, addr);
106         put_page(virt_to_page(pmd));
107 }
108
109 /**
110  * stage2_dissolve_pud() - clear and flush huge PUD entry
111  * @kvm:        pointer to kvm structure.
112  * @addr:       IPA
113  * @pudp:       pud pointer for IPA
114  *
115  * Clears a PUD entry and flushes the 1st and 2nd stage TLBs for @addr.
116  */
117 static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
118 {
119         if (!stage2_pud_huge(kvm, *pudp))
120                 return;
121
122         stage2_pud_clear(kvm, pudp);
123         kvm_tlb_flush_vmid_ipa(kvm, addr);
124         put_page(virt_to_page(pudp));
125 }
126
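/*
 * Fill @cache with pre-allocated pages, outside of any spinlock, so that
 * callers holding the mmu_lock can later take pages from it via
 * mmu_memory_cache_alloc() without sleeping.
 */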
127 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
128                                   int min, int max)
129 {
130         void *page;
131
132         BUG_ON(max > KVM_NR_MEM_OBJS);
133         if (cache->nobjs >= min)
134                 return 0;
135         while (cache->nobjs < max) {
136                 page = (void *)__get_free_page(GFP_PGTABLE_USER);
137                 if (!page)
138                         return -ENOMEM;
139                 cache->objects[cache->nobjs++] = page;
140         }
141         return 0;
142 }
143
144 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
145 {
146         while (mc->nobjs)
147                 free_page((unsigned long)mc->objects[--mc->nobjs]);
148 }
149
150 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
151 {
152         void *p;
153
154         BUG_ON(!mc || !mc->nobjs);
155         p = mc->objects[--mc->nobjs];
156         return p;
157 }
158
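/*
 * The clear_stage2_*_entry() helpers tear down a table entry whose
 * lower-level table has become empty: clear the entry, invalidate the
 * TLB for the IPA, free the unused table and drop the reference on the
 * page holding the entry.
 */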
159 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
160 {
161         pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
162         stage2_pgd_clear(kvm, pgd);
163         kvm_tlb_flush_vmid_ipa(kvm, addr);
164         stage2_pud_free(kvm, pud_table);
165         put_page(virt_to_page(pgd));
166 }
167
168 static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
169 {
170         pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
171         VM_BUG_ON(stage2_pud_huge(kvm, *pud));
172         stage2_pud_clear(kvm, pud);
173         kvm_tlb_flush_vmid_ipa(kvm, addr);
174         stage2_pmd_free(kvm, pmd_table);
175         put_page(virt_to_page(pud));
176 }
177
178 static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
179 {
180         pte_t *pte_table = pte_offset_kernel(pmd, 0);
181         VM_BUG_ON(pmd_thp_or_huge(*pmd));
182         pmd_clear(pmd);
183         kvm_tlb_flush_vmid_ipa(kvm, addr);
184         free_page((unsigned long)pte_table);
185         put_page(virt_to_page(pmd));
186 }
187
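/*
 * The kvm_set_*() and kvm_*_populate() helpers publish the new entry with
 * WRITE_ONCE() followed by dsb(ishst), so that the update is observable by
 * the page table walker before any subsequent TLB invalidation.
 */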
188 static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
189 {
190         WRITE_ONCE(*ptep, new_pte);
191         dsb(ishst);
192 }
193
194 static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
195 {
196         WRITE_ONCE(*pmdp, new_pmd);
197         dsb(ishst);
198 }
199
200 static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
201 {
202         kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
203 }
204
205 static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
206 {
207         WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
208         dsb(ishst);
209 }
210
211 static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
212 {
213         WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
214         dsb(ishst);
215 }
216
217 /*
218  * Unmapping vs dcache management:
219  *
220  * If a guest maps certain memory pages as uncached, all writes will
221  * bypass the data cache and go directly to RAM.  However, the CPUs
222  * can still speculate reads (not writes) and fill cache lines with
223  * data.
224  *
225  * Those cache lines will be *clean* cache lines though, so a
226  * clean+invalidate operation is equivalent to an invalidate
227  * operation, because no cache lines are marked dirty.
228  *
229  * Those clean cache lines could be filled prior to an uncached write
230  * by the guest, and the cache coherent IO subsystem would therefore
231  * end up writing old data to disk.
232  *
233  * This is why right after unmapping a page/section and invalidating
234  * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
235  * the IO subsystem will never hit in the cache.
236  *
237  * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
238  * we then fully enforce cacheability of RAM, no matter what the guest
239  * does.
240  */
241 static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
242                        phys_addr_t addr, phys_addr_t end)
243 {
244         phys_addr_t start_addr = addr;
245         pte_t *pte, *start_pte;
246
247         start_pte = pte = pte_offset_kernel(pmd, addr);
248         do {
249                 if (!pte_none(*pte)) {
250                         pte_t old_pte = *pte;
251
252                         kvm_set_pte(pte, __pte(0));
253                         kvm_tlb_flush_vmid_ipa(kvm, addr);
254
255                         /* No need to invalidate the cache for device mappings */
256                         if (!kvm_is_device_pfn(pte_pfn(old_pte)))
257                                 kvm_flush_dcache_pte(old_pte);
258
259                         put_page(virt_to_page(pte));
260                 }
261         } while (pte++, addr += PAGE_SIZE, addr != end);
262
263         if (stage2_pte_table_empty(kvm, start_pte))
264                 clear_stage2_pmd_entry(kvm, pmd, start_addr);
265 }
266
267 static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
268                        phys_addr_t addr, phys_addr_t end)
269 {
270         phys_addr_t next, start_addr = addr;
271         pmd_t *pmd, *start_pmd;
272
273         start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
274         do {
275                 next = stage2_pmd_addr_end(kvm, addr, end);
276                 if (!pmd_none(*pmd)) {
277                         if (pmd_thp_or_huge(*pmd)) {
278                                 pmd_t old_pmd = *pmd;
279
280                                 pmd_clear(pmd);
281                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
282
283                                 kvm_flush_dcache_pmd(old_pmd);
284
285                                 put_page(virt_to_page(pmd));
286                         } else {
287                                 unmap_stage2_ptes(kvm, pmd, addr, next);
288                         }
289                 }
290         } while (pmd++, addr = next, addr != end);
291
292         if (stage2_pmd_table_empty(kvm, start_pmd))
293                 clear_stage2_pud_entry(kvm, pud, start_addr);
294 }
295
296 static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
297                        phys_addr_t addr, phys_addr_t end)
298 {
299         phys_addr_t next, start_addr = addr;
300         pud_t *pud, *start_pud;
301
302         start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
303         do {
304                 next = stage2_pud_addr_end(kvm, addr, end);
305                 if (!stage2_pud_none(kvm, *pud)) {
306                         if (stage2_pud_huge(kvm, *pud)) {
307                                 pud_t old_pud = *pud;
308
309                                 stage2_pud_clear(kvm, pud);
310                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
311                                 kvm_flush_dcache_pud(old_pud);
312                                 put_page(virt_to_page(pud));
313                         } else {
314                                 unmap_stage2_pmds(kvm, pud, addr, next);
315                         }
316                 }
317         } while (pud++, addr = next, addr != end);
318
319         if (stage2_pud_table_empty(kvm, start_pud))
320                 clear_stage2_pgd_entry(kvm, pgd, start_addr);
321 }
322
323 /**
324  * unmap_stage2_range - Clear stage2 page table entries to unmap a range
325  * @kvm:   The VM pointer
326  * @start: The intermediate physical base address of the range to unmap
327  * @size:  The size of the area to unmap
328  *
329  * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
330  * be called while holding mmu_lock (unless for freeing the stage2 pgd before
331  * destroying the VM), otherwise another faulting VCPU may come in and mess
332  * with things behind our backs.
333  */
334 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
335 {
336         pgd_t *pgd;
337         phys_addr_t addr = start, end = start + size;
338         phys_addr_t next;
339
340         assert_spin_locked(&kvm->mmu_lock);
341         WARN_ON(size & ~PAGE_MASK);
342
343         pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
344         do {
345                 /*
346                  * Make sure the page table is still active, as another thread
347                  * could have freed the page table while we released
348                  * the lock.
349                  */
350                 if (!READ_ONCE(kvm->arch.pgd))
351                         break;
352                 next = stage2_pgd_addr_end(kvm, addr, end);
353                 if (!stage2_pgd_none(kvm, *pgd))
354                         unmap_stage2_puds(kvm, pgd, addr, next);
355                 /*
356                  * If the range is too large, release the kvm->mmu_lock
357                  * to prevent starvation and lockup detector warnings.
358                  */
359                 if (next != end)
360                         cond_resched_lock(&kvm->mmu_lock);
361         } while (pgd++, addr = next, addr != end);
362 }
363
364 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
365                               phys_addr_t addr, phys_addr_t end)
366 {
367         pte_t *pte;
368
369         pte = pte_offset_kernel(pmd, addr);
370         do {
371                 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
372                         kvm_flush_dcache_pte(*pte);
373         } while (pte++, addr += PAGE_SIZE, addr != end);
374 }
375
376 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
377                               phys_addr_t addr, phys_addr_t end)
378 {
379         pmd_t *pmd;
380         phys_addr_t next;
381
382         pmd = stage2_pmd_offset(kvm, pud, addr);
383         do {
384                 next = stage2_pmd_addr_end(kvm, addr, end);
385                 if (!pmd_none(*pmd)) {
386                         if (pmd_thp_or_huge(*pmd))
387                                 kvm_flush_dcache_pmd(*pmd);
388                         else
389                                 stage2_flush_ptes(kvm, pmd, addr, next);
390                 }
391         } while (pmd++, addr = next, addr != end);
392 }
393
394 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
395                               phys_addr_t addr, phys_addr_t end)
396 {
397         pud_t *pud;
398         phys_addr_t next;
399
400         pud = stage2_pud_offset(kvm, pgd, addr);
401         do {
402                 next = stage2_pud_addr_end(kvm, addr, end);
403                 if (!stage2_pud_none(kvm, *pud)) {
404                         if (stage2_pud_huge(kvm, *pud))
405                                 kvm_flush_dcache_pud(*pud);
406                         else
407                                 stage2_flush_pmds(kvm, pud, addr, next);
408                 }
409         } while (pud++, addr = next, addr != end);
410 }
411
412 static void stage2_flush_memslot(struct kvm *kvm,
413                                  struct kvm_memory_slot *memslot)
414 {
415         phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
416         phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
417         phys_addr_t next;
418         pgd_t *pgd;
419
420         pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
421         do {
422                 next = stage2_pgd_addr_end(kvm, addr, end);
423                 if (!stage2_pgd_none(kvm, *pgd))
424                         stage2_flush_puds(kvm, pgd, addr, next);
425         } while (pgd++, addr = next, addr != end);
426 }
427
428 /**
429  * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
430  * @kvm: The struct kvm pointer
431  *
432  * Go through the stage 2 page tables and invalidate any cache lines
433  * backing memory already mapped to the VM.
434  */
435 static void stage2_flush_vm(struct kvm *kvm)
436 {
437         struct kvm_memslots *slots;
438         struct kvm_memory_slot *memslot;
439         int idx;
440
441         idx = srcu_read_lock(&kvm->srcu);
442         spin_lock(&kvm->mmu_lock);
443
444         slots = kvm_memslots(kvm);
445         kvm_for_each_memslot(memslot, slots)
446                 stage2_flush_memslot(kvm, memslot);
447
448         spin_unlock(&kvm->mmu_lock);
449         srcu_read_unlock(&kvm->srcu, idx);
450 }
451
452 static void clear_hyp_pgd_entry(pgd_t *pgd)
453 {
454         pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
455         pgd_clear(pgd);
456         pud_free(NULL, pud_table);
457         put_page(virt_to_page(pgd));
458 }
459
460 static void clear_hyp_pud_entry(pud_t *pud)
461 {
462         pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
463         VM_BUG_ON(pud_huge(*pud));
464         pud_clear(pud);
465         pmd_free(NULL, pmd_table);
466         put_page(virt_to_page(pud));
467 }
468
469 static void clear_hyp_pmd_entry(pmd_t *pmd)
470 {
471         pte_t *pte_table = pte_offset_kernel(pmd, 0);
472         VM_BUG_ON(pmd_thp_or_huge(*pmd));
473         pmd_clear(pmd);
474         pte_free_kernel(NULL, pte_table);
475         put_page(virt_to_page(pmd));
476 }
477
478 static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
479 {
480         pte_t *pte, *start_pte;
481
482         start_pte = pte = pte_offset_kernel(pmd, addr);
483         do {
484                 if (!pte_none(*pte)) {
485                         kvm_set_pte(pte, __pte(0));
486                         put_page(virt_to_page(pte));
487                 }
488         } while (pte++, addr += PAGE_SIZE, addr != end);
489
490         if (hyp_pte_table_empty(start_pte))
491                 clear_hyp_pmd_entry(pmd);
492 }
493
494 static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
495 {
496         phys_addr_t next;
497         pmd_t *pmd, *start_pmd;
498
499         start_pmd = pmd = pmd_offset(pud, addr);
500         do {
501                 next = pmd_addr_end(addr, end);
502                 /* Hyp doesn't use huge pmds */
503                 if (!pmd_none(*pmd))
504                         unmap_hyp_ptes(pmd, addr, next);
505         } while (pmd++, addr = next, addr != end);
506
507         if (hyp_pmd_table_empty(start_pmd))
508                 clear_hyp_pud_entry(pud);
509 }
510
511 static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
512 {
513         phys_addr_t next;
514         pud_t *pud, *start_pud;
515
516         start_pud = pud = pud_offset(pgd, addr);
517         do {
518                 next = pud_addr_end(addr, end);
519                 /* Hyp doesn't use huge puds */
520                 if (!pud_none(*pud))
521                         unmap_hyp_pmds(pud, addr, next);
522         } while (pud++, addr = next, addr != end);
523
524         if (hyp_pud_table_empty(start_pud))
525                 clear_hyp_pgd_entry(pgd);
526 }
527
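/*
 * Index into a hyp PGD. The number of PGD pointers is passed explicitly
 * because the extended idmap PGD can have more entries than PTRS_PER_PGD
 * (see __kvm_idmap_ptrs_per_pgd()).
 */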
528 static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
529 {
530         return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
531 }
532
533 static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
534                               phys_addr_t start, u64 size)
535 {
536         pgd_t *pgd;
537         phys_addr_t addr = start, end = start + size;
538         phys_addr_t next;
539
540         /*
541          * We don't unmap anything from HYP, except at the hyp tear down.
542          * Hence, we don't have to invalidate the TLBs here.
543          */
544         pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
545         do {
546                 next = pgd_addr_end(addr, end);
547                 if (!pgd_none(*pgd))
548                         unmap_hyp_puds(pgd, addr, next);
549         } while (pgd++, addr = next, addr != end);
550 }
551
552 static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
553 {
554         __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
555 }
556
557 static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
558 {
559         __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
560 }
561
562 /**
563  * free_hyp_pgds - free Hyp-mode page tables
564  *
565  * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
566  * therefore contains either mappings in the kernel memory area (above
567  * PAGE_OFFSET), or device mappings in the idmap range.
568  *
569  * boot_hyp_pgd should only map the idmap range, and is only used in
570  * the extended idmap case.
571  */
572 void free_hyp_pgds(void)
573 {
574         pgd_t *id_pgd;
575
576         mutex_lock(&kvm_hyp_pgd_mutex);
577
578         id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
579
580         if (id_pgd) {
581                 /* In case we never called hyp_mmu_init() */
582                 if (!io_map_base)
583                         io_map_base = hyp_idmap_start;
584                 unmap_hyp_idmap_range(id_pgd, io_map_base,
585                                       hyp_idmap_start + PAGE_SIZE - io_map_base);
586         }
587
588         if (boot_hyp_pgd) {
589                 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
590                 boot_hyp_pgd = NULL;
591         }
592
593         if (hyp_pgd) {
594                 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
595                                 (uintptr_t)high_memory - PAGE_OFFSET);
596
597                 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
598                 hyp_pgd = NULL;
599         }
600         if (merged_hyp_pgd) {
601                 clear_page(merged_hyp_pgd);
602                 free_page((unsigned long)merged_hyp_pgd);
603                 merged_hyp_pgd = NULL;
604         }
605
606         mutex_unlock(&kvm_hyp_pgd_mutex);
607 }
608
609 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
610                                     unsigned long end, unsigned long pfn,
611                                     pgprot_t prot)
612 {
613         pte_t *pte;
614         unsigned long addr;
615
616         addr = start;
617         do {
618                 pte = pte_offset_kernel(pmd, addr);
619                 kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
620                 get_page(virt_to_page(pte));
621                 pfn++;
622         } while (addr += PAGE_SIZE, addr != end);
623 }
624
625 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
626                                    unsigned long end, unsigned long pfn,
627                                    pgprot_t prot)
628 {
629         pmd_t *pmd;
630         pte_t *pte;
631         unsigned long addr, next;
632
633         addr = start;
634         do {
635                 pmd = pmd_offset(pud, addr);
636
637                 BUG_ON(pmd_sect(*pmd));
638
639                 if (pmd_none(*pmd)) {
640                         pte = pte_alloc_one_kernel(NULL);
641                         if (!pte) {
642                                 kvm_err("Cannot allocate Hyp pte\n");
643                                 return -ENOMEM;
644                         }
645                         kvm_pmd_populate(pmd, pte);
646                         get_page(virt_to_page(pmd));
647                 }
648
649                 next = pmd_addr_end(addr, end);
650
651                 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
652                 pfn += (next - addr) >> PAGE_SHIFT;
653         } while (addr = next, addr != end);
654
655         return 0;
656 }
657
658 static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
659                                    unsigned long end, unsigned long pfn,
660                                    pgprot_t prot)
661 {
662         pud_t *pud;
663         pmd_t *pmd;
664         unsigned long addr, next;
665         int ret;
666
667         addr = start;
668         do {
669                 pud = pud_offset(pgd, addr);
670
671                 if (pud_none_or_clear_bad(pud)) {
672                         pmd = pmd_alloc_one(NULL, addr);
673                         if (!pmd) {
674                                 kvm_err("Cannot allocate Hyp pmd\n");
675                                 return -ENOMEM;
676                         }
677                         kvm_pud_populate(pud, pmd);
678                         get_page(virt_to_page(pud));
679                 }
680
681                 next = pud_addr_end(addr, end);
682                 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
683                 if (ret)
684                         return ret;
685                 pfn += (next - addr) >> PAGE_SHIFT;
686         } while (addr = next, addr != end);
687
688         return 0;
689 }
690
691 static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
692                                  unsigned long start, unsigned long end,
693                                  unsigned long pfn, pgprot_t prot)
694 {
695         pgd_t *pgd;
696         pud_t *pud;
697         unsigned long addr, next;
698         int err = 0;
699
700         mutex_lock(&kvm_hyp_pgd_mutex);
701         addr = start & PAGE_MASK;
702         end = PAGE_ALIGN(end);
703         do {
704                 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
705
706                 if (pgd_none(*pgd)) {
707                         pud = pud_alloc_one(NULL, addr);
708                         if (!pud) {
709                                 kvm_err("Cannot allocate Hyp pud\n");
710                                 err = -ENOMEM;
711                                 goto out;
712                         }
713                         kvm_pgd_populate(pgd, pud);
714                         get_page(virt_to_page(pgd));
715                 }
716
717                 next = pgd_addr_end(addr, end);
718                 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
719                 if (err)
720                         goto out;
721                 pfn += (next - addr) >> PAGE_SHIFT;
722         } while (addr = next, addr != end);
723 out:
724         mutex_unlock(&kvm_hyp_pgd_mutex);
725         return err;
726 }
727
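/*
 * Translate a kernel virtual address to a physical address, handling both
 * linearly mapped and vmalloc'd addresses.
 */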
728 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
729 {
730         if (!is_vmalloc_addr(kaddr)) {
731                 BUG_ON(!virt_addr_valid(kaddr));
732                 return __pa(kaddr);
733         } else {
734                 return page_to_phys(vmalloc_to_page(kaddr)) +
735                        offset_in_page(kaddr);
736         }
737 }
738
739 /**
740  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
741  * @from:       The virtual kernel start address of the range
742  * @to:         The virtual kernel end address of the range (exclusive)
743  * @prot:       The protection to be applied to this range
744  *
745  * The same virtual address as the kernel virtual address is also used
746  * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
747  * physical pages.
748  */
749 int create_hyp_mappings(void *from, void *to, pgprot_t prot)
750 {
751         phys_addr_t phys_addr;
752         unsigned long virt_addr;
753         unsigned long start = kern_hyp_va((unsigned long)from);
754         unsigned long end = kern_hyp_va((unsigned long)to);
755
756         if (is_kernel_in_hyp_mode())
757                 return 0;
758
759         start = start & PAGE_MASK;
760         end = PAGE_ALIGN(end);
761
762         for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
763                 int err;
764
765                 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
766                 err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
767                                             virt_addr, virt_addr + PAGE_SIZE,
768                                             __phys_to_pfn(phys_addr),
769                                             prot);
770                 if (err)
771                         return err;
772         }
773
774         return 0;
775 }
776
777 static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
778                                         unsigned long *haddr, pgprot_t prot)
779 {
780         pgd_t *pgd = hyp_pgd;
781         unsigned long base;
782         int ret = 0;
783
784         mutex_lock(&kvm_hyp_pgd_mutex);
785
786         /*
787          * This assumes that we have enough space below the idmap
788          * page to allocate our VAs. If not, the check below will
789          * kick in. A potential alternative would be to detect that
790          * overflow and switch to an allocation above the idmap.
791          *
792          * The allocated size is always a multiple of PAGE_SIZE.
793          */
794         size = PAGE_ALIGN(size + offset_in_page(phys_addr));
795         base = io_map_base - size;
796
797         /*
798          * Verify that BIT(VA_BITS - 1) hasn't been flipped by
799          * allocating the new area, as it would indicate we've
800          * overflowed the idmap/IO address range.
801          */
802         if ((base ^ io_map_base) & BIT(VA_BITS - 1))
803                 ret = -ENOMEM;
804         else
805                 io_map_base = base;
806
807         mutex_unlock(&kvm_hyp_pgd_mutex);
808
809         if (ret)
810                 goto out;
811
812         if (__kvm_cpu_uses_extended_idmap())
813                 pgd = boot_hyp_pgd;
814
815         ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
816                                     base, base + size,
817                                     __phys_to_pfn(phys_addr), prot);
818         if (ret)
819                 goto out;
820
821         *haddr = base + offset_in_page(phys_addr);
822
823 out:
824         return ret;
825 }
826
827 /**
828  * create_hyp_io_mappings - Map IO into both kernel and HYP
829  * @phys_addr:  The physical start address which gets mapped
830  * @size:       Size of the region being mapped
831  * @kaddr:      Kernel VA for this mapping
832  * @haddr:      HYP VA for this mapping
833  */
834 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
835                            void __iomem **kaddr,
836                            void __iomem **haddr)
837 {
838         unsigned long addr;
839         int ret;
840
841         *kaddr = ioremap(phys_addr, size);
842         if (!*kaddr)
843                 return -ENOMEM;
844
845         if (is_kernel_in_hyp_mode()) {
846                 *haddr = *kaddr;
847                 return 0;
848         }
849
850         ret = __create_hyp_private_mapping(phys_addr, size,
851                                            &addr, PAGE_HYP_DEVICE);
852         if (ret) {
853                 iounmap(*kaddr);
854                 *kaddr = NULL;
855                 *haddr = NULL;
856                 return ret;
857         }
858
859         *haddr = (void __iomem *)addr;
860         return 0;
861 }
862
863 /**
864  * create_hyp_exec_mappings - Map an executable range into HYP
865  * @phys_addr:  The physical start address which gets mapped
866  * @size:       Size of the region being mapped
867  * @haddr:      HYP VA for this mapping
868  */
869 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
870                              void **haddr)
871 {
872         unsigned long addr;
873         int ret;
874
875         BUG_ON(is_kernel_in_hyp_mode());
876
877         ret = __create_hyp_private_mapping(phys_addr, size,
878                                            &addr, PAGE_HYP_EXEC);
879         if (ret) {
880                 *haddr = NULL;
881                 return ret;
882         }
883
884         *haddr = (void *)addr;
885         return 0;
886 }
887
888 /**
889  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
890  * @kvm:        The KVM struct pointer for the VM.
891  *
892  * Allocates only the stage-2 HW PGD level table(s) of size defined by
893  * stage2_pgd_size(kvm).
894  *
895  * Note we don't need locking here as this is only called when the VM is
896  * created, which can only be done once.
897  */
898 int kvm_alloc_stage2_pgd(struct kvm *kvm)
899 {
900         phys_addr_t pgd_phys;
901         pgd_t *pgd;
902
903         if (kvm->arch.pgd != NULL) {
904                 kvm_err("kvm_arch already initialized?\n");
905                 return -EINVAL;
906         }
907
908         /* Allocate the HW PGD, making sure that each page gets its own refcount */
909         pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
910         if (!pgd)
911                 return -ENOMEM;
912
913         pgd_phys = virt_to_phys(pgd);
914         if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
915                 return -EINVAL;
916
917         kvm->arch.pgd = pgd;
918         kvm->arch.pgd_phys = pgd_phys;
919         return 0;
920 }
921
922 static void stage2_unmap_memslot(struct kvm *kvm,
923                                  struct kvm_memory_slot *memslot)
924 {
925         hva_t hva = memslot->userspace_addr;
926         phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
927         phys_addr_t size = PAGE_SIZE * memslot->npages;
928         hva_t reg_end = hva + size;
929
930         /*
931          * A memory region could potentially cover multiple VMAs, and any holes
932          * between them, so iterate over all of them to find out if we should
933          * unmap any of them.
934          *
935          *     +--------------------------------------------+
936          * +---------------+----------------+   +----------------+
937          * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
938          * +---------------+----------------+   +----------------+
939          *     |               memory region                |
940          *     +--------------------------------------------+
941          */
942         do {
943                 struct vm_area_struct *vma = find_vma(current->mm, hva);
944                 hva_t vm_start, vm_end;
945
946                 if (!vma || vma->vm_start >= reg_end)
947                         break;
948
949                 /*
950                  * Take the intersection of this VMA with the memory region
951                  */
952                 vm_start = max(hva, vma->vm_start);
953                 vm_end = min(reg_end, vma->vm_end);
954
955                 if (!(vma->vm_flags & VM_PFNMAP)) {
956                         gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
957                         unmap_stage2_range(kvm, gpa, vm_end - vm_start);
958                 }
959                 hva = vm_end;
960         } while (hva < reg_end);
961 }
962
963 /**
964  * stage2_unmap_vm - Unmap Stage-2 RAM mappings
965  * @kvm: The struct kvm pointer
966  *
967  * Go through the memory regions and unmap any regular RAM
968  * backing memory already mapped to the VM.
969  */
970 void stage2_unmap_vm(struct kvm *kvm)
971 {
972         struct kvm_memslots *slots;
973         struct kvm_memory_slot *memslot;
974         int idx;
975
976         idx = srcu_read_lock(&kvm->srcu);
977         down_read(&current->mm->mmap_sem);
978         spin_lock(&kvm->mmu_lock);
979
980         slots = kvm_memslots(kvm);
981         kvm_for_each_memslot(memslot, slots)
982                 stage2_unmap_memslot(kvm, memslot);
983
984         spin_unlock(&kvm->mmu_lock);
985         up_read(&current->mm->mmap_sem);
986         srcu_read_unlock(&kvm->srcu, idx);
987 }
988
989 /**
990  * kvm_free_stage2_pgd - free all stage-2 tables
991  * @kvm:        The KVM struct pointer for the VM.
992  *
993  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
994  * underlying level-2 and level-3 tables before freeing the actual level-1 table
995  * and setting the struct pointer to NULL.
996  */
997 void kvm_free_stage2_pgd(struct kvm *kvm)
998 {
999         void *pgd = NULL;
1000
1001         spin_lock(&kvm->mmu_lock);
1002         if (kvm->arch.pgd) {
1003                 unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
1004                 pgd = READ_ONCE(kvm->arch.pgd);
1005                 kvm->arch.pgd = NULL;
1006                 kvm->arch.pgd_phys = 0;
1007         }
1008         spin_unlock(&kvm->mmu_lock);
1009
1010         /* Free the HW pgd, one page at a time */
1011         if (pgd)
1012                 free_pages_exact(pgd, stage2_pgd_size(kvm));
1013 }
1014
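/*
 * Return a pointer to the stage-2 PUD entry for @addr, allocating the
 * missing PUD table from @cache when the PGD entry is empty. Without a
 * cache, NULL is returned if the entry is not already populated.
 */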
1015 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1016                              phys_addr_t addr)
1017 {
1018         pgd_t *pgd;
1019         pud_t *pud;
1020
1021         pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
1022         if (stage2_pgd_none(kvm, *pgd)) {
1023                 if (!cache)
1024                         return NULL;
1025                 pud = mmu_memory_cache_alloc(cache);
1026                 stage2_pgd_populate(kvm, pgd, pud);
1027                 get_page(virt_to_page(pgd));
1028         }
1029
1030         return stage2_pud_offset(kvm, pgd, addr);
1031 }
1032
1033 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1034                              phys_addr_t addr)
1035 {
1036         pud_t *pud;
1037         pmd_t *pmd;
1038
1039         pud = stage2_get_pud(kvm, cache, addr);
1040         if (!pud || stage2_pud_huge(kvm, *pud))
1041                 return NULL;
1042
1043         if (stage2_pud_none(kvm, *pud)) {
1044                 if (!cache)
1045                         return NULL;
1046                 pmd = mmu_memory_cache_alloc(cache);
1047                 stage2_pud_populate(kvm, pud, pmd);
1048                 get_page(virt_to_page(pud));
1049         }
1050
1051         return stage2_pmd_offset(kvm, pud, addr);
1052 }
1053
1054 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1055                                *cache, phys_addr_t addr, const pmd_t *new_pmd)
1056 {
1057         pmd_t *pmd, old_pmd;
1058
1059 retry:
1060         pmd = stage2_get_pmd(kvm, cache, addr);
1061         VM_BUG_ON(!pmd);
1062
1063         old_pmd = *pmd;
1064         /*
1065          * Multiple vcpus faulting on the same PMD entry can
1066          * lead to them sequentially updating the PMD with the
1067          * same value. Following the break-before-make
1068          * (pmd_clear() followed by tlb_flush()) process can
1069          * hinder forward progress due to refaults generated
1070          * on missing translations.
1071          *
1072          * Skip updating the page table if the entry is
1073          * unchanged.
1074          */
1075         if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1076                 return 0;
1077
1078         if (pmd_present(old_pmd)) {
1079                 /*
1080                  * If we already have PTE level mapping for this block,
1081                  * we must unmap it to avoid inconsistent TLB state and
1082                  * leaking the table page. We could end up in this situation
1083                  * if the memory slot was marked for dirty logging and was
1084                  * reverted, leaving PTE level mappings for the pages accessed
1085                  * during the period. So, unmap the PTE level mapping for this
1086                  * block and retry, as we could have released the upper level
1087                  * table in the process.
1088                  *
1089                  * Normal THP split/merge follows mmu_notifier callbacks and
1090                  * gets handled accordingly.
1091                  */
1092                 if (!pmd_thp_or_huge(old_pmd)) {
1093                         unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
1094                         goto retry;
1095                 }
1096                 /*
1097                  * Mapping in huge pages should only happen through a
1098                  * fault.  If a page is merged into a transparent huge
1099                  * page, the individual subpages of that huge page
1100                  * should be unmapped through MMU notifiers before we
1101                  * get here.
1102                  *
1103                  * Merging of CompoundPages is not supported; they
1104                  * should be split first, unmapped, merged,
1105                  * and mapped back in on demand.
1106                  */
1107                 WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
1108                 pmd_clear(pmd);
1109                 kvm_tlb_flush_vmid_ipa(kvm, addr);
1110         } else {
1111                 get_page(virt_to_page(pmd));
1112         }
1113
1114         kvm_set_pmd(pmd, *new_pmd);
1115         return 0;
1116 }
1117
1118 static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1119                                phys_addr_t addr, const pud_t *new_pudp)
1120 {
1121         pud_t *pudp, old_pud;
1122
1123 retry:
1124         pudp = stage2_get_pud(kvm, cache, addr);
1125         VM_BUG_ON(!pudp);
1126
1127         old_pud = *pudp;
1128
1129         /*
1130          * A large number of vcpus faulting on the same stage 2 entry
1131          * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
1132          * Skip updating the page tables if there is no change.
1133          */
1134         if (pud_val(old_pud) == pud_val(*new_pudp))
1135                 return 0;
1136
1137         if (stage2_pud_present(kvm, old_pud)) {
1138                 /*
1139                  * If we already have table level mapping for this block, unmap
1140                  * the range for this block and retry.
1141                  */
1142                 if (!stage2_pud_huge(kvm, old_pud)) {
1143                         unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
1144                         goto retry;
1145                 }
1146
1147                 WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
1148                 stage2_pud_clear(kvm, pudp);
1149                 kvm_tlb_flush_vmid_ipa(kvm, addr);
1150         } else {
1151                 get_page(virt_to_page(pudp));
1152         }
1153
1154         kvm_set_pud(pudp, *new_pudp);
1155         return 0;
1156 }
1157
1158 /*
1159  * stage2_get_leaf_entry - walk the stage2 VM page tables and return
1160  * true if a valid and present leaf-entry is found. A pointer to the
1161  * leaf-entry is returned in the appropriate level variable - pudpp,
1162  * pmdpp, ptepp.
1163  */
1164 static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
1165                                   pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
1166 {
1167         pud_t *pudp;
1168         pmd_t *pmdp;
1169         pte_t *ptep;
1170
1171         *pudpp = NULL;
1172         *pmdpp = NULL;
1173         *ptepp = NULL;
1174
1175         pudp = stage2_get_pud(kvm, NULL, addr);
1176         if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
1177                 return false;
1178
1179         if (stage2_pud_huge(kvm, *pudp)) {
1180                 *pudpp = pudp;
1181                 return true;
1182         }
1183
1184         pmdp = stage2_pmd_offset(kvm, pudp, addr);
1185         if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
1186                 return false;
1187
1188         if (pmd_thp_or_huge(*pmdp)) {
1189                 *pmdpp = pmdp;
1190                 return true;
1191         }
1192
1193         ptep = pte_offset_kernel(pmdp, addr);
1194         if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
1195                 return false;
1196
1197         *ptepp = ptep;
1198         return true;
1199 }
1200
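/* Return true if @addr is mapped by a valid leaf entry that has execute permissions. */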
1201 static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
1202 {
1203         pud_t *pudp;
1204         pmd_t *pmdp;
1205         pte_t *ptep;
1206         bool found;
1207
1208         found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
1209         if (!found)
1210                 return false;
1211
1212         if (pudp)
1213                 return kvm_s2pud_exec(pudp);
1214         else if (pmdp)
1215                 return kvm_s2pmd_exec(pmdp);
1216         else
1217                 return kvm_s2pte_exec(ptep);
1218 }
1219
1220 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1221                           phys_addr_t addr, const pte_t *new_pte,
1222                           unsigned long flags)
1223 {
1224         pud_t *pud;
1225         pmd_t *pmd;
1226         pte_t *pte, old_pte;
1227         bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
1228         bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
1229
1230         VM_BUG_ON(logging_active && !cache);
1231
1232         /* Create stage-2 page table mapping - Levels 0 and 1 */
1233         pud = stage2_get_pud(kvm, cache, addr);
1234         if (!pud) {
1235                 /*
1236                  * Ignore calls from kvm_set_spte_hva for unallocated
1237                  * address ranges.
1238                  */
1239                 return 0;
1240         }
1241
1242         /*
1243          * While dirty page logging - dissolve huge PUD, then continue
1244          * on to allocate page.
1245          */
1246         if (logging_active)
1247                 stage2_dissolve_pud(kvm, addr, pud);
1248
1249         if (stage2_pud_none(kvm, *pud)) {
1250                 if (!cache)
1251                         return 0; /* ignore calls from kvm_set_spte_hva */
1252                 pmd = mmu_memory_cache_alloc(cache);
1253                 stage2_pud_populate(kvm, pud, pmd);
1254                 get_page(virt_to_page(pud));
1255         }
1256
1257         pmd = stage2_pmd_offset(kvm, pud, addr);
1258         if (!pmd) {
1259                 /*
1260                  * Ignore calls from kvm_set_spte_hva for unallocated
1261                  * address ranges.
1262                  */
1263                 return 0;
1264         }
1265
1266         /*
1267          * While dirty page logging - dissolve huge PMD, then continue on to
1268          * allocate page.
1269          */
1270         if (logging_active)
1271                 stage2_dissolve_pmd(kvm, addr, pmd);
1272
1273         /* Create stage-2 page mappings - Level 2 */
1274         if (pmd_none(*pmd)) {
1275                 if (!cache)
1276                         return 0; /* ignore calls from kvm_set_spte_hva */
1277                 pte = mmu_memory_cache_alloc(cache);
1278                 kvm_pmd_populate(pmd, pte);
1279                 get_page(virt_to_page(pmd));
1280         }
1281
1282         pte = pte_offset_kernel(pmd, addr);
1283
1284         if (iomap && pte_present(*pte))
1285                 return -EFAULT;
1286
1287         /* Create 2nd stage page table mapping - Level 3 */
1288         old_pte = *pte;
1289         if (pte_present(old_pte)) {
1290                 /* Skip page table update if there is no change */
1291                 if (pte_val(old_pte) == pte_val(*new_pte))
1292                         return 0;
1293
1294                 kvm_set_pte(pte, __pte(0));
1295                 kvm_tlb_flush_vmid_ipa(kvm, addr);
1296         } else {
1297                 get_page(virt_to_page(pte));
1298         }
1299
1300         kvm_set_pte(pte, *new_pte);
1301         return 0;
1302 }
1303
1304 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1305 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1306 {
1307         if (pte_young(*pte)) {
1308                 *pte = pte_mkold(*pte);
1309                 return 1;
1310         }
1311         return 0;
1312 }
1313 #else
1314 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1315 {
1316         return __ptep_test_and_clear_young(pte);
1317 }
1318 #endif
1319
1320 static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1321 {
1322         return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1323 }
1324
1325 static int stage2_pudp_test_and_clear_young(pud_t *pud)
1326 {
1327         return stage2_ptep_test_and_clear_young((pte_t *)pud);
1328 }
1329
1330 /**
1331  * kvm_phys_addr_ioremap - map a device range to guest IPA
1332  *
1333  * @kvm:        The KVM pointer
1334  * @guest_ipa:  The IPA at which to insert the mapping
1335  * @pa:         The physical address of the device
1336  * @size:       The size of the mapping
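 * @writable:   Whether or not to create a writable mapping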
1337  */
1338 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1339                           phys_addr_t pa, unsigned long size, bool writable)
1340 {
1341         phys_addr_t addr, end;
1342         int ret = 0;
1343         unsigned long pfn;
1344         struct kvm_mmu_memory_cache cache = { 0, };
1345
1346         end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1347         pfn = __phys_to_pfn(pa);
1348
1349         for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
1350                 pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
1351
1352                 if (writable)
1353                         pte = kvm_s2pte_mkwrite(pte);
1354
1355                 ret = mmu_topup_memory_cache(&cache,
1356                                              kvm_mmu_cache_min_pages(kvm),
1357                                              KVM_NR_MEM_OBJS);
1358                 if (ret)
1359                         goto out;
1360                 spin_lock(&kvm->mmu_lock);
1361                 ret = stage2_set_pte(kvm, &cache, addr, &pte,
1362                                                 KVM_S2PTE_FLAG_IS_IOMAP);
1363                 spin_unlock(&kvm->mmu_lock);
1364                 if (ret)
1365                         goto out;
1366
1367                 pfn++;
1368         }
1369
1370 out:
1371         mmu_free_memory_cache(&cache);
1372         return ret;
1373 }
1374
1375 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
1376 {
1377         kvm_pfn_t pfn = *pfnp;
1378         gfn_t gfn = *ipap >> PAGE_SHIFT;
1379
1380         if (kvm_is_transparent_hugepage(pfn)) {
1381                 unsigned long mask;
1382                 /*
1383                  * The address we faulted on is backed by a transparent huge
1384                  * page.  However, because we map the compound huge page and
1385                  * not the individual tail page, we need to transfer the
1386                  * refcount to the head page.  We have to be careful that the
1387                  * THP doesn't start to split while we are adjusting the
1388                  * refcounts.
1389                  *
1390                  * We are sure this doesn't happen, because mmu_notifier_retry
1391                  * was successful and we are holding the mmu_lock, so if this
1392                  * THP is trying to split, it will be blocked in the mmu
1393                  * notifier before touching any of the pages, specifically
1394                  * before being able to call __split_huge_page_refcount().
1395                  *
1396                  * We can therefore safely transfer the refcount from PG_tail
1397                  * to PG_head and switch the pfn from a tail page to the head
1398                  * page accordingly.
1399                  */
1400                 mask = PTRS_PER_PMD - 1;
1401                 VM_BUG_ON((gfn & mask) != (pfn & mask));
1402                 if (pfn & mask) {
1403                         *ipap &= PMD_MASK;
1404                         kvm_release_pfn_clean(pfn);
1405                         pfn &= ~mask;
1406                         kvm_get_pfn(pfn);
1407                         *pfnp = pfn;
1408                 }
1409
1410                 return true;
1411         }
1412
1413         return false;
1414 }
1415
1416 /**
1417  * stage2_wp_ptes - write protect PMD range
1418  * @pmd:        pointer to pmd entry
1419  * @addr:       range start address
1420  * @end:        range end address
1421  */
1422 static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1423 {
1424         pte_t *pte;
1425
1426         pte = pte_offset_kernel(pmd, addr);
1427         do {
1428                 if (!pte_none(*pte)) {
1429                         if (!kvm_s2pte_readonly(pte))
1430                                 kvm_set_s2pte_readonly(pte);
1431                 }
1432         } while (pte++, addr += PAGE_SIZE, addr != end);
1433 }
1434
1435 /**
1436  * stage2_wp_pmds - write protect PUD range
1437  * @kvm:        kvm instance for the VM
1438  * @pud:        pointer to pud entry
1439  * @addr:       range start address
1440  * @end:        range end address
1441  */
1442 static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
1443                            phys_addr_t addr, phys_addr_t end)
1444 {
1445         pmd_t *pmd;
1446         phys_addr_t next;
1447
1448         pmd = stage2_pmd_offset(kvm, pud, addr);
1449
1450         do {
1451                 next = stage2_pmd_addr_end(kvm, addr, end);
1452                 if (!pmd_none(*pmd)) {
1453                         if (pmd_thp_or_huge(*pmd)) {
1454                                 if (!kvm_s2pmd_readonly(pmd))
1455                                         kvm_set_s2pmd_readonly(pmd);
1456                         } else {
1457                                 stage2_wp_ptes(pmd, addr, next);
1458                         }
1459                 }
1460         } while (pmd++, addr = next, addr != end);
1461 }
1462
1463 /**
1464  * stage2_wp_puds - write protect PGD range
1465  * @pgd:        pointer to pgd entry
1466  * @addr:       range start address
1467  * @end:        range end address
1468  */
1469 static void  stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
1470                             phys_addr_t addr, phys_addr_t end)
1471 {
1472         pud_t *pud;
1473         phys_addr_t next;
1474
1475         pud = stage2_pud_offset(kvm, pgd, addr);
1476         do {
1477                 next = stage2_pud_addr_end(kvm, addr, end);
1478                 if (!stage2_pud_none(kvm, *pud)) {
1479                         if (stage2_pud_huge(kvm, *pud)) {
1480                                 if (!kvm_s2pud_readonly(pud))
1481                                         kvm_set_s2pud_readonly(pud);
1482                         } else {
1483                                 stage2_wp_pmds(kvm, pud, addr, next);
1484                         }
1485                 }
1486         } while (pud++, addr = next, addr != end);
1487 }
1488
1489 /**
1490  * stage2_wp_range() - write protect stage2 memory region range
1491  * @kvm:        The KVM pointer
1492  * @addr:       Start address of range
1493  * @end:        End address of range
1494  */
1495 static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1496 {
1497         pgd_t *pgd;
1498         phys_addr_t next;
1499
1500         pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
1501         do {
1502                 /*
1503                  * Release kvm_mmu_lock periodically if the memory region is
1504                  * large. Otherwise, we may see kernel panics with
1505                  * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1506                  * CONFIG_LOCKDEP. Additionally, holding the lock too long
1507                  * will also starve other vCPUs. We have to also make sure
1508                  * that the page tables are not freed while we released
1509                  * the lock.
1510                  */
1511                 cond_resched_lock(&kvm->mmu_lock);
1512                 if (!READ_ONCE(kvm->arch.pgd))
1513                         break;
1514                 next = stage2_pgd_addr_end(kvm, addr, end);
1515                 if (stage2_pgd_present(kvm, *pgd))
1516                         stage2_wp_puds(kvm, pgd, addr, next);
1517         } while (pgd++, addr = next, addr != end);
1518 }
1519
1520 /**
1521  * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1522  * @kvm:        The KVM pointer
1523  * @slot:       The memory slot to write protect
1524  *
1525  * Called to start logging dirty pages after the KVM_MEM_LOG_DIRTY_PAGES
1526  * operation is applied to the memory region. After this function returns,
1527  * all present PUD, PMD and PTE entries in the memory region are write
1528  * protected, and the dirty page log can then be read.
1529  *
1530  * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1531  * serializing operations for VM memory regions.
1532  */
1533 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1534 {
1535         struct kvm_memslots *slots = kvm_memslots(kvm);
1536         struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1537         phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
1538         phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1539
1540         spin_lock(&kvm->mmu_lock);
1541         stage2_wp_range(kvm, start, end);
1542         spin_unlock(&kvm->mmu_lock);
1543         kvm_flush_remote_tlbs(kvm);
1544 }
1545
1546 /**
1547  * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1548  * @kvm:        The KVM pointer
1549  * @slot:       The memory slot associated with mask
1550  * @gfn_offset: The gfn offset in memory slot
1551  * @mask:       The mask of dirty pages at offset 'gfn_offset' in this memory
1552  *              slot to be write protected
1553  *
1554  * Walks the bits set in mask and write protects the associated PTEs. Caller must
1555  * acquire kvm_mmu_lock.
1556  */
1557 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1558                 struct kvm_memory_slot *slot,
1559                 gfn_t gfn_offset, unsigned long mask)
1560 {
1561         phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1562         phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
1563         phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1564
1565         stage2_wp_range(kvm, start, end);
1566 }
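
     /*
      * Illustrative example (hypothetical values, not taken from the code
      * above): with gfn_offset == 0 and mask == 0b0110, __ffs(mask) == 1 and
      * __fls(mask) == 2, so the range passed to stage2_wp_range() covers the
      * two pages starting at (base_gfn + 1) << PAGE_SHIFT and ending at
      * (base_gfn + 3) << PAGE_SHIFT (exclusive).
      */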
1567
1568 /*
1569  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1570  * dirty pages.
1571  *
1572  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1573  * enable dirty logging for them.
1574  */
1575 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1576                 struct kvm_memory_slot *slot,
1577                 gfn_t gfn_offset, unsigned long mask)
1578 {
1579         kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1580 }
1581
1582 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
1583 {
1584         __clean_dcache_guest_page(pfn, size);
1585 }
1586
1587 static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
1588 {
1589         __invalidate_icache_guest_page(pfn, size);
1590 }
1591
1592 static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
1593 {
1594         send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
1595 }
1596
1597 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1598                                                unsigned long hva,
1599                                                unsigned long map_size)
1600 {
1601         gpa_t gpa_start;
1602         hva_t uaddr_start, uaddr_end;
1603         size_t size;
1604
1605         size = memslot->npages * PAGE_SIZE;
1606
1607         gpa_start = memslot->base_gfn << PAGE_SHIFT;
1608
1609         uaddr_start = memslot->userspace_addr;
1610         uaddr_end = uaddr_start + size;
1611
1612         /*
1613          * Pages belonging to memslots that don't have the same alignment
1614          * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
1615          * PMD/PUD entries, because we'll end up mapping the wrong pages.
1616          *
1617          * Consider a layout like the following:
1618          *
1619          *    memslot->userspace_addr:
1620          *    +-----+--------------------+--------------------+---+
1621          *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
1622          *    +-----+--------------------+--------------------+---+
1623          *
1624          *    memslot->base_gfn << PAGE_SHIFT:
1625          *      +---+--------------------+--------------------+-----+
1626          *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
1627          *      +---+--------------------+--------------------+-----+
1628          *
1629          * If we create those stage-2 blocks, we'll end up with this incorrect
1630          * mapping:
1631          *   d -> f
1632          *   e -> g
1633          *   f -> h
1634          */
1635         if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
1636                 return false;
1637
1638         /*
1639          * Next, let's make sure we're not trying to map anything not covered
1640          * by the memslot. This means we have to prohibit block size mappings
1641          * for the beginning and end of a non-block aligned and non-block sized
1642          * memory slot (illustrated by the head and tail parts of the
1643          * userspace view above containing pages 'abcde' and 'xyz',
1644          * respectively).
1645          *
1646          * Note that it doesn't matter if we do the check using the
1647          * userspace_addr or the base_gfn, as both are equally aligned (per
1648          * the check above) and equally sized.
1649          */
1650         return (hva & ~(map_size - 1)) >= uaddr_start &&
1651                (hva & ~(map_size - 1)) + map_size <= uaddr_end;
1652 }
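
     /*
      * Worked example (hypothetical addresses): with 4K pages and
      * map_size == PMD_SIZE (2MB), gpa_start == 0x40100000 and
      * uaddr_start == 0x7f1200100000 share the same offset (0x100000) within
      * a 2MB block, so the alignment check above passes; a stage-2 block
      * mapping is then only used if the 2MB block around hva also lies
      * entirely inside [uaddr_start, uaddr_end).
      */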
1653
1654 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1655                           struct kvm_memory_slot *memslot, unsigned long hva,
1656                           unsigned long fault_status)
1657 {
1658         int ret;
1659         bool write_fault, writable, force_pte = false;
1660         bool exec_fault, needs_exec;
1661         unsigned long mmu_seq;
1662         gfn_t gfn = fault_ipa >> PAGE_SHIFT;
1663         struct kvm *kvm = vcpu->kvm;
1664         struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1665         struct vm_area_struct *vma;
1666         short vma_shift;
1667         kvm_pfn_t pfn;
1668         pgprot_t mem_type = PAGE_S2;
1669         bool logging_active = memslot_is_logging(memslot);
1670         unsigned long vma_pagesize, flags = 0;
1671
1672         write_fault = kvm_is_write_fault(vcpu);
1673         exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1674         VM_BUG_ON(write_fault && exec_fault);
1675
1676         if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
1677                 kvm_err("Unexpected L2 read permission error\n");
1678                 return -EFAULT;
1679         }
1680
1681         /* Let's check if we will get back a huge page backed by hugetlbfs */
1682         down_read(&current->mm->mmap_sem);
1683         vma = find_vma_intersection(current->mm, hva, hva + 1);
1684         if (unlikely(!vma)) {
1685                 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1686                 up_read(&current->mm->mmap_sem);
1687                 return -EFAULT;
1688         }
1689
1690         if (is_vm_hugetlb_page(vma))
1691                 vma_shift = huge_page_shift(hstate_vma(vma));
1692         else
1693                 vma_shift = PAGE_SHIFT;
1694
1695         vma_pagesize = 1ULL << vma_shift;
1696         if (logging_active ||
1697             (vma->vm_flags & VM_PFNMAP) ||
1698             !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
1699                 force_pte = true;
1700                 vma_pagesize = PAGE_SIZE;
1701         }
1702
1703         /*
1704          * The stage-2 tables have a minimum of 2 levels (for arm64 see
1705          * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
1706          * use PMD_SIZE huge mappings (even when the PMD is folded into the PGD).
1707          * For PUD huge mappings, we must make sure that we have at least
1708          * 3 levels, i.e. that the PMD is not folded.
1709          */
1710         if (vma_pagesize == PMD_SIZE ||
1711             (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
1712                 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
1713         up_read(&current->mm->mmap_sem);
1714
1715         /* We need minimum second+third level pages */
1716         ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
1717                                      KVM_NR_MEM_OBJS);
1718         if (ret)
1719                 return ret;
1720
1721         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1722         /*
1723          * Ensure the read of mmu_notifier_seq happens before we call
1724          * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1725          * the page we just got a reference to getting unmapped before we have a
1726          * chance to grab the mmu_lock, which ensures that if the page gets
1727          * unmapped afterwards, the call to kvm_unmap_hva_range() will take it away
1728          * from us again properly. This smp_rmb() interacts with the smp_wmb()
1729          * in kvm_mmu_notifier_invalidate_<page|range_end>.
1730          */
1731         smp_rmb();
1732
1733         pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
1734         if (pfn == KVM_PFN_ERR_HWPOISON) {
1735                 kvm_send_hwpoison_signal(hva, vma_shift);
1736                 return 0;
1737         }
1738         if (is_error_noslot_pfn(pfn))
1739                 return -EFAULT;
1740
1741         if (kvm_is_device_pfn(pfn)) {
1742                 mem_type = PAGE_S2_DEVICE;
1743                 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1744         } else if (logging_active) {
1745                 /*
1746                  * Pages in a memslot with logging enabled should not be
1747                  * mapped with huge pages (it introduces churn and performance
1748                  * degradation), so force a pte mapping.
1749                  */
1750                 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1751
1752                 /*
1753                  * Only actually map the page as writable if this was a write
1754                  * fault.
1755                  */
1756                 if (!write_fault)
1757                         writable = false;
1758         }
1759
1760         if (exec_fault && is_iomap(flags))
1761                 return -ENOEXEC;
1762
1763         spin_lock(&kvm->mmu_lock);
1764         if (mmu_notifier_retry(kvm, mmu_seq))
1765                 goto out_unlock;
1766
1767         if (vma_pagesize == PAGE_SIZE && !force_pte) {
1768                 /*
1769                  * Only PMD_SIZE transparent hugepages (THP) are
1770                  * currently supported. This code will need to be
1771                  * updated to support other THP sizes.
1772                  *
1773                  * Make sure the host VA and the guest IPA are sufficiently
1774                  * aligned and that the block is contained within the memslot.
1775                  */
1776                 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
1777                     transparent_hugepage_adjust(&pfn, &fault_ipa))
1778                         vma_pagesize = PMD_SIZE;
1779         }
1780
1781         if (writable)
1782                 kvm_set_pfn_dirty(pfn);
1783
1784         if (fault_status != FSC_PERM && !is_iomap(flags))
1785                 clean_dcache_guest_page(pfn, vma_pagesize);
1786
1787         if (exec_fault)
1788                 invalidate_icache_guest_page(pfn, vma_pagesize);
1789
1790         /*
1791          * If we took an execution fault we have made the
1792          * icache/dcache coherent above and should now let the s2
1793          * mapping be executable.
1794          *
1795          * Write faults (!exec_fault && FSC_PERM) are orthogonal to
1796          * execute permissions, and we preserve whatever we have.
1797          */
1798         needs_exec = exec_fault ||
1799                 (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
1800
1801         if (vma_pagesize == PUD_SIZE) {
1802                 pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
1803
1804                 new_pud = kvm_pud_mkhuge(new_pud);
1805                 if (writable)
1806                         new_pud = kvm_s2pud_mkwrite(new_pud);
1807
1808                 if (needs_exec)
1809                         new_pud = kvm_s2pud_mkexec(new_pud);
1810
1811                 ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
1812         } else if (vma_pagesize == PMD_SIZE) {
1813                 pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
1814
1815                 new_pmd = kvm_pmd_mkhuge(new_pmd);
1816
1817                 if (writable)
1818                         new_pmd = kvm_s2pmd_mkwrite(new_pmd);
1819
1820                 if (needs_exec)
1821                         new_pmd = kvm_s2pmd_mkexec(new_pmd);
1822
1823                 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1824         } else {
1825                 pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
1826
1827                 if (writable) {
1828                         new_pte = kvm_s2pte_mkwrite(new_pte);
1829                         mark_page_dirty(kvm, gfn);
1830                 }
1831
1832                 if (needs_exec)
1833                         new_pte = kvm_s2pte_mkexec(new_pte);
1834
1835                 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
1836         }
1837
1838 out_unlock:
1839         spin_unlock(&kvm->mmu_lock);
1840         kvm_set_pfn_accessed(pfn);
1841         kvm_release_pfn_clean(pfn);
1842         return ret;
1843 }
1844
1845 /*
1846  * Resolve the access fault by making the page young again.
1847  * Note that because the faulting entry is guaranteed not to be
1848  * cached in the TLB, we don't need to invalidate anything.
1849  * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
1850  * so there is no need for atomic (pte|pmd)_mkyoung operations.
1851  */
1852 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1853 {
1854         pud_t *pud;
1855         pmd_t *pmd;
1856         pte_t *pte;
1857         kvm_pfn_t pfn;
1858         bool pfn_valid = false;
1859
1860         trace_kvm_access_fault(fault_ipa);
1861
1862         spin_lock(&vcpu->kvm->mmu_lock);
1863
1864         if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
1865                 goto out;
1866
1867         if (pud) {              /* HugeTLB */
1868                 *pud = kvm_s2pud_mkyoung(*pud);
1869                 pfn = kvm_pud_pfn(*pud);
1870                 pfn_valid = true;
1871         } else  if (pmd) {      /* THP, HugeTLB */
1872                 *pmd = pmd_mkyoung(*pmd);
1873                 pfn = pmd_pfn(*pmd);
1874                 pfn_valid = true;
1875         } else {
1876                 *pte = pte_mkyoung(*pte);       /* Just a page... */
1877                 pfn = pte_pfn(*pte);
1878                 pfn_valid = true;
1879         }
1880
1881 out:
1882         spin_unlock(&vcpu->kvm->mmu_lock);
1883         if (pfn_valid)
1884                 kvm_set_pfn_accessed(pfn);
1885 }
1886
1887 /**
1888  * kvm_handle_guest_abort - handles all 2nd stage aborts
1889  * @vcpu:       the VCPU pointer
1890  * @run:        the kvm_run structure
1891  *
1892  * Any abort that gets to the host is almost guaranteed to be caused by a
1893  * missing second stage translation table entry, which can mean either that the
1894  * guest simply needs more memory and we must allocate an appropriate page, or
1895  * that the guest tried to access I/O memory, which is emulated by user
1896  * space. The distinction is based on the IPA causing the fault and whether this
1897  * memory region has been registered as standard RAM by user space.
1898  */
1899 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1900 {
1901         unsigned long fault_status;
1902         phys_addr_t fault_ipa;
1903         struct kvm_memory_slot *memslot;
1904         unsigned long hva;
1905         bool is_iabt, write_fault, writable;
1906         gfn_t gfn;
1907         int ret, idx;
1908
1909         fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1910
1911         fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1912         is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1913
1914         /* Synchronous External Abort? */
1915         if (kvm_vcpu_dabt_isextabt(vcpu)) {
1916                 /*
1917                  * For RAS the host kernel may handle this abort.
1918                  * There is no need to pass the error into the guest.
1919                  */
1920                 if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
1921                         return 1;
1922
1923                 if (unlikely(!is_iabt)) {
1924                         kvm_inject_vabt(vcpu);
1925                         return 1;
1926                 }
1927         }
1928
1929         trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1930                               kvm_vcpu_get_hfar(vcpu), fault_ipa);
1931
1932         /* Check that the stage-2 fault is a translation, permission or access fault */
1933         if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1934             fault_status != FSC_ACCESS) {
1935                 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1936                         kvm_vcpu_trap_get_class(vcpu),
1937                         (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1938                         (unsigned long)kvm_vcpu_get_hsr(vcpu));
1939                 return -EFAULT;
1940         }
1941
1942         idx = srcu_read_lock(&vcpu->kvm->srcu);
1943
1944         gfn = fault_ipa >> PAGE_SHIFT;
1945         memslot = gfn_to_memslot(vcpu->kvm, gfn);
1946         hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1947         write_fault = kvm_is_write_fault(vcpu);
1948         if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1949                 if (is_iabt) {
1950                         /* Prefetch Abort on I/O address */
1951                         ret = -ENOEXEC;
1952                         goto out;
1953                 }
1954
1955                 /*
1956                  * Check for a cache maintenance operation. Since we
1957                  * ended-up here, we know it is outside of any memory
1958                  * slot. But we can't find out if that is for a device,
1959                  * or if the guest is just being stupid. The only thing
1960                  * we know for sure is that this range cannot be cached.
1961                  *
1962                  * So let's assume that the guest is just being
1963                  * cautious, and skip the instruction.
1964                  */
1965                 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1966                         kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1967                         ret = 1;
1968                         goto out_unlock;
1969                 }
1970
1971                 /*
1972                  * The IPA is reported as [MAX:12], so we need to
1973                  * complement it with the bottom 12 bits from the
1974                  * faulting VA. This is always 12 bits, irrespective
1975                  * of the page size.
1976                  */
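                /*
                 * E.g. (hypothetical values): if the reported IPA is
                 * 0x09000000 and the faulting VA ends in 0x3f8, the
                 * resulting fault_ipa passed to io_mem_abort() below is
                 * 0x090003f8.
                 */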
1977                 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1978                 ret = io_mem_abort(vcpu, run, fault_ipa);
1979                 goto out_unlock;
1980         }
1981
1982         /* Userspace should not be able to register out-of-bounds IPAs */
1983         VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
1984
1985         if (fault_status == FSC_ACCESS) {
1986                 handle_access_fault(vcpu, fault_ipa);
1987                 ret = 1;
1988                 goto out_unlock;
1989         }
1990
1991         ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1992         if (ret == 0)
1993                 ret = 1;
1994 out:
1995         if (ret == -ENOEXEC) {
1996                 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1997                 ret = 1;
1998         }
1999 out_unlock:
2000         srcu_read_unlock(&vcpu->kvm->srcu, idx);
2001         return ret;
2002 }
2003
2004 static int handle_hva_to_gpa(struct kvm *kvm,
2005                              unsigned long start,
2006                              unsigned long end,
2007                              int (*handler)(struct kvm *kvm,
2008                                             gpa_t gpa, u64 size,
2009                                             void *data),
2010                              void *data)
2011 {
2012         struct kvm_memslots *slots;
2013         struct kvm_memory_slot *memslot;
2014         int ret = 0;
2015
2016         slots = kvm_memslots(kvm);
2017
2018         /* we only care about the pages that the guest sees */
2019         kvm_for_each_memslot(memslot, slots) {
2020                 unsigned long hva_start, hva_end;
2021                 gfn_t gpa;
2022
2023                 hva_start = max(start, memslot->userspace_addr);
2024                 hva_end = min(end, memslot->userspace_addr +
2025                                         (memslot->npages << PAGE_SHIFT));
2026                 if (hva_start >= hva_end)
2027                         continue;
2028
2029                 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
2030                 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
2031         }
2032
2033         return ret;
2034 }
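
     /*
      * Example (hypothetical memslot): for a slot with
      * userspace_addr == 0x7f0000000000 and base_gfn == 0x40000 (i.e. guest
      * physical base 0x40000000), a notifier range of
      * [0x7f0000001000, 0x7f0000003000) intersects the slot and results in
      * handler(kvm, 0x40001000, 0x2000, data).
      */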
2035
2036 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2037 {
2038         unmap_stage2_range(kvm, gpa, size);
2039         return 0;
2040 }
2041
2042 int kvm_unmap_hva_range(struct kvm *kvm,
2043                         unsigned long start, unsigned long end)
2044 {
2045         if (!kvm->arch.pgd)
2046                 return 0;
2047
2048         trace_kvm_unmap_hva_range(start, end);
2049         handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
2050         return 0;
2051 }
2052
2053 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2054 {
2055         pte_t *pte = (pte_t *)data;
2056
2057         WARN_ON(size != PAGE_SIZE);
2058         /*
2059          * We can always call stage2_set_pte with the KVM_S2_FLAG_LOGGING_ACTIVE
2060          * flag clear because MMU notifiers will have unmapped a huge PMD before
2061          * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
2062          * therefore stage2_set_pte() never needs to clear out a huge PMD
2063          * through this calling path.
2064          */
2065         stage2_set_pte(kvm, NULL, gpa, pte, 0);
2066         return 0;
2067 }
2068
2069
2070 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
2071 {
2072         unsigned long end = hva + PAGE_SIZE;
2073         kvm_pfn_t pfn = pte_pfn(pte);
2074         pte_t stage2_pte;
2075
2076         if (!kvm->arch.pgd)
2077                 return 0;
2078
2079         trace_kvm_set_spte_hva(hva);
2080
2081         /*
2082          * We've moved a page around, probably through CoW, so let's treat it
2083          * just like a translation fault and clean the cache to the PoC.
2084          */
2085         clean_dcache_guest_page(pfn, PAGE_SIZE);
2086         stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
2087         handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
2088
2089         return 0;
2090 }
2091
2092 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2093 {
2094         pud_t *pud;
2095         pmd_t *pmd;
2096         pte_t *pte;
2097
2098         WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2099         if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
2100                 return 0;
2101
2102         if (pud)
2103                 return stage2_pudp_test_and_clear_young(pud);
2104         else if (pmd)
2105                 return stage2_pmdp_test_and_clear_young(pmd);
2106         else
2107                 return stage2_ptep_test_and_clear_young(pte);
2108 }
2109
2110 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2111 {
2112         pud_t *pud;
2113         pmd_t *pmd;
2114         pte_t *pte;
2115
2116         WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2117         if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
2118                 return 0;
2119
2120         if (pud)
2121                 return kvm_s2pud_young(*pud);
2122         else if (pmd)
2123                 return pmd_young(*pmd);
2124         else
2125                 return pte_young(*pte);
2126 }
2127
2128 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
2129 {
2130         if (!kvm->arch.pgd)
2131                 return 0;
2132         trace_kvm_age_hva(start, end);
2133         return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
2134 }
2135
2136 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
2137 {
2138         if (!kvm->arch.pgd)
2139                 return 0;
2140         trace_kvm_test_age_hva(hva);
2141         return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
2142                                  kvm_test_age_hva_handler, NULL);
2143 }
2144
2145 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
2146 {
2147         mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
2148 }
2149
2150 phys_addr_t kvm_mmu_get_httbr(void)
2151 {
2152         if (__kvm_cpu_uses_extended_idmap())
2153                 return virt_to_phys(merged_hyp_pgd);
2154         else
2155                 return virt_to_phys(hyp_pgd);
2156 }
2157
2158 phys_addr_t kvm_get_idmap_vector(void)
2159 {
2160         return hyp_idmap_vector;
2161 }
2162
2163 static int kvm_map_idmap_text(pgd_t *pgd)
2164 {
2165         int err;
2166
2167         /* Create the idmap in the boot page tables */
2168         err =   __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
2169                                       hyp_idmap_start, hyp_idmap_end,
2170                                       __phys_to_pfn(hyp_idmap_start),
2171                                       PAGE_HYP_EXEC);
2172         if (err)
2173                 kvm_err("Failed to idmap %lx-%lx\n",
2174                         hyp_idmap_start, hyp_idmap_end);
2175
2176         return err;
2177 }
2178
2179 int kvm_mmu_init(void)
2180 {
2181         int err;
2182
2183         hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
2184         hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
2185         hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
2186         hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
2187         hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
2188
2189         /*
2190          * We rely on the linker script to ensure at build time that the HYP
2191          * init code does not cross a page boundary.
2192          */
2193         BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
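             /*
              * The XOR above is zero in the bits covered by PAGE_MASK only when
              * hyp_idmap_start and (hyp_idmap_end - 1) fall in the same page,
              * e.g. (hypothetical, 4K pages) 0x...41200 and 0x...41ff8 pass,
              * while 0x...41e00 and 0x...42010 would trigger the BUG_ON().
              */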
2194
2195         kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
2196         kvm_debug("HYP VA range: %lx:%lx\n",
2197                   kern_hyp_va(PAGE_OFFSET),
2198                   kern_hyp_va((unsigned long)high_memory - 1));
2199
2200         if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
2201             hyp_idmap_start <  kern_hyp_va((unsigned long)high_memory - 1) &&
2202             hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
2203                 /*
2204                  * The idmap page intersects with the HYP VA space;
2205                  * it is not safe to continue further.
2206                  */
2207                 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
2208                 err = -EINVAL;
2209                 goto out;
2210         }
2211
2212         hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
2213         if (!hyp_pgd) {
2214                 kvm_err("Hyp mode PGD not allocated\n");
2215                 err = -ENOMEM;
2216                 goto out;
2217         }
2218
2219         if (__kvm_cpu_uses_extended_idmap()) {
2220                 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2221                                                          hyp_pgd_order);
2222                 if (!boot_hyp_pgd) {
2223                         kvm_err("Hyp boot PGD not allocated\n");
2224                         err = -ENOMEM;
2225                         goto out;
2226                 }
2227
2228                 err = kvm_map_idmap_text(boot_hyp_pgd);
2229                 if (err)
2230                         goto out;
2231
2232                 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
2233                 if (!merged_hyp_pgd) {
2234                         kvm_err("Failed to allocate extra HYP pgd\n");
2235                         goto out;
2236                 }
2237                 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
2238                                     hyp_idmap_start);
2239         } else {
2240                 err = kvm_map_idmap_text(hyp_pgd);
2241                 if (err)
2242                         goto out;
2243         }
2244
2245         io_map_base = hyp_idmap_start;
2246         return 0;
2247 out:
2248         free_hyp_pgds();
2249         return err;
2250 }
2251
2252 void kvm_arch_commit_memory_region(struct kvm *kvm,
2253                                    const struct kvm_userspace_memory_region *mem,
2254                                    const struct kvm_memory_slot *old,
2255                                    const struct kvm_memory_slot *new,
2256                                    enum kvm_mr_change change)
2257 {
2258         /*
2259          * At this point the memslot has been committed and there is an
2260          * allocated dirty_bitmap[]; dirty pages will be tracked while the
2261          * memory slot is write protected.
2262          */
2263         if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
2264                 kvm_mmu_wp_memory_region(kvm, mem->slot);
2265 }
2266
2267 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2268                                    struct kvm_memory_slot *memslot,
2269                                    const struct kvm_userspace_memory_region *mem,
2270                                    enum kvm_mr_change change)
2271 {
2272         hva_t hva = mem->userspace_addr;
2273         hva_t reg_end = hva + mem->memory_size;
2274         bool writable = !(mem->flags & KVM_MEM_READONLY);
2275         int ret = 0;
2276
2277         if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
2278                         change != KVM_MR_FLAGS_ONLY)
2279                 return 0;
2280
2281         /*
2282          * Prevent userspace from creating a memory region outside of the IPA
2283          * space addressable by the KVM guest.
2284          */
2285         if (memslot->base_gfn + memslot->npages >=
2286             (kvm_phys_size(kvm) >> PAGE_SHIFT))
2287                 return -EFAULT;
2288
2289         down_read(&current->mm->mmap_sem);
2290         /*
2291          * A memory region could potentially cover multiple VMAs, and any holes
2292          * between them, so iterate over all of them to find out if we can map
2293          * any of them right now.
2294          *
2295          *     +--------------------------------------------+
2296          * +---------------+----------------+   +----------------+
2297          * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
2298          * +---------------+----------------+   +----------------+
2299          *     |               memory region                |
2300          *     +--------------------------------------------+
2301          */
2302         do {
2303                 struct vm_area_struct *vma = find_vma(current->mm, hva);
2304                 hva_t vm_start, vm_end;
2305
2306                 if (!vma || vma->vm_start >= reg_end)
2307                         break;
2308
2309                 /*
2310                  * Take the intersection of this VMA with the memory region
2311                  */
2312                 vm_start = max(hva, vma->vm_start);
2313                 vm_end = min(reg_end, vma->vm_end);
2314
2315                 if (vma->vm_flags & VM_PFNMAP) {
2316                         gpa_t gpa = mem->guest_phys_addr +
2317                                     (vm_start - mem->userspace_addr);
2318                         phys_addr_t pa;
2319
2320                         pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
2321                         pa += vm_start - vma->vm_start;
2322
2323                         /* IO region dirty page logging not allowed */
2324                         if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2325                                 ret = -EINVAL;
2326                                 goto out;
2327                         }
2328
2329                         ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
2330                                                     vm_end - vm_start,
2331                                                     writable);
2332                         if (ret)
2333                                 break;
2334                 }
2335                 hva = vm_end;
2336         } while (hva < reg_end);
2337
2338         if (change == KVM_MR_FLAGS_ONLY)
2339                 goto out;
2340
2341         spin_lock(&kvm->mmu_lock);
2342         if (ret)
2343                 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
2344         else
2345                 stage2_flush_memslot(kvm, memslot);
2346         spin_unlock(&kvm->mmu_lock);
2347 out:
2348         up_read(&current->mm->mmap_sem);
2349         return ret;
2350 }
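
     /*
      * Example for the VM_PFNMAP case above (hypothetical values): a VMA with
      * vm_start == 0x7f0000000000 and vm_pgoff == 0x80000 describes physical
      * address 0x80000000; if the memslot starts at that hva with
      * guest_phys_addr == 0x10000000, the intersection is mapped with
      * kvm_phys_addr_ioremap(kvm, 0x10000000, 0x80000000, ...).
      */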
2351
2352 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
2353                            struct kvm_memory_slot *dont)
2354 {
2355 }
2356
2357 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2358                             unsigned long npages)
2359 {
2360         return 0;
2361 }
2362
2363 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
2364 {
2365 }
2366
2367 void kvm_arch_flush_shadow_all(struct kvm *kvm)
2368 {
2369         kvm_free_stage2_pgd(kvm);
2370 }
2371
2372 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2373                                    struct kvm_memory_slot *slot)
2374 {
2375         gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2376         phys_addr_t size = slot->npages << PAGE_SHIFT;
2377
2378         spin_lock(&kvm->mmu_lock);
2379         unmap_stage2_range(kvm, gpa, size);
2380         spin_unlock(&kvm->mmu_lock);
2381 }
2382
2383 /*
2384  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
2385  *
2386  * Main problems:
2387  * - S/W ops are local to a CPU (not broadcast)
2388  * - We have line migration behind our back (speculation)
2389  * - System caches don't support S/W at all (damn!)
2390  *
2391  * In the face of the above, the best we can do is to try and convert
2392  * S/W ops to VA ops. Because the guest is not allowed to infer the
2393  * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2394  * which is a rather good thing for us.
2395  *
2396  * Also, it is only used when turning caches on/off ("The expected
2397  * usage of the cache maintenance instructions that operate by set/way
2398  * is associated with the cache maintenance instructions associated
2399  * with the powerdown and powerup of caches, if this is required by
2400  * the implementation.").
2401  *
2402  * We use the following policy:
2403  *
2404  * - If we trap a S/W operation, we enable VM trapping to detect
2405  *   caches being turned on/off, and do a full clean.
2406  *
2407  * - We flush the caches both when they are turned on and when they are turned off.
2408  *
2409  * - Once the caches are enabled, we stop trapping VM ops.
2410  */
2411 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2412 {
2413         unsigned long hcr = *vcpu_hcr(vcpu);
2414
2415         /*
2416          * If this is the first time we do a S/W operation
2417          * (i.e. HCR_TVM not set), flush the whole of guest memory and enable
2418          * VM trapping.
2419          *
2420          * Otherwise, rely on the VM trapping to wait for the MMU +
2421          * Caches to be turned off. At that point, we'll be able to
2422          * clean the caches again.
2423          */
2424         if (!(hcr & HCR_TVM)) {
2425                 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2426                                         vcpu_has_cache_enabled(vcpu));
2427                 stage2_flush_vm(vcpu->kvm);
2428                 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
2429         }
2430 }
2431
2432 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2433 {
2434         bool now_enabled = vcpu_has_cache_enabled(vcpu);
2435
2436         /*
2437          * If switching the MMU+caches on, need to invalidate the caches.
2438          * If switching it off, need to clean the caches.
2439          * Clean + invalidate does the trick always.
2440          */
2441         if (now_enabled != was_enabled)
2442                 stage2_flush_vm(vcpu->kvm);
2443
2444         /* Caches are now on, stop trapping VM ops (until a S/W op) */
2445         if (now_enabled)
2446                 *vcpu_hcr(vcpu) &= ~HCR_TVM;
2447
2448         trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
2449 }