// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

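/* Number of pages needed to hold @iommu_pages TCE entries of 8 bytes each */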
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

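/*
 * Total locked-vm debit for a TCE table: the TCE pages themselves plus
 * the pages backing the table descriptor and its page pointer array.
 */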
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

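/* RCU callback: drop the iommu_table reference once all readers are done */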
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);

        kfree(stit);
}

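/*
 * Called on the last kref_put(): unlink the LIOBN<->iommu_table binding
 * and free it after an RCU grace period.
 */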
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

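/*
 * Drops every reference this VM's TCE tables hold on the hardware tables
 * of @grp, used when the IOMMU group is detached from the VM.
 */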
void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        int i;
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                        }
                }
        }
}

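/*
 * Binds the guest TCE table identified by @tablefd to a hardware DMA window
 * of @grp with a compatible page size, offset and size, taking a reference
 * on the iommu_table so that TCE hypercalls against this LIOBN also update
 * the hardware table.
 */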
long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        bool found = false;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        long i;
        struct kvmppc_spapr_tce_iommu_table *stit;
        struct fd f;

        f = fdget(tablefd);
        if (!f.file)
                return -EBADF;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {
                        found = true;
                        break;
                }
        }

        fdput(f);

        if (!found)
                return -EINVAL;

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))
                return -EFAULT;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                if (!tbltmp)
                        continue;
                /* Make sure hardware table parameters are compatible */
                if ((tbltmp->it_page_shift <= stt->page_shift) &&
                                (tbltmp->it_offset << tbltmp->it_page_shift ==
                                 stt->offset << stt->page_shift) &&
                                (tbltmp->it_size << tbltmp->it_page_shift >=
                                 stt->size << stt->page_shift)) {
                        /*
                         * Reference the table to avoid races with
                         * add/remove DMA windows.
                         */
                        tbl = iommu_tce_table_get(tbltmp);
                        break;
                }
        }
        if (!tbl)
                return -EINVAL;

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)
                        continue;

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);
                        return -ENOTTY;
                }
                /*
                 * The table is already known to this KVM, we just increased
                 * its KVM reference counter and can return.
                 */
                return 0;
        }

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);
        if (!stit) {
                iommu_tce_table_put(tbl);
                return -ENOMEM;
        }

        stit->tbl = tbl;
        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);

        return 0;
}

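/* RCU callback: free the backing pages and the table descriptor itself */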
static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
}

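/*
 * Lazily allocates a zeroed page backing the given page of the TCE table,
 * re-checking under alloc_lock to avoid racing allocations.
 */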
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
                unsigned long sttpage)
{
        struct page *page = stt->pages[sttpage];

        if (page)
                return page;

        mutex_lock(&stt->alloc_lock);
        page = stt->pages[sttpage];
        if (!page) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                WARN_ON_ONCE(!page);
                if (page)
                        stt->pages[sttpage] = page;
        }
        mutex_unlock(&stt->alloc_lock);

        return page;
}

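/* mmap fault handler: hand userspace the backing page for the faulting offset */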
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
        if (!page)
                return VM_FAULT_OOM;

        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}

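/*
 * Releases the TCE table on the last fd close: unlink it from the VM,
 * drop all hardware table bindings, return the locked-vm debit and free
 * everything after an RCU grace period.
 */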
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (!kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                        ;
        }

        account_locked_vm(kvm->mm,
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

        kvm_put_kvm(stt->kvm);

        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap           = kvm_spapr_tce_mmap,
        .release        = kvm_spapr_tce_release,
};

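/*
 * Handles KVM_CREATE_SPAPR_TCE_64: creates a guest TCE table for
 * @args->liobn and returns an anonymous file descriptor which userspace
 * can mmap to read the table.
 */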
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        struct mm_struct *mm = kvm->mm;
        unsigned long npages, size = args->size;
        int ret = -ENOMEM;

        if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
                (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
                return -EINVAL;

        npages = kvmppc_tce_pages(size);
        ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                      GFP_KERNEL);
        if (!stt)
                goto fail_acct;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;
        mutex_init(&stt->alloc_lock);
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        ret = 0;
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {
                        ret = -EBUSY;
                        break;
                }
        }

        kvm_get_kvm(kvm);
        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                       stt, O_RDWR | O_CLOEXEC);

        if (ret >= 0)
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
        else
                kvm_put_kvm_no_destroy(kvm);

        mutex_unlock(&kvm->lock);

        if (ret >= 0)
                return ret;

        kfree(stt);
 fail_acct:
        account_locked_vm(mm, kvmppc_stt_pages(npages), false);
        return ret;
}

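/*
 * Translates the guest physical address carried in a TCE into the
 * corresponding userspace address via the memslots, preserving the
 * offset within the page.
 */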
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = search_memslots(kvm_memslots(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        return 0;
}

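/*
 * Validates a guest TCE before it is written anywhere: checks the GPA and,
 * for each attached hardware table, that the target memory has been
 * preregistered for DMA.
 */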
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_TOO_HARD;

        if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
                return H_TOO_HARD;

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem)
                        return H_TOO_HARD;

                if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
                        return H_TOO_HARD;
        }

        return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;
        unsigned long sttpage;

        idx -= stt->offset;
        sttpage = idx / TCES_PER_PAGE;
        page = stt->pages[sttpage];

        if (!page) {
                /* We allow any TCE, not just with read|write permissions */
                if (!tce)
                        return;

                page = kvm_spapr_get_tce_page(stt, sttpage);
                if (!page)
                        return;
        }
        tbl = page_to_virt(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}

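/* Clears a hardware TCE: exchange in an empty entry, discarding the old HPA */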
static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
}

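/*
 * Drops the "mapped" reference on the preregistered memory region recorded
 * in the userspace view of the hardware table entry.
 */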
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                return H_SUCCESS;

        mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

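/*
 * Unmaps a single hardware TCE: exchanges in an empty entry and, if the old
 * entry was valid, drops the mapped reference; restores the entry on failure.
 */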
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
                                        &dir)))
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

        return ret;
}

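/*
 * Unmaps one guest TCE, which may span several hardware IOMMU pages when
 * the guest page size is larger than the hardware one.
 */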
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

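/*
 * Maps a single hardware TCE to the preregistered memory backing @ua,
 * recording @ua in the userspace view of the table.
 */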
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
                return H_TOO_HARD;

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_TOO_HARD;

        if (mm_iommu_mapped_inc(mem))
                return H_TOO_HARD;

        ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

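/* Maps one guest TCE across however many hardware IOMMU pages it covers */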
static long kvmppc_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

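/*
 * H_PUT_TCE hypercall handler: validates the TCE, updates any attached
 * hardware tables and then the emulated table.
 */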
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret, idx;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                goto unlock_exit;

        dir = iommu_tce_direction(tce);

        if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                        entry, ua, dir);

                iommu_tce_kill(stit->tbl, entry, 1);

                if (ret != H_SUCCESS) {
                        kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
                        goto unlock_exit;
                }
        }

        kvmppc_tce_put(stt, entry, tce);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

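/*
 * H_PUT_TCE_INDIRECT hypercall handler: installs up to 512 TCEs read from
 * a guest-supplied list page.
 */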
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The SPAPR spec says that the maximum size of the list is 512 TCEs,
         * so the whole list fits in a 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                /*
                 * This looks unsafe, because we validate, then regrab
                 * the TCE from userspace which could have been changed by
                 * another thread.
                 *
                 * But it actually is safe, because the relevant checks will be
                 * re-executed in the following code.  If userspace tries to
                 * change this dodgily it will result in a messier failure mode
                 * but won't threaten the host.
                 */
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto invalidate_exit;
                }
                tce = be64_to_cpu(tce);

                if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                        ret = H_PARAMETER;
                        goto invalidate_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
                                                entry + i);
                                goto invalidate_exit;
                        }
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

invalidate_exit:
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
                iommu_tce_kill(stit->tbl, entry, npages);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

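/*
 * H_STUFF_TCE hypercall handler: fills @npages consecutive entries with
 * the same value, which must carry no permission bits.
 */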
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only, to allow userspace to poison the TCE table for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                goto invalidate_exit;

                        WARN_ON_ONCE(1);
                        kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
                iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);