asedeno.scripts.mit.edu Git - linux.git/blobdiff - arch/powerpc/platforms/pseries/iommu.c
powerpc/pseries: Enable VFIO
[linux.git] / arch / powerpc / platforms / pseries / iommu.c
index 7ce5db209abfb107e9584850afd753f9b6b2cf23..8374adee27e36b17ffddcbe5481e173fa2b3a6ff 100644 (file)
@@ -551,6 +551,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
 static void iommu_table_setparms_lpar(struct pci_controller *phb,
                                      struct device_node *dn,
                                      struct iommu_table *tbl,
+                                     struct iommu_table_group *table_group,
                                      const __be32 *dma_window)
 {
        unsigned long offset, size;
@@ -564,6 +565,9 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
        tbl->it_type = TCE_PCI;
        tbl->it_offset = offset >> tbl->it_page_shift;
        tbl->it_size = size >> tbl->it_page_shift;
+
+       table_group->tce32_start = offset;
+       table_group->tce32_size = size;
 }
 
 struct iommu_table_ops iommu_table_pseries_ops = {
@@ -652,8 +656,38 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
        pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
 }
 
+#ifdef CONFIG_IOMMU_API
+static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
+                               long *tce, enum dma_data_direction *direction)
+{
+       long rc;
+       unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
+       unsigned long flags, oldtce = 0;
+       u64 proto_tce = iommu_direction_to_tce_perm(*direction);
+       unsigned long newtce = *tce | proto_tce;
+
+       spin_lock_irqsave(&tbl->large_pool.lock, flags);
+
+       rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
+       if (!rc)
+               rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);
+
+       if (!rc) {
+               *direction = iommu_tce_direction(oldtce);
+               *tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+       }
+
+       spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
+
+       return rc;
+}
+#endif
+
 struct iommu_table_ops iommu_table_lpar_multi_ops = {
        .set = tce_buildmulti_pSeriesLP,
+#ifdef CONFIG_IOMMU_API
+       .exchange = tce_exchange_pseries,
+#endif
        .clear = tce_freemulti_pSeriesLP,
        .get = tce_get_pSeriesLP
 };
@@ -690,7 +724,8 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
        if (!ppci->table_group) {
                ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
                tbl = ppci->table_group->tables[0];
-               iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
+               iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
+                               ppci->table_group, dma_window);
                tbl->it_ops = &iommu_table_lpar_multi_ops;
                iommu_init_table(tbl, ppci->phb->node);
                iommu_register_group(ppci->table_group,
@@ -1144,7 +1179,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
        if (!pci->table_group) {
                pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
                tbl = pci->table_group->tables[0];
-               iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
+               iommu_table_setparms_lpar(pci->phb, pdn, tbl,
+                               pci->table_group, dma_window);
                tbl->it_ops = &iommu_table_lpar_multi_ops;
                iommu_init_table(tbl, pci->phb->node);
                iommu_register_group(pci->table_group,