// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 */

#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#include <asm/debugfs.h>
#include <asm/powernv.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>

#include "pci.h"

static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	struct pci_dn *pdn = PCI_DN(dn);
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
					   pdn->busno, pdn->devfn);

	/*
	 * pci_get_domain_bus_and_slot() increased the reference count of
	 * the PCI device, but callers don't actually need it as the PE
	 * already holds a reference to the device. Since callers aren't
	 * aware of the reference count change, call pci_dev_put() now to
	 * avoid leaks.
	 */
	if (pdev)
		pci_dev_put(pdev);

	return pdev;
}
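
/*
 * A minimal caller sketch (illustrative only): the returned pointer is
 * borrowed, so the call site does not pair it with pci_dev_put() - the
 * PE keeps the long-lived reference alive:
 *
 *	struct pci_dev *pdev = get_pci_dev(dn);
 *
 *	if (pdev)
 *		dev_info(&pdev->dev, "found linked device\n");
 */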

/* Given an NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev))
		return NULL;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get associated GPU device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	if (WARN_ON(!gpdev))
		return NULL;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get associated NPU device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
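
/*
 * Illustrative device-tree fragment (assumed node names, shown only to
 * make the two phandle lookups above concrete): each NVLink bridge node
 * carries an "ibm,gpu" phandle to its GPU and the GPU node carries
 * "ibm,npu" phandles back to its bridges:
 *
 *	gpu0: gpu@0 {
 *		ibm,npu = <&npu_link0 &npu_link1>;
 *	};
 *
 *	npu_link0: link@0 {
 *		ibm,gpu = <&gpu0>;
 *	};
 */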

/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Returns the linked pci device in *gpdev if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}

static long pnv_npu_unset_window(struct iommu_table_group *table_group,
		int num);

static long pnv_npu_set_window(struct iommu_table_group *table_group, int num,
		struct iommu_table *tbl)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
			tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;
	int num2 = (num == 0) ? 1 : 0;
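
	/*
	 * Worked example (illustrative numbers): a 4GB direct-mapped
	 * window of 64K pages has it_size = 4GB >> 16 = 64K TCEs; each
	 * TCE is 8 bytes, which is why the table size passed to OPAL
	 * below is "size << 3".
	 */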

	/* NPU has just one TVE so if there is another table, remove it first */
	if (npe->table_group.tables[num2])
		pnv_npu_unset_window(&npe->table_group, num2);

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}

static long pnv_npu_unset_window(struct iommu_table_group *table_group, int num)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	if (!npe->table_group.tables[num])
		return 0;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}

#ifdef CONFIG_IOMMU_API
/* Switch ownership from platform code to external user (e.g. VFIO) */
static void pnv_npu_take_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	struct pci_dev *gpdev = NULL;

	/*
	 * Note: NPU has just a single TVE in the hardware which means that
	 * while used by the kernel, it can have either a 32bit window or
	 * DMA bypass but never both. So we deconfigure the 32bit window
	 * only if it was enabled at the moment of the ownership change.
	 */
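	/*
	 * Put differently (summary of the above): tables[0] set means the
	 * TVE holds the kernel 32bit window, so unsetting that window is
	 * enough; tables[0] clear means the TVE holds the bypass mapping,
	 * which is what gets disabled below instead.
	 */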
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(&npe->table_group, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);

	get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (gpdev)
		pnv_npu2_unmap_lpar_dev(gpdev);
}

static void pnv_npu_release_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pci_dev *gpdev = NULL;

	get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (gpdev)
		pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV);
}

static struct iommu_table_group_ops pnv_pci_npu_ops = {
	.set_window = pnv_npu_set_window,
	.unset_window = pnv_npu_unset_window,
	.take_ownership = pnv_npu_take_ownership,
	.release_ownership = pnv_npu_release_ownership,
};
#endif /* CONFIG_IOMMU_API */
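
/*
 * Ownership flow sketch (hedged - this is the generic spapr TCE/VFIO
 * sequence, not something specific to this file): when userspace attaches
 * the IOMMU group to a VFIO container, the spapr TCE driver invokes
 * take_ownership() above; release_ownership() runs on detach, restoring
 * the kernel bypass setup via pnv_npu2_map_lpar_dev().
 */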

/* Maximum possible number of ATSD MMIO registers per NPU */
#define NV_NMMU_ATSD_REGS 8
#define NV_NPU_MAX_PE_NUM	16

/*
 * A compound NPU IOMMU group which might consist of 1 GPU + 2xNPUs (POWER8) or
 * up to 3 x (GPU + 2xNPUs) (POWER9).
 */
struct npu_comp {
	struct iommu_table_group table_group;
	int pe_num;
	struct pnv_ioda_pe *pe[NV_NPU_MAX_PE_NUM];
};

/* An NPU descriptor, valid for POWER9 only */
struct npu {
	int index;
	struct npu_comp npucomp;
};
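
/*
 * Illustrative layout (assumed PE names): with three GPUs behind one
 * POWER9 NPU, npucomp.pe[] may hold
 *	{ GPU0, NVLink0a, NVLink0b, GPU1, NVLink1a, ... }
 * and every windowing operation on the compound group below fans out to
 * each of these PEs in turn.
 */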

#ifdef CONFIG_IOMMU_API
static long pnv_npu_peers_create_table_userspace(
		struct iommu_table_group *table_group,
		int num, __u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table **ptbl)
{
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	/* Create the table on the first attached PE; peers share it */
	if (!npucomp->pe_num || !npucomp->pe[0] ||
			!npucomp->pe[0]->table_group.ops ||
			!npucomp->pe[0]->table_group.ops->create_table)
		return -EFAULT;

	return npucomp->pe[0]->table_group.ops->create_table(
			&npucomp->pe[0]->table_group, num, page_shift,
			window_size, levels, ptbl);
}

/* Fan the window out to every peer PE; roll back already-set ones on error */
static long pnv_npu_peers_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	int i, j;
	long ret = 0;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops->set_window)
			continue;

		ret = pe->table_group.ops->set_window(&pe->table_group,
				num, tbl);
		if (ret)
			break;
	}

	if (ret) {
		for (j = 0; j < i; ++j) {
			struct pnv_ioda_pe *pe = npucomp->pe[j];

			if (!pe->table_group.ops->unset_window)
				continue;

			ret = pe->table_group.ops->unset_window(
					&pe->table_group, num);
			if (ret)
				break;
		}
	} else {
		table_group->tables[num] = iommu_tce_table_get(tbl);
	}

	return ret;
}

static long pnv_npu_peers_unset_window(struct iommu_table_group *table_group,
		int num)
{
	int i, j;
	long ret = 0;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		WARN_ON(npucomp->table_group.tables[num] !=
				table_group->tables[num]);
		if (!npucomp->table_group.tables[num])
			continue;

		if (!pe->table_group.ops->unset_window)
			continue;

		ret = pe->table_group.ops->unset_window(&pe->table_group, num);
		if (ret)
			break;
	}

	if (ret) {
		/* Restore the window on the PEs it was already removed from */
		for (j = 0; j < i; ++j) {
			struct pnv_ioda_pe *pe = npucomp->pe[j];

			if (!npucomp->table_group.tables[num])
				continue;

			if (!pe->table_group.ops->set_window)
				continue;

			ret = pe->table_group.ops->set_window(&pe->table_group,
					num, table_group->tables[num]);
			if (ret)
				break;
		}
	} else if (table_group->tables[num]) {
		iommu_tce_table_put(table_group->tables[num]);
		table_group->tables[num] = NULL;
	}

	return ret;
}

static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group)
{
	int i;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops->take_ownership)
			continue;
		pe->table_group.ops->take_ownership(&pe->table_group);
	}
}

static void pnv_npu_peers_release_ownership(
		struct iommu_table_group *table_group)
{
	int i;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops->release_ownership)
			continue;
		pe->table_group.ops->release_ownership(&pe->table_group);
	}
}

static struct iommu_table_group_ops pnv_npu_peers_ops = {
	.get_table_size = pnv_pci_ioda2_get_table_size,
	.create_table = pnv_npu_peers_create_table_userspace,
	.set_window = pnv_npu_peers_set_window,
	.unset_window = pnv_npu_peers_unset_window,
	.take_ownership = pnv_npu_peers_take_ownership,
	.release_ownership = pnv_npu_peers_release_ownership,
};

static void pnv_comp_attach_table_group(struct npu_comp *npucomp,
		struct pnv_ioda_pe *pe)
{
	if (WARN_ON(npucomp->pe_num == NV_NPU_MAX_PE_NUM))
		return;

	npucomp->pe[npucomp->pe_num] = pe;
	++npucomp->pe_num;
}

struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
{
	struct iommu_table_group *table_group;
	struct npu_comp *npucomp;
	struct pci_dev *gpdev = NULL;
	struct pci_controller *hose;
	struct pci_dev *npdev = NULL;

	list_for_each_entry(gpdev, &pe->pbus->devices, bus_list) {
		npdev = pnv_pci_get_npu_dev(gpdev, 0);
		if (npdev)
			break;
	}

	if (!npdev)
		/* It is not an NPU attached device, skip */
		return NULL;

	hose = pci_bus_to_host(npdev->bus);

	if (hose->npu) {
		/* POWER9: one compound group per NPU for all GPUs and NPUs */
		table_group = &hose->npu->npucomp.table_group;

		if (!table_group->group) {
			table_group->ops = &pnv_npu_peers_ops;
			iommu_register_group(table_group,
					hose->global_number,
					pe->pe_number);
		}
	} else {
		/* Create a group for 1 GPU and attached NPUs for POWER8 */
		pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
		if (!pe->npucomp)
			return NULL;

		table_group = &pe->npucomp->table_group;
		table_group->ops = &pnv_npu_peers_ops;
		iommu_register_group(table_group, hose->global_number,
				pe->pe_number);
	}

	/* Steal capabilities from a GPU PE */
	table_group->max_dynamic_windows_supported =
		pe->table_group.max_dynamic_windows_supported;
	table_group->tce32_start = pe->table_group.tce32_start;
	table_group->tce32_size = pe->table_group.tce32_size;
	table_group->max_levels = pe->table_group.max_levels;
	if (!table_group->pgsizes)
		table_group->pgsizes = pe->table_group.pgsizes;

	npucomp = container_of(table_group, struct npu_comp, table_group);
	pnv_comp_attach_table_group(npucomp, pe);

	return table_group;
}

struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
{
	struct iommu_table_group *table_group;
	struct npu_comp *npucomp;
	struct pci_dev *gpdev = NULL;
	struct pci_dev *npdev;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(pe, &gpdev);

	WARN_ON(!(pe->flags & PNV_IODA_PE_DEV));
	if (!gpe)
		return NULL;

	/*
	 * IODA2 bridges get this set up from pci_controller_ops::setup_bridge
	 * but NPU bridges do not have this hook defined so we do it here.
	 * We do not set up other table group parameters as they won't be used
	 * anyway - NVLink bridges are subordinate PEs.
	 */
	pe->table_group.ops = &pnv_pci_npu_ops;

	table_group = iommu_group_get_iommudata(
			iommu_group_get(&gpdev->dev));

	/*
	 * On P9 the NPU PHB and the PCI PHB support different page sizes,
	 * so keep only the matching ones. We expect the NVLink bridge PE
	 * pgsizes to be initialized by the caller.
	 */
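	/*
	 * For example (illustrative masks only): if the GPU-side group
	 * offers 4K | 64K | 16M pages and the NVLink bridge PE offers
	 * 4K | 64K, the intersection below leaves 4K | 64K visible to
	 * userspace.
	 */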
	table_group->pgsizes &= pe->table_group.pgsizes;
	npucomp = container_of(table_group, struct npu_comp, table_group);
	pnv_comp_attach_table_group(npucomp, pe);

	list_for_each_entry(npdev, &pe->phb->hose->bus->devices, bus_list) {
		struct pci_dev *gpdevtmp = pnv_pci_get_gpu_dev(npdev);

		if (gpdevtmp != gpdev)
			continue;

		iommu_add_device(table_group, &npdev->dev);
	}

	return table_group;
}
#endif /* CONFIG_IOMMU_API */

int pnv_npu2_init(struct pci_controller *hose)
{
	static int npu_index;
	struct npu *npu;
	int ret;

	npu = kzalloc(sizeof(*npu), GFP_KERNEL);
	if (!npu)
		return -ENOMEM;

	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
		ret = -ENOSPC;
		goto fail_exit;
	}
	npu->index = npu_index;
	hose->npu = npu;

	return 0;

fail_exit:
	kfree(npu);
	return ret;
}

int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
		unsigned long msr)
{
	int ret;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct pci_controller *hose;
	struct pnv_phb *nphb;

	if (!npdev)
		return -ENODEV;

	hose = pci_bus_to_host(npdev->bus);
	nphb = hose->private_data;

	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n",
			nphb->opal_id, lparid);
	/*
	 * Currently we only support radix, and a non-zero LPCR only makes
	 * sense for hash tables, so skiboot expects the LPCR parameter to
	 * be zero.
	 */
	ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid,
			0 /* LPCR bits */);
	if (ret) {
		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
		return ret;
	}

	dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
			nphb->opal_id, msr);
	ret = opal_npu_init_context(nphb->opal_id, 0 /* __unused */, msr,
			pci_dev_id(gpdev));
	if (ret < 0) {
		dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_npu2_map_lpar_dev);

void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr)
{
	struct pci_dev *gpdev;

	list_for_each_entry(gpdev, &gpe->pbus->devices, bus_list)
		pnv_npu2_map_lpar_dev(gpdev, 0, msr);
}

int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
{
	int ret;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct pci_controller *hose;
	struct pnv_phb *nphb;

	if (!npdev)
		return -ENODEV;

	hose = pci_bus_to_host(npdev->bus);
	nphb = hose->private_data;

	dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
			nphb->opal_id);
	ret = opal_npu_destroy_context(nphb->opal_id, 0 /* __unused */,
			pci_dev_id(gpdev));
	if (ret < 0) {
		dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret);
		return ret;
	}

	/* Set LPID to 0 anyway, just to be safe */
	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id);
	ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/,
			0 /* LPCR bits */);
	if (ret)
		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(pnv_npu2_unmap_lpar_dev);