/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"
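/*
 * MSI handling: each PHB owns a bitmap (msi_bmp) of the hardware MSI
 * numbers it can generate.  pnv_setup_msi_irqs() allocates one hwirq per
 * msi_desc, maps it to a Linux virq and lets the PHB-specific msi_setup()
 * hook compose the MSI message; pnv_teardown_msi_irqs() undoes both steps.
 */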
#ifdef CONFIG_PCI_MSI
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (virq == NO_IRQ) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}
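/*
 * Release the MSIs of a device: dispose of each Linux irq mapping and
 * return the corresponding hardware MSI number to the PHB's bitmap.
 */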
void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (entry->irq == NO_IRQ)
			continue;
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
#endif /* CONFIG_PCI_MSI */
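/*
 * PHB diag-data dumpers.  OPAL hands back an opaque, big-endian error blob
 * whose layout depends on the PHB type (OpalIoP7IOCPhbErrorData or
 * OpalIoPhb3ErrorData); the helpers below decode it and only print the
 * register groups that are non-zero, to keep the log readable.
 */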
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;
	int i;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
		/* Only dump PEST entries whose valid bit (MSB) is set */
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}
static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;
	int i;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir:        %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}
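/*
 * Clear a PE freeze noticed during a config space access.  The PHB
 * diag-data is fetched first so it can still be dumped if the unfreeze
 * fails; PHBs with compound-PE support go through their unfreeze_pe()
 * hook, the others call opal_pci_eeh_freeze_clear() directly.
 */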
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
					 PNV_PCI_DIAG_BUF_SIZE);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, let it handle the unfreeze */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);

	spin_unlock_irqrestore(&phb->lock, flags);
}
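/*
 * Lightweight EEH check used when full EEH handling is not active for the
 * device: after a config access, query the PE's frozen state and, if the
 * PE is MMIO and/or DMA frozen, clear the freeze via
 * pnv_pci_handle_eeh_config().
 */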
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not have set
	 * that up yet, so map all ER errors to the reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE)
		pe_no = phb->ioda.reserved_pe_idx;

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need to handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze it
		 * for consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}
int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);

	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
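/*
 * Config space access filter: when EEH is enabled on the PHB, block
 * accesses while the device's PE is undergoing reset (EEH_PE_CFG_BLOCKED)
 * or after the device has been marked as removed.
 */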
#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled ? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed ? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */
static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}
static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
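/*
 * Resolve the address of the TCE backing a given index.  TCE tables may be
 * multi-level: it_indirect_levels intermediate levels of it_level_size
 * entries each hold the real address of the next level (with the R/W
 * permission bits masked off), and only the last level contains the actual
 * translations.  The walk peels the top bits off the index one level at a
 * time; e.g. with it_level_size = 512 (shift = 9) and one indirect level,
 * index bits [17:9] select the intermediate entry and bits [8:0] the TCE.
 */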
static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
	__be64 *tmp = ((__be64 *)tbl->it_base);
	int level = tbl->it_indirect_levels;
	const long shift = ilog2(tbl->it_level_size);
	unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

	while (level) {
		int n = (idx & mask) >> (level * shift);
		unsigned long tce = be64_to_cpu(tmp[n]);

		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
		idx &= ~mask;
		mask >>= shift;
		--level;
	}

	return tmp + idx;
}
int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		  unsigned long uaddr, enum dma_data_direction direction,
		  struct dma_attrs *attrs)
{
	u64 proto_tce = iommu_direction_to_tce_perm(direction);
	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
	long i;

	if (proto_tce & TCE_PCI_WRITE)
		proto_tce |= TCE_PCI_READ;

	for (i = 0; i < npages; i++) {
		unsigned long newtce = proto_tce |
			((rpn + i) << tbl->it_page_shift);
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
	}

	return 0;
}
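/*
 * Atomic TCE update used through the IOMMU API (e.g. by the VFIO SPAPR TCE
 * driver): exchange the TCE in place and hand back the previous host
 * physical address and DMA direction so the caller can release the old page.
 */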
#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		 unsigned long *hpa, enum dma_data_direction *direction)
{
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *hpa | proto_tce, oldtce;
	unsigned long idx = index - tbl->it_offset;

	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	if (newtce & TCE_PCI_WRITE)
		newtce |= TCE_PCI_READ;

	oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
	*hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(oldtce);

	return 0;
}
#endif
void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(0);
	}
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
}
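/*
 * iommu_table / iommu_table_group plumbing.  A single TCE table may be
 * shared by several table groups (i.e. PEs), so every attachment is
 * tracked both by an iommu_table_group_link on the table's RCU-protected
 * it_group_list and by a slot in iommu_table_group::tables[].
 */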
struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);

	return tbl;
}
long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	struct iommu_table_group_link *tgl = NULL;

	if (WARN_ON(!tbl || !table_group))
		return -EINVAL;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			node);
	if (!tgl)
		return -ENOMEM;

	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[num] = tbl;

	return 0;
}
static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
	struct iommu_table_group_link *tgl = container_of(head,
			struct iommu_table_group_link, rcu);

	kfree(tgl);
}
void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	long i;
	bool found;
	struct iommu_table_group_link *tgl;

	if (!tbl || !table_group)
		return;

	/* Remove link to a group from table's list of attached groups */
	found = false;
	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		if (tgl->table_group == table_group) {
			list_del_rcu(&tgl->next);
			call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
			found = true;
			break;
		}
	}
	if (WARN_ON(!found))
		return;

	/* Clean a pointer to iommu_table in iommu_table_group::tables[] */
	found = false;
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (table_group->tables[i] == tbl) {
			table_group->tables[i] = NULL;
			found = true;
			break;
		}
	}
	WARN_ON(!found);
}
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset, unsigned page_shift)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;	/* each TCE entry is 8 bytes */
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}
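/*
 * Per-device DMA setup.  A VF's pci_dn does not know its PE number yet, so
 * it is looked up by RID in the PHB's PE list first; the PHB-specific
 * dma_dev_setup() hook then performs the actual per-device configuration.
 */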
void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}
void pnv_pci_dma_bus_setup(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}
void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}
/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, eg. in a simulator, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
	}

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/* Setup the linkage between OF nodes and PHBs */
	pci_devs_phb_init();

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);