// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Texas Instruments Keystone SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments, Ltd.
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 * Implementation based on pci-exynos.c and pcie-designware.c
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>

#include "pcie-designware.h"

#define DRIVER_NAME	"keystone-pcie"

#define PCIE_VENDORID_MASK	0xffff
#define PCIE_DEVICEID_SHIFT	16

#define PCIE_CAP_BASE		0x70

/* Application register defines */
#define LTSSM_EN_VAL		BIT(0)
#define LTSSM_STATE_MASK	0x1f
#define LTSSM_STATE_L0		0x11
#define DBI_CS2_EN_VAL		0x20
#define OB_XLAT_EN_VAL		2

/* Application registers */
#define CMD_STATUS			0x004

#define CFG_SETUP			0x008
#define CFG_BUS(x)			(((x) & 0xff) << 16)
#define CFG_DEVICE(x)			(((x) & 0x1f) << 8)
#define CFG_FUNC(x)			((x) & 0x7)
#define CFG_TYPE1			BIT(24)

#define OB_SIZE				0x030
#define CFG_PCIM_WIN_SZ_IDX		3
#define SPACE0_REMOTE_CFG_OFFSET	0x1000
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))

/* IRQ register defines */
#define IRQ_EOI				0x050
#define IRQ_STATUS			0x184
#define IRQ_ENABLE_SET			0x188
#define IRQ_ENABLE_CLR			0x18c

#define MSI_IRQ				0x054
#define MSI0_IRQ_STATUS			0x104
#define MSI0_IRQ_ENABLE_SET		0x108
#define MSI0_IRQ_ENABLE_CLR		0x10c
#define MSI_IRQ_OFFSET			4
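
/*
 * The MSI status/enable registers form eight banks (MSI0..MSI7), spaced
 * 0x10 bytes apart, each covering four vectors, for 32 MSI vectors in
 * total; see update_reg_offset_bit_pos() and ks_pcie_handle_msi_irq()
 * below for how a vector number maps onto a bank and bit.
 */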

#define ERR_AER		BIT(5)	/* ECRC error */
#define ERR_AXI		BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR	BIT(3)	/* Correctable error */
#define ERR_NONFATAL	BIT(2)	/* Non-fatal error */
#define ERR_FATAL	BIT(1)	/* Fatal error */
#define ERR_SYS		BIT(0)	/* System (fatal, non-fatal, or correctable) */
#define ERR_IRQ_ALL	(ERR_AER | ERR_AXI | ERR_CORR | \
			 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
#define ERR_FATAL_IRQ	(ERR_FATAL | ERR_AXI)
#define ERR_IRQ_STATUS_RAW	0x1c0
#define ERR_IRQ_STATUS		0x1c4
#define ERR_IRQ_ENABLE_SET	0x1c8
#define ERR_IRQ_ENABLE_CLR	0x1cc

/* Config space registers */
#define DEBUG0			0x728

#define MAX_MSI_HOST_IRQS	8

/* PCIE controller device IDs */
#define PCIE_RC_K2HK		0xb008
#define PCIE_RC_K2E		0xb009
#define PCIE_RC_K2L		0xb00a
#define PCIE_RC_K2G		0xb00b

#define to_keystone_pcie(x)	dev_get_drvdata((x)->dev)

struct keystone_pcie {
	struct dw_pcie		*pci;
	/* PCI Device ID */
	u32			device_id;
	int			num_legacy_host_irqs;
	int			legacy_host_irqs[PCI_NUM_INTX];
	struct device_node	*legacy_intc_np;

	int			num_msi_host_irqs;
	int			msi_host_irqs[MAX_MSI_HOST_IRQS];
	int			num_lanes;
	u32			num_viewport;
	struct phy		**phy;
	struct device_link	**link;
	struct device_node	*msi_intc_np;
	struct irq_domain	*legacy_irq_domain;
	struct device_node	*np;

	int			error_irq;

	/* Application register space */
	void __iomem		*va_app_base;	/* DT 1st resource */
	struct resource		app;
};

static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
					     u32 *bit_pos)
{
	*reg_offset = offset % 8;
	*bit_pos = offset >> 3;
}
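
/*
 * Example: MSI vector 25 yields reg_offset = 25 % 8 = 1 and bit_pos =
 * 25 >> 3 = 3, i.e. bit 3 of the MSI1 register bank; the inverse decode
 * in ks_pcie_handle_msi_irq() reconstructs 1 + (3 << 3) = 25.
 */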

static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	return ks_pcie->app.start + MSI_IRQ;
}

static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}

static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
			       u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}

static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	u32 pending, vector;
	int src, virq;

	pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));

	/*
	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
	 * shows 1, 9, 17, 25 and so forth
	 */
	for (src = 0; src < 4; src++) {
		if (BIT(src) & pending) {
			vector = offset + (src << 3);
			virq = irq_linear_revmap(pp->irq_domain, vector);
			dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
				src, vector, virq);
			generic_handle_irq(virq);
		}
	}
}

static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	struct dw_pcie *pci;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);
	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);

	ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
			   BIT(bit_pos));
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}

static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
			   BIT(bit_pos));
}

static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
			   BIT(bit_pos));
}

static int ks_pcie_msi_host_init(struct pcie_port *pp)
{
	return dw_pcie_allocate_domains(pp);
}
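
/*
 * Each INTx line has its own bank of IRQ_STATUS/IRQ_ENABLE_SET/
 * IRQ_ENABLE_CLR registers, with banks spaced 0x10 bytes apart; the
 * (i << 4) / (offset << 4) arithmetic below selects the bank for a
 * given INTx line.
 */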

static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
{
	int i;

	for (i = 0; i < PCI_NUM_INTX; i++)
		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
}

static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
				      int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 pending;
	int virq;

	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}

static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}

static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	u32 status;

	status = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
	if (!status)
		return IRQ_NONE;

	if (status & ERR_FATAL_IRQ)
		dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n",
			status);

	/* Ack the IRQ; status bits are RW1C */
	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, status);

	return IRQ_HANDLED;
}

static void ks_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
{
}
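
/*
 * The irq_chip callbacks above are intentionally empty: status handling
 * and the EOI for an INTx line are done at the application register
 * level in ks_pcie_handle_legacy_irq().
 */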

static struct irq_chip ks_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_pcie_ack_legacy_irq,
	.irq_mask = ks_pcie_mask_legacy_irq,
	.irq_unmask = ks_pcie_unmask_legacy_irq,
};

static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
				       unsigned int irq,
				       irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}

static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
	.map = ks_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};

/**
 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);

	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2_EN_VAL));
}

/**
 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);

	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2_EN_VAL);
}

static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	u32 num_viewport = ks_pcie->num_viewport;
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 start = pp->mem->start, end = pp->mem->end;
	int i, tr_size;
	u32 val;

	/* Disable BARs for inbound access */
	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
	ks_pcie_clear_dbi_mode(ks_pcie);

	/* Set outbound translation size per window division */
	ks_pcie_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);

	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; (i < num_viewport) && (start < end); i++) {
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
		start += tr_size;
	}

	/* Enable OB translation */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
}
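
/*
 * With CFG_PCIM_WIN_SZ_IDX = 3, each outbound window above spans
 * (1 << 3) * SZ_1M = 8 MiB, so the loop programs up to num_viewport
 * windows covering the PCI memory resource 1:1 in 8 MiB steps.
 */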

static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 unsigned int devfn, int where, int size,
				 u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 reg;

	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
		CFG_FUNC(PCI_FUNC(devfn));
	if (bus->parent->number != pp->root_bus_nr)
		reg |= CFG_TYPE1;
	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

	return dw_pcie_read(pp->va_cfg0_base + where, size, val);
}

static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 unsigned int devfn, int where, int size,
				 u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 reg;

	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
		CFG_FUNC(PCI_FUNC(devfn));
	if (bus->parent->number != pp->root_bus_nr)
		reg |= CFG_TYPE1;
	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

	return dw_pcie_write(pp->va_cfg0_base + where, size, val);
}
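
/*
 * Example: a config access to bus 1, device 2, function 0 programs
 * CFG_SETUP with CFG_BUS(1) | CFG_DEVICE(2) | CFG_FUNC(0) = 0x10200;
 * CFG_TYPE1 is set in addition when the target bus is not an immediate
 * child of the root bus, so the RC emits a type 1 config cycle.
 */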

/**
 * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 *
 * This sets BAR0 to enable inbound access for the MSI_IRQ register
 */
static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	/* Configure and set up BAR0 */
	ks_pcie_set_dbi_mode(ks_pcie);

	/* Enable BAR0 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_pcie_clear_dbi_mode(ks_pcie);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient. Use physical address to avoid any conflicts.
	 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}

/**
 * ks_pcie_link_up() - Check if the link is up
 */
static int ks_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, DEBUG0);
	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}

static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
{
	u32 val;

	/* Disable Link training */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~LTSSM_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	/* Initiate Link Training */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}
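
/*
 * Toggling LTSSM_EN off and then on restarts link training.
 * ks_pcie_link_up() reports L0 from the LTSSM state in DEBUG0, and
 * dw_pcie_wait_for_link() polls it from ks_pcie_establish_link() below.
 */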

/**
 * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
 *
 * Ioremap the register resources, initialize the legacy irq domain
 * and call dw_pcie_host_init() to initialize the Keystone
 * PCI host controller.
 */
static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/*
	 * We set these to the same value so that the rd/wr_other_conf
	 * accessors can use va_cfg0_base for both type 0 and type 1 cycles.
	 */
	pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	ks_pcie->app = *res;

	/* Create legacy IRQ domain */
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					      PCI_NUM_INTX,
					      &ks_pcie_legacy_irq_domain_ops,
					      NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	return dw_pcie_host_init(pp);
}

static void ks_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pci_dev *bridge;
	static const struct pci_device_id rc_pci_devids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ 0, },
	};

	if (pci_is_root_bus(bus))
		bridge = dev;

	/* look for the host bridge */
	while (!pci_is_root_bus(bus)) {
		bridge = bus->self;
		bus = bus->parent;
	}

	if (bridge) {
		/*
		 * Keystone PCI controller has a h/w limitation of
		 * 256 bytes maximum read request size. It can't handle
		 * anything higher than this. So force this limit on
		 * all downstream devices.
		 */
		if (pci_match_id(rc_pci_devids, bridge)) {
			if (pcie_get_readrq(dev) > 256) {
				dev_info(&dev->dev, "limiting MRRS to 256\n");
				pcie_set_readrq(dev, 256);
			}
		}
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
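
/*
 * Example: a device that negotiated a 512-byte MRRS while enumerating
 * under a Keystone root port is clamped back to 256 bytes when it is
 * enabled, since the fixup runs for every device at enable time and
 * only acts when the host bridge matches one of the rc_pci_devids.
 */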

static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;

	if (dw_pcie_link_up(pci)) {
		dev_info(dev, "Link already up\n");
		return 0;
	}

	ks_pcie_initiate_link_train(ks_pcie);

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	dev_err(dev, "phy link never came up\n");
	return -ETIMEDOUT;
}

static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	u32 offset = irq - ks_pcie->msi_host_irqs[0];
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, "%s, irq %d\n", __func__, irq);

	/*
	 * The chained irq handler installation has replaced the normal
	 * interrupt driver handler, so we need to take care of mask/unmask
	 * and ack operations.
	 */
	chained_irq_enter(chip, desc);
	ks_pcie_handle_msi_irq(ks_pcie, offset);
	chained_irq_exit(chip, desc);
}

/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @desc: Pointer to irq descriptor
 *
 * Traverse through pending legacy interrupts and invoke handler for each. Also
 * takes care of interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained irq handler installation has replaced the normal
	 * interrupt driver handler, so we need to take care of mask/unmask
	 * and ack operations.
	 */
	chained_irq_enter(chip, desc);
	ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}

static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
					   char *controller, int *num_irqs)
{
	int temp, max_host_irqs, legacy = 1, *host_irqs;
	struct device *dev = ks_pcie->pci->dev;
	struct device_node *np_pcie = dev->of_node, **np_temp;

	if (!strcmp(controller, "msi-interrupt-controller"))
		legacy = 0;

	if (legacy) {
		np_temp = &ks_pcie->legacy_intc_np;
		max_host_irqs = PCI_NUM_INTX;
		host_irqs = &ks_pcie->legacy_host_irqs[0];
	} else {
		np_temp = &ks_pcie->msi_intc_np;
		max_host_irqs = MAX_MSI_HOST_IRQS;
		host_irqs = &ks_pcie->msi_host_irqs[0];
	}

	/* interrupt controller is in a child node */
	*np_temp = of_get_child_by_name(np_pcie, controller);
	if (!(*np_temp)) {
		dev_err(dev, "Node for %s is absent\n", controller);
		return -EINVAL;
	}

	temp = of_irq_count(*np_temp);
	if (!temp) {
		dev_err(dev, "No IRQ entries in %s\n", controller);
		of_node_put(*np_temp);
		return -EINVAL;
	}

	if (temp > max_host_irqs)
		dev_warn(dev, "Too many %s interrupts defined %u\n",
			 (legacy ? "legacy" : "MSI"), temp);

	/*
	 * Support up to max_host_irqs: in the DT, index 0 to 3 (legacy) or
	 * 0 to 7 (MSI).
	 */
	for (temp = 0; temp < max_host_irqs; temp++) {
		host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
		if (!host_irqs[temp])
			break;
	}

	of_node_put(*np_temp);

	if (temp) {
		*num_irqs = temp;
		return 0;
	}

	return -EINVAL;
}
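
/*
 * The interrupt controllers looked up above are child nodes of the PCIe
 * node. A device tree sketch (interrupt numbers are board-specific and
 * only illustrative):
 *
 *	legacy-interrupt-controller {
 *		interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 28 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 29 IRQ_TYPE_EDGE_RISING>;
 *	};
 */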

static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
{
	int i;

	/* Legacy IRQ */
	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
		irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
						 ks_pcie_legacy_irq_handler,
						 ks_pcie);
	}
	ks_pcie_enable_legacy_irqs(ks_pcie);

	/* MSI IRQ */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
			irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
							 ks_pcie_msi_irq_handler,
							 ks_pcie);
		}
	}

	if (ks_pcie->error_irq > 0)
		ks_pcie_enable_error_irq(ks_pcie);
}

/*
 * When a PCI device does not exist during config cycles, the Keystone host
 * gets a bus error instead of returning 0xffffffff. This handler always
 * returns 0 for these faults.
 */
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
			 struct pt_regs *regs)
{
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;

		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	return 0;
}
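
/*
 * ks_pcie_fault() emulates the aborted load: it writes -1 (the all-ones
 * pattern a read from a non-existent PCI device would return) into the
 * destination register encoded in bits 15:12 of the instruction, then
 * advances the PC past the faulting instruction.
 */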

static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
	int ret;
	unsigned int id;
	struct regmap *devctrl_regs;
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
	if (IS_ERR(devctrl_regs))
		return PTR_ERR(devctrl_regs);

	ret = regmap_read(devctrl_regs, 0, &id);
	if (ret)
		return ret;

	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);

	return 0;
}

static int __init ks_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	int ret;

	dw_pcie_setup_rc(pp);

	ks_pcie_establish_link(ks_pcie);
	ks_pcie_setup_rc_app_regs(ks_pcie);
	ks_pcie_setup_interrupts(ks_pcie);
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
	       pci->dbi_base + PCI_IO_BASE);

	ret = ks_pcie_init_id(ks_pcie);
	if (ret < 0)
		return ret;

	/*
	 * PCIe access errors that result in OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");

	return 0;
}

static const struct dw_pcie_host_ops ks_pcie_host_ops = {
	.rd_other_conf = ks_pcie_rd_other_conf,
	.wr_other_conf = ks_pcie_wr_other_conf,
	.host_init = ks_pcie_host_init,
	.msi_set_irq = ks_pcie_msi_set_irq,
	.msi_clear_irq = ks_pcie_msi_clear_irq,
	.get_msi_addr = ks_pcie_get_msi_addr,
	.msi_host_init = ks_pcie_msi_host_init,
	.msi_irq_ack = ks_pcie_msi_irq_ack,
	.scan_bus = ks_pcie_v3_65_scan_bus,
};

static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
	struct keystone_pcie *ks_pcie = priv;

	return ks_pcie_handle_error_irq(ks_pcie);
}

static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
					struct platform_device *pdev)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	ret = ks_pcie_get_irq_controller_info(ks_pcie,
					      "legacy-interrupt-controller",
					      &ks_pcie->num_legacy_host_irqs);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		ret = ks_pcie_get_irq_controller_info(ks_pcie,
						      "msi-interrupt-controller",
						      &ks_pcie->num_msi_host_irqs);
		if (ret)
			return ret;
	}

	/*
	 * Index 0 is the platform interrupt for error interrupt
	 * from RC. This is optional.
	 */
	ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
	if (ks_pcie->error_irq <= 0) {
		dev_info(dev, "no error IRQ defined\n");
	} else {
		ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler,
				  IRQF_SHARED, "pcie-error-irq", ks_pcie);
		if (ret < 0) {
			dev_err(dev, "failed to request error IRQ %d\n",
				ks_pcie->error_irq);
			return ret;
		}
	}

	pp->ops = &ks_pcie_host_ops;
	ret = ks_pcie_dw_host_init(ks_pcie);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.compatible = "ti,keystone-pcie",
	},
	{ },
};

static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
	.link_up = ks_pcie_link_up,
};

static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
	int num_lanes = ks_pcie->num_lanes;

	while (num_lanes--) {
		phy_power_off(ks_pcie->phy[num_lanes]);
		phy_exit(ks_pcie->phy[num_lanes]);
	}
}

static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
{
	int i;
	int ret;
	int num_lanes = ks_pcie->num_lanes;

	for (i = 0; i < num_lanes; i++) {
		ret = phy_init(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(ks_pcie->phy[i]);
		if (ret < 0) {
			phy_exit(ks_pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(ks_pcie->phy[i]);
		phy_exit(ks_pcie->phy[i]);
	}

	return ret;
}

static int __init ks_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct dw_pcie *pci;
	struct keystone_pcie *ks_pcie;
	struct device_link **link;
	u32 num_viewport;
	struct phy **phy;
	u32 num_lanes;
	char name[10];
	int ret;
	int i;

	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
	if (!ks_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &ks_pcie_dw_pcie_ops;

	ret = of_property_read_u32(np, "num-viewport", &num_viewport);
	if (ret < 0) {
		dev_err(dev, "unable to read *num-viewport* property\n");
		return ret;
	}

	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
	if (ret)
		num_lanes = 1;

	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < num_lanes; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_optional_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_link;
		}

		if (!phy[i])
			continue;

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	ks_pcie->np = np;
	ks_pcie->pci = pci;
	ks_pcie->phy = phy;
	ks_pcie->link = link;
	ks_pcie->num_lanes = num_lanes;
	ks_pcie->num_viewport = num_viewport;

	ret = ks_pcie_enable_phy(ks_pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		goto err_link;
	}

	platform_set_drvdata(pdev, ks_pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
	if (ret < 0)
		goto err_get_sync;

	return 0;

err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);

err_link:
	while (--i >= 0 && link[i])
		device_link_del(link[i]);

	return ret;
}

static int __exit ks_pcie_remove(struct platform_device *pdev)
{
	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
	struct device_link **link = ks_pcie->link;
	int num_lanes = ks_pcie->num_lanes;
	struct device *dev = &pdev->dev;

	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);
	while (num_lanes--)
		device_link_del(link[num_lanes]);

	return 0;
}

static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};
builtin_platform_driver(ks_pcie_driver);