/*
 * Copyright (c) 2015-2016 Quantenna Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>

#include "qtn_hw_ids.h"
#include "pcie_bus_priv.h"
#include "core.h"
#include "bus.h"
#include "shm_ipc.h"
#include "debug.h"
static bool use_msi = true;
module_param(use_msi, bool, 0644);
MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");

static unsigned int tx_bd_size_param = 256;
module_param(tx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size");

static unsigned int rx_bd_size_param = 256;
module_param(rx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size");

static unsigned int rx_bd_reserved_param = 16;
module_param(rx_bd_reserved_param, uint, 0644);
MODULE_PARM_DESC(rx_bd_reserved_param, "Reserved RX descriptors");

static u8 flashboot = 1;
module_param(flashboot, byte, 0644);
MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");

#define DRV_NAME	"qtnfmac_pearl_pcie"
static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg)
{
	writel(val, basereg);

	/* flush posted write */
	readl(basereg);
}
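
/* The HDP interrupt helpers below keep the driver's copy of the interrupt
 * mask in priv->pcie_irq_mask, guarded by priv->irq_lock, and mirror every
 * change into the PCIE_HDP_INT_EN register of the endpoint.
 */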
static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	priv->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}

static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}

static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	writel(0x0, PCIE_HDP_INT_EN(priv->pcie_reg_base));
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}

static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_bus_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	priv->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
	writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}

static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_bus_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	priv->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
	writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}

static inline void qtnf_en_txdone_irq(struct qtnf_pcie_bus_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	priv->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
	writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}

static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_bus_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	priv->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
	writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}
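
/* Interrupt setup: prefer MSI when the use_msi module parameter allows it,
 * otherwise fall back to legacy INTx signalling.
 */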
static int qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv)
{
	struct pci_dev *pdev = priv->pdev;

	/* fall back to legacy INTx interrupts by default */
	priv->msi_enabled = 0;

	/* check if MSI capability is available */
	if (use_msi) {
		if (!pci_enable_msi(pdev)) {
			pr_debug("MSI interrupt enabled\n");
			priv->msi_enabled = 1;
		} else {
			pr_warn("failed to enable MSI interrupts\n");
		}
	}

	if (!priv->msi_enabled) {
		pr_warn("legacy PCIE interrupts enabled\n");
		pci_intx(pdev, 1);
	}

	return 0;
}
static void qtnf_deassert_intx(struct qtnf_pcie_bus_priv *priv)
{
	void __iomem *reg = priv->sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
	u32 cfg;

	cfg = readl(reg);
	cfg &= ~PEARL_ASSERT_INTX;
	qtnf_non_posted_write(cfg, reg);
}
static void qtnf_ipc_gen_ep_int(void *arg)
{
	const struct qtnf_pcie_bus_priv *priv = arg;
	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
	void __iomem *reg = priv->sysctl_bar +
			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

	qtnf_non_posted_write(data, reg);
}
static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
{
	void __iomem *vaddr;
	dma_addr_t busaddr;
	size_t len;
	int ret;

	ret = pcim_iomap_regions(priv->pdev, 1 << index, DRV_NAME);
	if (ret)
		return IOMEM_ERR_PTR(ret);

	busaddr = pci_resource_start(priv->pdev, index);
	vaddr = pcim_iomap_table(priv->pdev)[index];
	len = pci_resource_len(priv->pdev, index);

	pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%zu\n",
		 index, vaddr, &busaddr, len);

	return vaddr;
}
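
/* Control path RX: called by the shared-memory IPC layer with a raw control
 * message; the payload is copied into an skb and handed to the transport
 * layer for dispatch.
 */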
static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len)
{
	struct qtnf_pcie_bus_priv *priv = arg;
	struct qtnf_bus *bus = pci_get_drvdata(priv->pdev);
	struct sk_buff *skb;

	if (unlikely(len == 0)) {
		pr_warn("zero length packet received\n");
		return;
	}

	skb = __dev_alloc_skb(len, GFP_KERNEL);

	if (unlikely(!skb)) {
		pr_err("failed to allocate skb\n");
		return;
	}

	skb_put_data(skb, buf, len);

	qtnf_trans_handle_rx_ctl_packet(bus, skb);
}
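
/* Control path setup: the two shared-memory regions exposed through the BDA
 * (bda_shm_reg1/bda_shm_reg2) carry outbound and inbound control messages;
 * the endpoint is kicked through qtnf_ipc_gen_ep_int().
 */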
static int qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv)
{
	struct qtnf_shm_ipc_region __iomem *ipc_tx_reg;
	struct qtnf_shm_ipc_region __iomem *ipc_rx_reg;
	const struct qtnf_shm_ipc_int ipc_int = { qtnf_ipc_gen_ep_int, priv };
	const struct qtnf_shm_ipc_rx_callback rx_callback = {
					qtnf_pcie_control_rx_callback, priv };

	ipc_tx_reg = &priv->bda->bda_shm_reg1;
	ipc_rx_reg = &priv->bda->bda_shm_reg2;

	qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND,
			  ipc_tx_reg, priv->workqueue,
			  &ipc_int, &rx_callback);
	qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND,
			  ipc_rx_reg, priv->workqueue,
			  &ipc_int, &rx_callback);

	return 0;
}
static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
{
	qtnf_shm_ipc_free(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_free(&priv->shm_ipc_ep_out);
}
static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
{
	priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
	if (IS_ERR_OR_NULL(priv->sysctl_bar)) {
		pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
		return -ENOMEM;
	}

	priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
	if (IS_ERR_OR_NULL(priv->dmareg_bar)) {
		pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
		return -ENOMEM;
	}

	priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
	if (IS_ERR_OR_NULL(priv->epmem_bar)) {
		pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
		return -ENOMEM;
	}

	priv->pcie_reg_base = priv->dmareg_bar;
	priv->bda = priv->epmem_bar;
	writel(priv->msi_enabled, &priv->bda->bda_rc_msi_enabled);

	return 0;
}
static int
qtnf_pcie_init_dma_mask(struct qtnf_pcie_bus_priv *priv, u64 dma_mask)
{
	int ret;

	ret = dma_supported(&priv->pdev->dev, dma_mask);
	if (!ret) {
		pr_err("DMA mask %llu not supported\n", dma_mask);
		return -EINVAL;
	}

	ret = pci_set_dma_mask(priv->pdev, dma_mask);
	if (ret) {
		pr_err("failed to set DMA mask %llu\n", dma_mask);
		return ret;
	}

	ret = pci_set_consistent_dma_mask(priv->pdev, dma_mask);
	if (ret)
		pr_err("failed to set consistent DMA mask %llu\n", dma_mask);

	return ret;
}
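
/* Select a PCIe Max Payload Size that exceeds neither the device's own
 * capability nor the setting of its upstream bridge.
 */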
static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
{
	struct pci_dev *pdev = priv->pdev;
	struct pci_dev *parent;
	int mps_p, mps_o, mps_m, mps;
	int ret;

	/* current mps */
	mps_o = pcie_get_mps(pdev);

	/* maximum supported mps */
	mps_m = 128 << pdev->pcie_mpss;

	/* suggested new mps value */
	mps = mps_m;

	if (pdev->bus && pdev->bus->self) {
		/* parent (bus) mps */
		parent = pdev->bus->self;

		if (pci_is_pcie(parent)) {
			mps_p = pcie_get_mps(parent);
			mps = min(mps_m, mps_p);
		}
	}

	ret = pcie_set_mps(pdev, mps);
	if (ret) {
		pr_err("failed to set mps to %d, keep using current %d\n",
		       mps, mps_o);
		priv->mps = mps_o;
		return;
	}

	pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
	priv->mps = mps;
}
static int qtnf_is_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	return s & state;
}

static void qtnf_set_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	qtnf_non_posted_write(state | s, reg);
}

static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	qtnf_non_posted_write(s & ~state, reg);
}

static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
{
	u32 timeout = 0;

	while ((qtnf_is_state(reg, state) == 0)) {
		usleep_range(1000, 1200);
		if (++timeout > delay_in_ms)
			return -1;
	}

	return 0;
}
static int alloc_skb_array(struct qtnf_pcie_bus_priv *priv)
{
	struct sk_buff **vaddr;
	int len;

	len = priv->tx_bd_num * sizeof(*priv->tx_skb) +
		priv->rx_bd_num * sizeof(*priv->rx_skb);
	vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	priv->tx_skb = vaddr;

	vaddr += priv->tx_bd_num;
	priv->rx_skb = vaddr;

	return 0;
}
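
/* Allocate one coherent DMA region that holds both the TX and the RX
 * descriptor rings, then program the RX ring base, size and write pointer
 * into the HDP registers of the endpoint.
 */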
static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv)
{
	dma_addr_t paddr;
	void *vaddr;
	int len;

	len = priv->tx_bd_num * sizeof(struct qtnf_tx_bd) +
		priv->rx_bd_num * sizeof(struct qtnf_rx_bd);

	vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* tx bd */

	memset(vaddr, 0, len);

	priv->bd_table_vaddr = vaddr;
	priv->bd_table_paddr = paddr;
	priv->bd_table_len = len;

	priv->tx_bd_vbase = vaddr;
	priv->tx_bd_pbase = paddr;

	pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	priv->tx_bd_reclaim_start = 0;
	priv->tx_bd_index = 0;
	priv->tx_queue_len = 0;

	/* rx bd */

	vaddr = ((struct qtnf_tx_bd *)vaddr) + priv->tx_bd_num;
	paddr += priv->tx_bd_num * sizeof(struct qtnf_tx_bd);

	priv->rx_bd_vbase = vaddr;
	priv->rx_bd_pbase = paddr;

	writel(QTN_HOST_LO32(paddr),
	       PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base));
	writel(QTN_HOST_HI32(paddr),
	       PCIE_HDP_TX_HOST_Q_BASE_H(priv->pcie_reg_base));
	writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16,
	       PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base));

	priv->hw_txproc_wr_ptr = priv->rx_bd_num - rx_bd_reserved_param;

	writel(priv->hw_txproc_wr_ptr,
	       PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));

	pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	priv->rx_bd_index = 0;

	return 0;
}
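
/* Attach a freshly allocated skb to RX descriptor rx_bd_index: map it for
 * DMA, hand the buffer address to the HHBM registers and keep the address
 * in the descriptor so the buffer can be unmapped later.
 */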
static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index)
{
	struct qtnf_rx_bd *rxbd;
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = __dev_alloc_skb(SKB_BUF_SIZE + NET_IP_ALIGN,
			      GFP_ATOMIC);
	if (!skb) {
		priv->rx_skb[rx_bd_index] = NULL;
		return -ENOMEM;
	}

	priv->rx_skb[rx_bd_index] = skb;

	skb_reserve(skb, NET_IP_ALIGN);

	rxbd = &priv->rx_bd_vbase[rx_bd_index];

	paddr = pci_map_single(priv->pdev, skb->data,
			       SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(priv->pdev, paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &paddr);
		return -ENOMEM;
	}

	writel(QTN_HOST_LO32(paddr),
	       PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base));
	writel(QTN_HOST_HI32(paddr),
	       PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base));

	/* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
	rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
	rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));

	return 0;
}
static int alloc_rx_buffers(struct qtnf_pcie_bus_priv *priv)
{
	u16 i;
	int ret = 0;

	memset(priv->rx_bd_vbase, 0x0,
	       priv->rx_bd_num * sizeof(struct qtnf_rx_bd));

	for (i = 0; i < priv->rx_bd_num; i++) {
		ret = skb2rbd_attach(priv, i);
		if (ret)
			break;
	}

	return ret;
}
/* all rx/tx activity should have ceased before calling this function */
static void free_xfer_buffers(void *data)
{
	struct qtnf_pcie_bus_priv *priv = (struct qtnf_pcie_bus_priv *)data;
	struct qtnf_rx_bd *rxbd;
	dma_addr_t paddr;
	int i;

	/* free rx buffers */
	for (i = 0; i < priv->rx_bd_num; i++) {
		if (priv->rx_skb[i]) {
			rxbd = &priv->rx_bd_vbase[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
					      le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);

			dev_kfree_skb_any(priv->rx_skb[i]);
			priv->rx_skb[i] = NULL;
		}
	}

	/* free tx buffers */
	for (i = 0; i < priv->tx_bd_num; i++) {
		if (priv->tx_skb[i]) {
			dev_kfree_skb_any(priv->tx_skb[i]);
			priv->tx_skb[i] = NULL;
		}
	}
}
static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv)
{
	int ret;

	priv->tx_bd_num = tx_bd_size_param;
	priv->rx_bd_num = rx_bd_size_param;

	ret = alloc_skb_array(priv);
	if (ret) {
		pr_err("failed to allocate skb array\n");
		return ret;
	}

	ret = alloc_bd_table(priv);
	if (ret) {
		pr_err("failed to allocate bd table\n");
		return ret;
	}

	ret = alloc_rx_buffers(priv);
	if (ret)
		pr_err("failed to allocate rx buffers\n");

	return ret;
}
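
/* Walk the TX ring from the last reclaimed position up to the index the
 * hardware reports as sent, unmapping and freeing every completed skb and
 * waking any stopped netdev queue.
 */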
static int qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
{
	struct qtnf_tx_bd *txbd;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int last_sent;
	int count;
	int i;

	last_sent = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
			% priv->tx_bd_num;
	i = priv->tx_bd_reclaim_start;
	count = 0;

	while (i != last_sent) {
		skb = priv->tx_skb[i];
		if (!skb)
			break;

		txbd = &priv->tx_bd_vbase[i];
		paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
				      le32_to_cpu(txbd->addr));
		pci_unmap_single(priv->pdev, paddr, skb->len, PCI_DMA_TODEVICE);

		if (likely(skb->dev)) {
			skb->dev->stats.tx_packets++;
			skb->dev->stats.tx_bytes += skb->len;

			if (netif_queue_stopped(skb->dev))
				netif_wake_queue(skb->dev);
		}

		dev_kfree_skb_any(skb);
		priv->tx_skb[i] = NULL;
		priv->tx_queue_len--;
		count++;

		if (++i >= priv->tx_bd_num)
			i = 0;
	}

	priv->tx_bd_reclaim_start = i;
	priv->tx_reclaim_done += count;
	priv->tx_reclaim_req++;

	return count;
}
static bool qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv)
{
	if (priv->tx_queue_len >= priv->tx_bd_num - 1) {
		pr_err_ratelimited("reclaim full Tx queue\n");
		qtnf_pcie_data_tx_reclaim(priv);

		if (priv->tx_queue_len >= priv->tx_bd_num - 1) {
			priv->tx_full_count++;
			return false;
		}
	}

	return true;
}
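
/* Data path TX: place the skb into the next free TX descriptor and notify
 * the endpoint by writing the descriptor bus address to the HDP host-write
 * registers. NETDEV_TX_BUSY is returned only when the ring stays full even
 * after an inline reclaim attempt.
 */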
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
	dma_addr_t txbd_paddr, skb_paddr;
	struct qtnf_tx_bd *txbd;
	unsigned long flags;
	int len, i;
	u32 info;
	int ret = 0;

	spin_lock_irqsave(&priv->tx_lock, flags);

	priv->tx_done_count++;

	if (!qtnf_tx_queue_ready(priv)) {
		if (skb->dev)
			netif_stop_queue(skb->dev);

		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	i = priv->tx_bd_index;
	priv->tx_skb[i] = skb;
	len = skb->len;

	skb_paddr = pci_map_single(priv->pdev, skb->data,
				   skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
		ret = -ENOMEM;
		goto tx_done;
	}

	txbd = &priv->tx_bd_vbase[i];
	txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
	txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));

	info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
	txbd->info = cpu_to_le32(info);

	/* sync up all descriptor updates before passing them to EP */
	dma_wmb();

	/* write new TX descriptor to PCIE_RX_FIFO on EP */
	txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_tx_bd);
	writel(QTN_HOST_LO32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base));
	writel(QTN_HOST_HI32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0_H(priv->pcie_reg_base));

	if (++i >= priv->tx_bd_num)
		i = 0;

	priv->tx_bd_index = i;
	priv->tx_queue_len++;

tx_done:
	if (ret) {
		pr_err_ratelimited("drop skb\n");
		if (skb->dev)
			skb->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}
static int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);

	return qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len);
}
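
/* Single interrupt handler for both MSI and legacy INTx: control path events
 * are forwarded to the SHM IPC handlers, RX-done schedules NAPI and TX-done
 * schedules the reclaim tasklet, each with its source masked until serviced.
 */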
static irqreturn_t qtnf_interrupt(int irq, void *data)
{
	struct qtnf_bus *bus = (struct qtnf_bus *)data;
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
	u32 status;

	priv->pcie_irq_count++;
	status = readl(PCIE_HDP_INT_STATUS(priv->pcie_reg_base));

	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

	if (!(status & priv->pcie_irq_mask))
		goto irq_done;

	if (status & PCIE_HDP_INT_RX_BITS) {
		priv->pcie_irq_rx_count++;
		qtnf_dis_rxdone_irq(priv);
		napi_schedule(&bus->mux_napi);
	}

	if (status & PCIE_HDP_INT_TX_BITS) {
		priv->pcie_irq_tx_count++;
		qtnf_dis_txdone_irq(priv);
		tasklet_hi_schedule(&priv->reclaim_tq);
	}

irq_done:
	/* H/W workaround: clean all bits, not only enabled */
	qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(priv->pcie_reg_base));

	if (!priv->msi_enabled)
		qtnf_deassert_intx(priv);

	return IRQ_HANDLED;
}
static inline void hw_txproc_wr_ptr_inc(struct qtnf_pcie_bus_priv *priv)
{
	u32 index;

	index = priv->hw_txproc_wr_ptr;

	if (++index >= priv->rx_bd_num)
		index = 0;

	priv->hw_txproc_wr_ptr = index;
}
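
/* NAPI poll: consume completed RX descriptors, hand packets to the network
 * stack (or drop the ones that cannot be classified), refill each slot with
 * a new skb and finally advance the hardware write pointer.
 */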
static int qtnf_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	u32 descw;
	u16 index;
	int ret;

	index = priv->rx_bd_index;
	rxbd = &priv->rx_bd_vbase[index];

	descw = le32_to_cpu(rxbd->info);

	while ((descw & QTN_TXDONE_MASK) && (processed < budget)) {
		skb = priv->rx_skb[index];

		if (likely(skb)) {
			skb_put(skb, QTN_GET_LEN(descw));

			skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
						  le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);

			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += skb->len;

				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
			processed++;
		} else {
			pr_err("missing rx_skb[%d]\n", index);
		}

		/* attached rx buffer is passed upstream: map a new one */
		ret = skb2rbd_attach(priv, index);
		if (likely(!ret)) {
			if (++index >= priv->rx_bd_num)
				index = 0;

			priv->rx_bd_index = index;
			hw_txproc_wr_ptr_inc(priv);

			rxbd = &priv->rx_bd_vbase[index];
			descw = le32_to_cpu(rxbd->info);
		} else {
			pr_err("failed to allocate new rx_skb[%d]\n", index);
			break;
		}
	}

	writel(priv->hw_txproc_wr_ptr,
	       PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));

	if (processed < budget) {
		napi_complete(napi);
		qtnf_en_rxdone_irq(priv);
	}

	return processed;
}
static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);

	tasklet_hi_schedule(&priv->reclaim_tq);
}
static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);

	qtnf_enable_hdp_irqs(priv);
	napi_enable(&bus->mux_napi);
}

static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);

	napi_disable(&bus->mux_napi);
	qtnf_disable_hdp_irqs(priv);
}
static const struct qtnf_bus_ops qtnf_pcie_bus_ops = {
	/* control path methods */
	.control_tx	= qtnf_pcie_control_tx,

	/* data path methods */
	.data_tx		= qtnf_pcie_data_tx,
	.data_tx_timeout	= qtnf_pcie_data_tx_timeout,
	.data_rx_start		= qtnf_pcie_data_rx_start,
	.data_rx_stop		= qtnf_pcie_data_rx_stop,
};
static int qtnf_ep_fw_send(struct qtnf_pcie_bus_priv *priv, uint32_t size,
			   int blk, const u8 *pblk, const u8 *fw)
{
	struct pci_dev *pdev = priv->pdev;
	struct qtnf_bus *bus = pci_get_drvdata(pdev);

	struct qtnf_pcie_fw_hdr *hdr;
	u8 *pdata;

	int hds = sizeof(*hdr);
	struct sk_buff *skb = NULL;
	int len = 0;
	int ret;

	skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb->len = QTN_PCIE_FW_BUFSZ;
	skb->dev = NULL;

	hdr = (struct qtnf_pcie_fw_hdr *)skb->data;
	memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
	hdr->fwsize = cpu_to_le32(size);
	hdr->seqnum = cpu_to_le32(blk);

	if (blk)
		hdr->type = cpu_to_le32(QTN_FW_DSUB);
	else
		hdr->type = cpu_to_le32(QTN_FW_DBEGIN);

	pdata = skb->data + hds;

	len = QTN_PCIE_FW_BUFSZ - hds;
	if (pblk >= (fw + size - len)) {
		len = fw + size - pblk;
		hdr->type = cpu_to_le32(QTN_FW_DEND);
	}

	hdr->pktlen = cpu_to_le32(len);
	memcpy(pdata, pblk, len);
	hdr->crc = cpu_to_le32(~crc32(0, pdata, len));

	ret = qtnf_pcie_data_tx(bus, skb);

	return (ret == NETDEV_TX_OK) ? len : 0;
}
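
/* Firmware download: the image is split into header-prefixed blocks that are
 * pushed through the regular data TX path; after each batch (bounded by
 * QTN_PCIE_FW_DLMASK) the host synchronizes with the endpoint and, if the
 * endpoint flags a retry, rewinds to the start of the failed batch.
 */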
static int
qtnf_ep_fw_load(struct qtnf_pcie_bus_priv *priv, const u8 *fw, u32 fw_size)
{
	int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pcie_fw_hdr);
	int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
	const u8 *pblk = fw;
	int threshold = 0;
	int blk = 0;
	int len;

	pr_debug("FW upload started: fw_addr=0x%p size=%u\n", fw, fw_size);

	while (blk < blk_count) {
		if (++threshold > 10000) {
			pr_err("FW upload failed: too many retries\n");
			return -ETIMEDOUT;
		}

		len = qtnf_ep_fw_send(priv, fw_size, blk, pblk, fw);
		if (len <= 0)
			continue;

		if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
		    (blk == (blk_count - 1))) {
			qtnf_set_state(&priv->bda->bda_rc_state,
				       QTN_RC_FW_SYNC);
			if (qtnf_poll_state(&priv->bda->bda_ep_state,
					    QTN_EP_FW_SYNC,
					    QTN_FW_DL_TIMEOUT_MS)) {
				pr_err("FW upload failed: SYNC timed out\n");
				return -ETIMEDOUT;
			}

			qtnf_clear_state(&priv->bda->bda_ep_state,
					 QTN_EP_FW_SYNC);

			if (qtnf_is_state(&priv->bda->bda_ep_state,
					  QTN_EP_FW_RETRY)) {
				if (blk == (blk_count - 1)) {
					int last_round =
						blk_count & QTN_PCIE_FW_DLMASK;
					blk -= last_round;
					pblk -= ((last_round - 1) *
						blk_size + len);
				} else {
					blk -= QTN_PCIE_FW_DLMASK;
					pblk -= QTN_PCIE_FW_DLMASK * blk_size;
				}

				qtnf_clear_state(&priv->bda->bda_ep_state,
						 QTN_EP_FW_RETRY);

				pr_warn("FW upload retry: block #%d\n", blk);
				continue;
			}

			qtnf_pcie_data_tx_reclaim(priv);
		}

		pblk += len;
		blk++;
	}

	pr_debug("FW upload completed: totally sent %d blocks\n", blk);
	return 0;
}
static void qtnf_firmware_load(const struct firmware *fw, void *context)
{
	struct qtnf_pcie_bus_priv *priv = (void *)context;
	struct pci_dev *pdev = priv->pdev;
	struct qtnf_bus *bus = pci_get_drvdata(pdev);
	int ret;

	if (!fw) {
		pr_err("failed to get firmware %s\n", bus->fwname);
		goto fw_load_err;
	}

	ret = qtnf_ep_fw_load(priv, fw->data, fw->size);
	if (ret) {
		pr_err("FW upload error\n");
		goto fw_load_err;
	}

	if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("FW bringup timed out\n");
		goto fw_load_err;
	}

	bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
	pr_info("firmware is up and running\n");

fw_load_err:
	release_firmware(fw);

	complete(&bus->request_firmware_complete);
}
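
/* FW bringup: advertise host readiness in the BDA state register, then either
 * let the card boot its firmware from flash (flashboot != 0) or request the
 * firmware image from the filesystem and upload it asynchronously.
 */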
static int qtnf_bringup_fw(struct qtnf_bus *bus)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
	struct pci_dev *pdev = priv->pdev;
	int ret;
	u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;

	if (flashboot)
		state |= QTN_RC_FW_FLASHBOOT;

	qtnf_set_state(&priv->bda->bda_rc_state, state);

	if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready\n");
		return -ETIMEDOUT;
	}

	qtnf_clear_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY);

	if (flashboot) {
		pr_info("Booting FW from flash\n");

		if (!qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE,
				     QTN_FW_DL_TIMEOUT_MS))
			bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;

		return 0;
	}

	pr_info("starting firmware upload: %s\n", bus->fwname);

	ret = request_firmware_nowait(THIS_MODULE, 1, bus->fwname, &pdev->dev,
				      GFP_KERNEL, priv, qtnf_firmware_load);
	if (ret < 0) {
		pr_err("request_firmware_nowait error %d\n", ret);
		return ret;
	}

	return 1;
}
static void qtnf_reclaim_tasklet_fn(unsigned long data)
{
	struct qtnf_pcie_bus_priv *priv = (void *)data;
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_lock, flags);
	qtnf_pcie_data_tx_reclaim(priv);
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	qtnf_en_txdone_irq(priv);
}
static int qtnf_dbg_mps_show(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);

	seq_printf(s, "%d\n", priv->mps);

	return 0;
}

static int qtnf_dbg_msi_show(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);

	seq_printf(s, "%u\n", priv->msi_enabled);

	return 0;
}
static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);

	seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count);
	seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count);
	seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count);

	return 0;
}
static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);

	seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
	seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
	seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
	seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
	seq_printf(s, "tx_bd_reclaim_start(%u)\n", priv->tx_bd_reclaim_start);
	seq_printf(s, "tx_bd_index(%u)\n", priv->tx_bd_index);
	seq_printf(s, "rx_bd_index(%u)\n", priv->rx_bd_index);
	seq_printf(s, "tx_queue_len(%u)\n", priv->tx_queue_len);

	return 0;
}
static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);

	seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n",
		   priv->shm_ipc_ep_in.tx_packet_count);
	seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n",
		   priv->shm_ipc_ep_in.rx_packet_count);
	seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n",
		   priv->shm_ipc_ep_out.tx_packet_count);
	seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n",
		   priv->shm_ipc_ep_out.rx_packet_count);

	return 0;
}
static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qtnf_pcie_bus_priv *pcie_priv;
	struct qtnf_bus *bus;
	int ret;

	bus = devm_kzalloc(&pdev->dev,
			   sizeof(*bus) + sizeof(*pcie_priv), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto err_init;
	}

	pcie_priv = get_bus_priv(bus);

	pci_set_drvdata(pdev, bus);
	bus->bus_ops = &qtnf_pcie_bus_ops;
	bus->dev = &pdev->dev;
	bus->fw_state = QTNF_FW_STATE_RESET;
	pcie_priv->pdev = pdev;

	strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
	init_completion(&bus->request_firmware_complete);
	mutex_init(&bus->bus_lock);
	spin_lock_init(&pcie_priv->irq_lock);
	spin_lock_init(&pcie_priv->tx_lock);

	/* init stats */
	pcie_priv->tx_full_count = 0;
	pcie_priv->tx_done_count = 0;
	pcie_priv->pcie_irq_count = 0;
	pcie_priv->pcie_irq_rx_count = 0;
	pcie_priv->pcie_irq_tx_count = 0;
	pcie_priv->tx_reclaim_done = 0;
	pcie_priv->tx_reclaim_req = 0;

	pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PEARL_PCIE");
	if (!pcie_priv->workqueue) {
		pr_err("failed to alloc bus workqueue\n");
		ret = -ENODEV;
		goto err_priv;
	}
	if (!pci_is_pcie(pdev)) {
		pr_err("device %s is not PCI Express\n", pci_name(pdev));
		ret = -EIO;
		goto err_base;
	}

	qtnf_tune_pcie_mps(pcie_priv);

	ret = pcim_enable_device(pdev);
	if (ret) {
		pr_err("failed to init PCI device %x\n", pdev->device);
		goto err_base;
	} else {
		pr_debug("successful init of PCI device %x\n", pdev->device);
	}

	pcim_pin_device(pdev);
	pci_set_master(pdev);

	ret = qtnf_pcie_init_irq(pcie_priv);
	if (ret < 0) {
		pr_err("irq init failed\n");
		goto err_base;
	}

	ret = qtnf_pcie_init_memory(pcie_priv);
	if (ret < 0) {
		pr_err("PCIE memory init failed\n");
		goto err_base;
	}

	ret = qtnf_pcie_init_shm_ipc(pcie_priv);
	if (ret < 0) {
		pr_err("PCIE SHM IPC init failed\n");
		goto err_base;
	}

	ret = qtnf_pcie_init_dma_mask(pcie_priv, DMA_BIT_MASK(32));
	if (ret) {
		pr_err("PCIE DMA mask init failed\n");
		goto err_base;
	}

	ret = devm_add_action(&pdev->dev, free_xfer_buffers, (void *)pcie_priv);
	if (ret) {
		pr_err("custom release callback init failed\n");
		goto err_base;
	}

	ret = qtnf_pcie_init_xfer(pcie_priv);
	if (ret) {
		pr_err("PCIE xfer init failed\n");
		goto err_base;
	}

	/* init default irq settings */
	qtnf_init_hdp_irqs(pcie_priv);

	/* start with disabled irqs */
	qtnf_disable_hdp_irqs(pcie_priv);

	ret = devm_request_irq(&pdev->dev, pdev->irq, &qtnf_interrupt, 0,
			       "qtnf_pcie_irq", (void *)bus);
	if (ret) {
		pr_err("failed to request pcie irq %d\n", pdev->irq);
		goto err_base;
	}

	tasklet_init(&pcie_priv->reclaim_tq, qtnf_reclaim_tasklet_fn,
		     (unsigned long)pcie_priv);
	init_dummy_netdev(&bus->mux_dev);
	netif_napi_add(&bus->mux_dev, &bus->mux_napi,
		       qtnf_rx_poll, 10);
	ret = qtnf_bringup_fw(bus);
	if (ret < 0)
		goto err_bringup_fw;
	else if (ret)
		wait_for_completion(&bus->request_firmware_complete);

	if (bus->fw_state != QTNF_FW_STATE_FW_DNLD_DONE) {
		pr_err("failed to start FW\n");
		ret = -EIO;
		goto err_bringup_fw;
	}

	if (qtnf_poll_state(&pcie_priv->bda->bda_ep_state, QTN_EP_FW_QLINK_DONE,
			    QTN_FW_QLINK_TIMEOUT_MS)) {
		pr_err("FW runtime failure\n");
		ret = -EIO;
		goto err_bringup_fw;
	}

	ret = qtnf_core_attach(bus);
	if (ret) {
		pr_err("failed to attach core\n");
		goto err_bringup_fw;
	}

	qtnf_debugfs_init(bus, DRV_NAME);
	qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
	qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
	qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
	qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
	qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);

	return 0;

err_bringup_fw:
	netif_napi_del(&bus->mux_napi);

err_base:
	flush_workqueue(pcie_priv->workqueue);
	destroy_workqueue(pcie_priv->workqueue);

err_priv:
	pci_set_drvdata(pdev, NULL);

err_init:
	return ret;
}
static void qtnf_pcie_remove(struct pci_dev *pdev)
{
	struct qtnf_pcie_bus_priv *priv;
	struct qtnf_bus *bus;

	bus = pci_get_drvdata(pdev);
	if (!bus)
		return;

	priv = get_bus_priv(bus);

	qtnf_core_detach(bus);
	netif_napi_del(&bus->mux_napi);

	flush_workqueue(priv->workqueue);
	destroy_workqueue(priv->workqueue);
	tasklet_kill(&priv->reclaim_tq);

	qtnf_debugfs_remove(bus);

	qtnf_pcie_free_shm_ipc(priv);
}
#ifdef CONFIG_PM_SLEEP
static int qtnf_pcie_suspend(struct device *dev)
{
	return 0;
}

static int qtnf_pcie_resume(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP
/* Power Management Hooks */
static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend,
			 qtnf_pcie_resume);
#endif
static const struct pci_device_id qtnf_pcie_devid_table[] = {
	{
		PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	},
	{ },
};

MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
static struct pci_driver qtnf_pcie_drv_data = {
	.name = DRV_NAME,
	.id_table = qtnf_pcie_devid_table,
	.probe = qtnf_pcie_probe,
	.remove = qtnf_pcie_remove,
#ifdef CONFIG_PM_SLEEP
	.driver = {
		.pm = &qtnf_pcie_pm_ops,
	},
#endif
};
static int __init qtnf_pcie_register(void)
{
	pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
	return pci_register_driver(&qtnf_pcie_drv_data);
}

static void __exit qtnf_pcie_exit(void)
{
	pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
	pci_unregister_driver(&qtnf_pcie_drv_data);
}

module_init(qtnf_pcie_register);
module_exit(qtnf_pcie_exit);

MODULE_AUTHOR("Quantenna Communications");
MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
MODULE_LICENSE("GPL");