/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"
struct lmac {
	struct bgx		*bgx;
	int			dmac;
	u8			mac[ETH_ALEN];
	u8			lmac_type;
	u8			lane_to_sds;
	bool			use_training;
	bool			autoneg;
	bool			link_up;
	int			lmacid; /* ID within BGX */
	int			lmacid_bd; /* ID on board */
	struct net_device	netdev;
	struct phy_device	*phydev;
	unsigned int		last_duplex;
	unsigned int		last_link;
	unsigned int		last_speed;
	bool			is_sgmii;
	struct delayed_work	dwork;
	struct workqueue_struct *check_link;
};

struct bgx {
	u8			bgx_id;
	struct lmac		lmac[MAX_LMAC_PER_BGX];
	u8			lmac_count;
	u8			max_lmac;
	u8			acpi_lmac_idx;
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
	bool			is_dlm;
	bool			is_rgx;
};

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */
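
/* bgx_vnic[] is the global lookup table for all BGX instances in the
 * system; every exported helper below indexes it as
 * (node * MAX_BGX_PER_NODE) + bgx_idx.
 */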
static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);
/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver.  The readq()/writeq() functions add
 * explicit ordering operation which in this case are redundant, and only
 * add overhead.
 */
/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}
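
/* bgx_poll_reg() below busy-waits on a register, sleeping 1-2ms per
 * iteration, until the masked bits clear (zero == true) or become set
 * (zero == false); it returns 0 on success and 1 on timeout.
 */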
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}
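
/* The exported helpers below form the interface used by the companion NIC
 * PF driver (nic_main.c) to discover BGX/LMAC topology. An illustrative
 * (not verbatim) usage sketch:
 *
 *	unsigned int bgx_map = bgx_get_map(node);
 *	int bgx_idx, lmac_cnt;
 *
 *	for (bgx_idx = 0; bgx_idx < MAX_BGX_PER_NODE; bgx_idx++)
 *		if (bgx_map & (1 << bgx_idx))
 *			lmac_cnt = bgx_get_lmac_count(node, bgx_idx);
 */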
/* Return number of BGX present in HW */
unsigned bgx_get_map(int node)
{
	int i;
	unsigned map = 0;

	for (i = 0; i < MAX_BGX_PER_NODE; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);
/* Return number of LMAC configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);
/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->mac_type = lmac->lmac_type;
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable)
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
	else
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	if (bgx->is_rgx)
		xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
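
/* The two PFC helpers below are reached via the PF/VF mailbox from the VF
 * driver's ethtool pause-frame ops; they apply only to SMU-based
 * (non-SGMII) LMACs, hence the is_sgmii early return.
 */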
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	pfc->fc_rx = cfg & RX_EN;
	pfc->fc_tx = cfg & TX_EN;
	pfc->autoneg = 0;
}
EXPORT_SYMBOL(bgx_lmac_get_pfc);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	cfg &= ~(RX_EN | TX_EN);
	cfg |= (pfc->fc_rx ? RX_EN : 0x00);
	cfg |= (pfc->fc_tx ? TX_EN : 0x00);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
}
EXPORT_SYMBOL(bgx_lmac_set_pfc);
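
/* bgx_sgmii_change_link_state() pushes software link state into hardware:
 * the LMAC is quiesced (CMR_EN cleared), the GMI port config and PCS
 * sampling point are reprogrammed for the negotiated 10/100/1000 Mbps
 * speed, and the LMAC is then re-enabled.
 */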
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
		xcv_setup_link(lmac->link_up, lmac->last_speed);
}
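
/* bgx_lmac_handler() is the link-change callback passed to
 * phy_connect_direct() in bgx_lmac_enable(); phylib invokes it whenever the
 * attached PHY's state changes, and it mirrors that state into the LMAC.
 */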
static void bgx_lmac_handler(struct net_device *netdev)
{
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev;
	int link_changed = 0;

	if (!lmac)
		return;

	phydev = lmac->phydev;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (!link_changed)
		return;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (!bgx)
		return 0;

	if (idx > 8)
		lmac = 0;
	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);
static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	u64 offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}
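
/* Internal loopback is requested by the VF driver through the PF/VF mailbox
 * (e.g. for the NETIF_F_LOOPBACK feature); SGMII LMACs loop back in the GMP
 * PCS, all other modes in the SPU.
 */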
/* Configure BGX LMAC in internal loopback mode */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);
static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
{
	int lmacid = lmac->lmacid;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= PCS_MRX_CTL_RST_AN;
	if (lmac->phydev) {
		cfg |= PCS_MRX_CTL_AN_EN;
	} else {
		/* In scenarios where PHY driver is not present or it's a
		 * non-standard PHY, FW sets AN_EN to inform Linux driver
		 * to do auto-neg and link polling or not.
		 */
		if (cfg & PCS_MRX_CTL_AN_EN)
			lmac->autoneg = true;
	}
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (lmac->lmac_type == BGX_MODE_QSGMII) {
		/* Disable disparity check for QSGMII */
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg &= ~PCS_MISC_CTL_DISP_EN;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
		return 0;
	}

	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
				 PCS_MRX_STATUS_AN_CPT, false)) {
			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
			return -1;
		}
	}

	return 0;
}
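
/* XAUI/RXAUI/XFI/XLAUI and 10G/40G-KR bring-up: reset the SPU, optionally
 * arm link training for the KR modes, disable FEC and autonegotiation, then
 * configure pause frames and jabber/threshold limits on the SMU side.
 */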
static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
	u64 cfg;
	int lmacid = lmac->lmacid;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (lmac->lmac_type == BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_INTLV_RDISP);

	/* Clear receive packet disable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (lmac->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (lmac->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (lmac->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Enable receive and transmission of pause frames */
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
		      BCK_EN | DRP_EN | TX_EN | RX_EN));
	/* Configure pause time and interval */
	bgx_reg_write(bgx, lmacid,
		      BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
	cfg &= ~0xFFFFull;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
		      cfg | (DEFAULT_PAUSE_TIME - 0x1000));
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}
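
/* In bgx_xaui_check_link() below, bits 13/14 written to BGX_SPUX_INT are
 * assumed to be the SPU training-done/training-failure interrupt bits, and
 * bit 0 of BGX_SPUX_BR_PMD_CRTL restarts link training; the magic numbers
 * are kept as in the original sources.
 */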
static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = lmac->lmac_type;
	u64 cfg;

	if (lmac->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (lmac->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
			}
		}
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	/* Check for MAC RX faults */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
	cfg &= SMU_RX_CTL_STATUS;
	if (!cfg)
		return 0;

	/* Rx local/remote fault seen.
	 * Do lmac reinit to see if condition recovers
	 */
	bgx_lmac_xaui_init(bgx, lmac);

	return -1;
}
static void bgx_poll_for_sgmii_link(struct lmac *lmac)
{
	u64 pcs_link, an_result;
	u8 speed;

	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
				BGX_GMP_PCS_MRX_STATUS);

	/*Link state bit is sticky, read it again*/
	if (!(pcs_link & PCS_MRX_STATUS_LINK))
		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
					BGX_GMP_PCS_MRX_STATUS);

	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		goto next_poll;
	}

	lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
	an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
				 BGX_GMP_PCS_ANX_AN_RESULTS);

	speed = (an_result >> 3) & 0x3;
	lmac->last_duplex = (an_result >> 1) & 0x1;
	switch (speed) {
	case 0:
		lmac->last_speed = 10;
		break;
	case 1:
		lmac->last_speed = 100;
		break;
	case 2:
		lmac->last_speed = 1000;
		break;
	default:
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		break;
	}

next_poll:

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up)
			bgx_sgmii_change_link_state(lmac);
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
}
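
/* Link supervision runs on a per-LMAC single-threaded workqueue: SGMII
 * links re-poll every 3 seconds (above), XAUI-class links every 2 seconds
 * (below), each pass re-arming itself via queue_delayed_work().
 */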
static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);
	if (lmac->is_sgmii) {
		bgx_poll_for_sgmii_link(lmac);
		return;
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = 1;
		if (lmac->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
	}

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up) {
			if (bgx_xaui_check_link(lmac)) {
				/* Errors, clear link_up state */
				lmac->link_up = 0;
				lmac->last_speed = SPEED_UNKNOWN;
				lmac->last_duplex = DUPLEX_UNKNOWN;
			}
		}
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}
static int phy_interface_mode(u8 lmac_type)
{
	if (lmac_type == BGX_MODE_QSGMII)
		return PHY_INTERFACE_MODE_QSGMII;
	if (lmac_type == BGX_MODE_RGMII)
		return PHY_INTERFACE_MODE_RGMII;

	return PHY_INTERFACE_MODE_SGMII;
}
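
/* bgx_lmac_enable() picks one of three link-management paths: a phylib
 * driven PHY (phy_connect_direct() + bgx_lmac_handler), firmware-enabled
 * in-band autoneg polled from the check_link workqueue, or a fixed
 * 1G/full-duplex default when neither is available.
 */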
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if ((lmac->lmac_type == BGX_MODE_SGMII) ||
	    (lmac->lmac_type == BGX_MODE_QSGMII) ||
	    (lmac->lmac_type == BGX_MODE_RGMII)) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmac))
			return -1;
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmac))
			return -1;
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, incase low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
		if (!lmac->phydev) {
			if (lmac->autoneg) {
				bgx_reg_write(bgx, lmacid,
					      BGX_GMP_PCS_LINKX_TIMER,
					      PCS_LINKX_TIMER_COUNT);
				goto poll;
			} else {
				/* Default to below link speed and duplex */
				lmac->link_up = true;
				lmac->last_speed = 1000;
				lmac->last_duplex = 1;
				bgx_sgmii_change_link_state(lmac);
				return 0;
			}
		}
		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       phy_interface_mode(lmac->lmac_type)))
			return -ENODEV;

		phy_start_aneg(lmac->phydev);
		return 0;
	}

poll:
	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
					   WQ_MEM_RECLAIM, 1);
	if (!lmac->check_link)
		return -ENOMEM;
	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);

	return 0;
}
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&lmac->dwork);
		destroy_workqueue(lmac->check_link);
	}

	/* Disable packet reception */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_RX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Give chance for Rx/Tx FIFO to get drained */
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}
static void bgx_init_hw(struct bgx *bgx)
{
	int i;
	struct lmac *lmac;

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		lmac = &bgx->lmac[i];
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}
static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
{
	return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
}
static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
	struct device *dev = &bgx->pdev->dev;
	struct lmac *lmac;
	char str[20];
	u8 dlm;

	if (lmacid > bgx->max_lmac)
		return;

	lmac = &bgx->lmac[lmacid];
	dlm = (lmacid / 2) + (bgx->bgx_id * 2);
	if (!bgx->is_dlm)
		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
	else
		sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);

	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
		dev_info(dev, "%s: SGMII\n", (char *)str);
		break;
	case BGX_MODE_XAUI:
		dev_info(dev, "%s: XAUI\n", (char *)str);
		break;
	case BGX_MODE_RXAUI:
		dev_info(dev, "%s: RXAUI\n", (char *)str);
		break;
	case BGX_MODE_XFI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XFI\n", (char *)str);
		else
			dev_info(dev, "%s: 10G_KR\n", (char *)str);
		break;
	case BGX_MODE_XLAUI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XLAUI\n", (char *)str);
		else
			dev_info(dev, "%s: 40G_KR4\n", (char *)str);
		break;
	case BGX_MODE_QSGMII:
		if ((lmacid == 0) &&
		    (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
			return;
		if ((lmacid == 2) &&
		    (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
			return;
		dev_info(dev, "%s: QSGMII\n", (char *)str);
		break;
	case BGX_MODE_RGMII:
		dev_info(dev, "%s: RGMII\n", (char *)str);
		break;
	case BGX_MODE_INVALID:
		/* Nothing to do */
		break;
	}
}
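
/* lane_to_sds encodes the LMAC-lane to serdes-lane mapping, two bits per
 * lane: 0xE4 (binary 11 10 01 00) is the identity map used by the four-lane
 * modes, while the RXAUI pairs use 0x4 (serdes 0,1) and 0xE (serdes 2,3).
 */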
static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
{
	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
	case BGX_MODE_XFI:
		lmac->lane_to_sds = lmac->lmacid;
		break;
	case BGX_MODE_XAUI:
	case BGX_MODE_XLAUI:
	case BGX_MODE_RGMII:
		lmac->lane_to_sds = 0xE4;
		break;
	case BGX_MODE_RXAUI:
		lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
		break;
	case BGX_MODE_QSGMII:
		/* There is no way to determine if DLM0/2 is QSGMII or
		 * DLM1/3 is configured to QSGMII as bootloader will
		 * configure all LMACs, so take whatever is configured
		 * by low level firmware.
		 */
		lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
		break;
	default:
		lmac->lane_to_sds = 0;
		break;
	}
}
static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
{
	if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR)) {
		lmac->use_training = 0;
		return;
	}

	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;
}
static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
{
	struct lmac *lmac;
	struct lmac *olmac;
	u64 cmr_cfg;
	u8 lmac_type;
	u8 lane_to_sds;

	lmac = &bgx->lmac[idx];

	if (!bgx->is_dlm || bgx->is_rgx) {
		/* Read LMAC0 type to figure out QLM mode
		 * This is configured by low level firmware
		 */
		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
		if (bgx->is_rgx)
			lmac->lmac_type = BGX_MODE_RGMII;
		lmac_set_training(bgx, lmac, 0);
		lmac_set_lane2sds(bgx, lmac);
		return;
	}

	/* On 81xx BGX can be split across 2 DLMs
	 * firmware programs lmac_type of LMAC0 and LMAC2
	 */
	if ((idx == 0) || (idx == 2)) {
		cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
		lane_to_sds = (u8)(cmr_cfg & 0xFF);
		/* Check if config is not reset value */
		if ((lmac_type == 0) && (lane_to_sds == 0xE4))
			lmac->lmac_type = BGX_MODE_INVALID;
		else
			lmac->lmac_type = lmac_type;
		lmac_set_training(bgx, lmac, lmac->lmacid);
		lmac_set_lane2sds(bgx, lmac);

		olmac = &bgx->lmac[idx + 1];
		/* Check if other LMAC on the same DLM is already configured by
		 * firmware, if so use the same config or else set as same, as
		 * that of LMAC 0/2.
		 * This check is needed as on 80xx only one lane of each of the
		 * DLM of BGX0 is used, so have to rely on firmware for
		 * distinguishing 80xx from 81xx.
		 */
		cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG);
		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
		lane_to_sds = (u8)(cmr_cfg & 0xFF);
		if ((lmac_type == 0) && (lane_to_sds == 0xE4)) {
			olmac->lmac_type = lmac->lmac_type;
			lmac_set_lane2sds(bgx, olmac);
		} else {
			olmac->lmac_type = lmac_type;
			olmac->lane_to_sds = lane_to_sds;
		}
		lmac_set_training(bgx, olmac, olmac->lmacid);
	}
}
static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
{
	struct lmac *lmac;

	if (!bgx->is_dlm)
		return true;

	lmac = &bgx->lmac[0];
	if (lmac->lmac_type == BGX_MODE_INVALID)
		return false;

	return true;
}
static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct lmac *lmac;
	struct lmac *lmac01;
	struct lmac *lmac23;
	u8 idx;

	/* Init all LMAC's type to invalid */
	for (idx = 0; idx < bgx->max_lmac; idx++) {
		lmac = &bgx->lmac[idx];
		lmac->lmacid = idx;
		lmac->lmac_type = BGX_MODE_INVALID;
		lmac->use_training = false;
	}

	/* It is assumed that low level firmware sets this value */
	bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (bgx->lmac_count > bgx->max_lmac)
		bgx->lmac_count = bgx->max_lmac;

	for (idx = 0; idx < bgx->max_lmac; idx++)
		bgx_set_lmac_config(bgx, idx);

	if (!bgx->is_dlm || bgx->is_rgx) {
		bgx_print_qlm_mode(bgx, 0);
		return;
	}

	if (bgx->lmac_count) {
		bgx_print_qlm_mode(bgx, 0);
		bgx_print_qlm_mode(bgx, 2);
	}

	/* If DLM0 is not in BGX mode then LMAC0/1 have
	 * to be configured with serdes lanes of DLM1
	 */
	if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
		return;
	for (idx = 0; idx < bgx->lmac_count; idx++) {
		lmac01 = &bgx->lmac[idx];
		lmac23 = &bgx->lmac[idx + 2];
		lmac01->lmac_type = lmac23->lmac_type;
		lmac01->lane_to_sds = lmac23->lane_to_sds;
	}
}
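
/* PHY and MAC-address discovery: on ACPI systems the namespace is walked
 * for the matching "BGXn" device and its children (MAC address only, no
 * PHY binding); on DT systems each child node supplies a MAC address and
 * an optional phy-handle to bind through phylib.
 */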
#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
				u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret;

	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mac-address", mac, ETH_ALEN);
	if (ret)
		goto out;

	if (!is_valid_ether_addr(mac)) {
		dev_err(dev, "MAC address invalid: %pM\n", mac);
		ret = -EINVAL;
		goto out;
	}

	dev_info(dev, "MAC address set to: %pM\n", mac);

	memcpy(dst, mac, ETH_ALEN);
out:
	return ret;
}
/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct device *dev = &bgx->pdev->dev;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		goto out;

	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);

	bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
	bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
	return AE_OK;
}
static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4))
		return AE_OK;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}
static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */
#if IS_ENABLED(CONFIG_OF_MDIO)

static int bgx_init_of_phy(struct bgx *bgx)
{
	struct fwnode_handle *fwn;
	struct device_node *node = NULL;
	u8 lmac = 0;

	device_for_each_child_node(&bgx->pdev->dev, fwn) {
		struct phy_device *pd;
		struct device_node *phy_np;
		const char *mac;

		/* Should always be an OF node. But if it is not, we
		 * cannot handle it, so exit the loop.
		 */
		node = to_of_node(fwn);
		if (!node)
			break;

		mac = of_get_mac_address(node);
		if (mac)
			ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;

		phy_np = of_parse_phandle(node, "phy-handle", 0);
		/* If there is no phy or defective firmware presents
		 * this cortina phy, for which there is no driver
		 * support, ignore it.
		 */
		if (phy_np &&
		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
			/* Wait until the phy drivers are available */
			pd = of_phy_find_device(phy_np);
			if (!pd)
				goto defer;
			bgx->lmac[lmac].phydev = pd;
		}

		lmac++;
		if (lmac == bgx->max_lmac) {
			of_node_put(node);
			break;
		}
	}
	return 0;

defer:
	/* We are bailing out, try not to leak device reference counts
	 * for phy devices we may have already found.
	 */
	while (lmac) {
		if (bgx->lmac[lmac].phydev) {
			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
			bgx->lmac[lmac].phydev = NULL;
		}
		lmac--;
	}
	of_node_put(node);
	return -EPROBE_DEFER;
}
#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */

static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;
	u16 sdevid;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
		bgx->bgx_id = (pci_resource_start(pdev,
			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
		bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
		bgx->max_lmac = MAX_LMAC_PER_BGX;
		bgx_vnic[bgx->bgx_id] = bgx;
	} else {
		bgx->is_rgx = true;
		bgx->max_lmac = 1;
		bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
		bgx_vnic[bgx->bgx_id] = bgx;
		xcv_init_hw();
	}

	/* On 81xx all are DLMs and on 83xx there are 3 BGX QLMs and one
	 * BGX i.e BGX2 can be split across 2 DLMs.
	 */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
	    ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
		bgx->is_dlm = true;

	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			while (lmac)
				bgx_lmac_disable(bgx, --lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void bgx_remove(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	u8 lmac;

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
		bgx_lmac_disable(bgx, lmac);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);