/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
71 #include "cxgb4_filter.h"
73 #include "t4_values.h"
76 #include "t4fw_version.h"
77 #include "cxgb4_dcb.h"
79 #include "cxgb4_debugfs.h"
84 #include "cxgb4_tc_u32.h"
85 #include "cxgb4_tc_flower.h"
86 #include "cxgb4_ptp.h"
87 #include "cxgb4_cudbg.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
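/* Usage sketch (illustrative, not part of the driver): the interrupt scheme
 * can be constrained at load time with the module parameter above, e.g.
 *
 *	# modprobe cxgb4 msi=1
 *
 * to rule out MSI-X.  Because msi is read at probe time, a later write to
 * /sys/module/cxgb4/parameters/msi only affects adapters probed afterwards.
 */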
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance-sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or the
 * driver's cxgb_select_queue function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		/* per-speed cases elided */
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = enable ? value : 0;
	}
}

static int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */
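/* Illustrative sketch (not part of the driver): the mapping programmed by
 * dcb_tx_queue_prio_enable() is simply Queue Index -> DCB Priority, so a
 * port with nqsets == 4 ends up with:
 *
 *	TX Queue 0 -> Priority 0
 *	TX Queue 1 -> Priority 1
 *	TX Queue 2 -> Priority 2
 *	TX Queue 3 -> Priority 3
 *
 * while disabling DCB writes the "no priority" value 0xffffffff instead.
 */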
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
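/* Illustrative sketch (hypothetical values): hash_mac_addr() reduces each
 * 48-bit MAC address to a bit position in the 64-bit hash vector, so a
 * list of three addresses hashing to 3, 17 and 3 would program:
 *
 *	u64 vec = 0;
 *	vec |= 1ULL << 3;	// first address
 *	vec |= 1ULL << 17;	// second address
 *	vec |= 1ULL << 3;	// third address, collides with the first
 *	// vec == 0x0000000000020008; colliding addresses share a bit
 */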
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so at the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
					  true, CXGB4_DCB_ENABLED);

	return ret;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd &
					FW_PORT_CMD_DCBXDIS_F)
				   : !!(pcmd->u.info32.lstatus32_to_cbllen32 &
					FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}
}
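/* Illustrative sketch (hypothetical names): for an adapter whose first port
 * is "eth2", with two ports of two queue sets each, the descriptions built
 * above would read:
 *
 *	msix_info[0].desc = "eth2"		(non-data interrupts)
 *	msix_info[1].desc = "eth2-FWeventq"	(firmware event queue)
 *	msix_info[2].desc = "eth2-Rx0"
 *	msix_info[3].desc = "eth2-Rx1"
 *	msix_info[4].desc = "eth3-Rx0"
 *	msix_info[5].desc = "eth3-Rx1"
 */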
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
}
/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);

	kfree(rss);
	return err;
}
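/* Illustrative sketch (hypothetical values): @queues holds *indices* into
 * the port's Rx queue sets, which are translated to absolute SGE queue ids
 * before being written to hardware.  For a port whose response queues have
 * abs_id 16..19, an even spread over the first two queues
 *
 *	u16 queues[] = { 0, 1, 0, 1 };
 *
 * would program that slice of the RSS table as { 16, 17, 16, 17 }.
 */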
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);

		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		adap->msi_idx = 1;	/* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err)
		t4_free_sge_resources(adap);
	return err;
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info = NULL;
	unsigned int cmplqid = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler, NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;

freeout:
	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
	t4_free_sge_resources(adap);
	return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
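/* Illustrative sketch: with select_queue=1 and no recorded Rx queue, the TX
 * queue is derived from the current CPU, folded into range by repeated
 * subtraction (equivalent to a modulo for small overshoots), e.g. with
 * real_num_tx_queues == 8:
 *
 *	CPU 0  -> txq 0
 *	CPU 7  -> txq 7
 *	CPU 10 -> txq 2
 *
 * On a DCB link, the VLAN Priority Code Point (0-7) picks the queue instead.
 */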
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
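/* Illustrative sketch (hypothetical table): both helpers above do a linear
 * scan for the entry with the smallest absolute distance.  With
 * timer_val[] = { 1, 5, 10, 50, 100, 200 }, a request for 7us returns
 * index 1 (5us, distance 2) rather than index 2 (10us, distance 3); ties
 * resolve to the first entry scanned.
 */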
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
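/* Usage sketch (illustrative): a caller adjusting interrupt coalescing,
 * e.g. from an ethtool set_coalesce handler, might request a 100us hold-off
 * timer together with an 8-packet threshold:
 *
 *	err = cxgb4_set_rspq_intr_params(&q->rspq, 100, 8);
 *
 * Passing 0 for both arguments degenerates to a packet-count threshold of 1,
 * i.e. roughly an interrupt per response-queue entry.
 */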
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

/*
 * upper-layer driver support
 */
/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
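/* Usage sketch (illustrative, "my_conn" is a hypothetical ULD object): the
 * two calls above bracket the lifetime of an active-open connection, keeping
 * per-connection state reachable from the hardware TID:
 *
 *	atid = cxgb4_alloc_atid(&adap->tids, my_conn);	// -1 when exhausted
 *	if (atid < 0)
 *		return -ENOMEM;
 *	...
 *	cxgb4_free_atid(&adap->tids, atid);		// back on the free list
 */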
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
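/* Illustrative sketch: tid_tab entries are pointer-aligned, so their low two
 * bits are always zero and can carry the Tx channel while the entry sits on
 * the singly-linked release list:
 *
 *	head = (void **)((uintptr_t)p | chan);	// stash chan in bits 1:0
 *	chan = (uintptr_t)head & 3;		// recover the channel
 *	p    = (void *)head - chan;		// recover the pointer
 *
 * process_tid_release_list() below undoes the encoding exactly this way.
 */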
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);
	}

	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}
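/* Illustrative sketch: all of the tables above live in one kvzalloc()
 * allocation, carved up back to back:
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[] | ftid_tab[nftids + nsftids] | ftid_bmap[]
 *
 * which is why freeing t->tid_tab alone releases every table.
 */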
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
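/* Illustrative sketch (hypothetical table): with
 * mtus[] = { 576, 1500, 4096, 9000, ... }, cxgb4_best_mtu(mtus, 8000, &i)
 * walks forward while the *next* entry still fits and stops at 4096
 * (i == 2): the largest table MTU that does not exceed the target.  A
 * target below mtus[0] simply returns mtus[0].
 */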
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx = NMTUS - 1;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
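/* Illustrative sketch: for a hypothetical VI id of 0x105,
 * viid & 0x7f == 0x05, so the helper returns SMT index 0x0a (row 5,
 * entry 0 of a 2-entry row) on T4/T5, and index 0x05 on T6.
 */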
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6, false);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
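/* Illustrative sketch: the 64-bit context word read above packs the queue
 * indices as bitfields, extracted with shift-and-mask:
 *
 *	cidx = (val >> 25) & 0xffff;	// bits 40..25
 *	pidx = (val >>  9) & 0xffff;	// bits 24..9
 */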
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		return ret;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	u32 offset, memtype, memaddr;
	struct adapter *adap;
	u32 hma_size = 0;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
		hma_size = EXT_MEM1_SIZE_G(size) << 20;
	}
	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (hma_size && (offset < (edc1_end + hma_size))) {
			memtype = MEM_HMA;
			memaddr = offset - edc1_end;
		} else if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 lo, hi;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}
static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: qid=%d, pidx_inc=%d\n",
				qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);

	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);

	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/*
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	mutex_lock(&uld_mutex);
	err = setup_sge_queues(adap);
	if (err)
		goto rel_lock;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;
		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}

	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	mutex_unlock(&uld_mutex);

	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adap->mac_hlist);
	return err;

 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
 rel_lock:
	mutex_unlock(&uld_mutex);
	return err;
}
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
2312 * net_device operations
2314 static int cxgb_open(struct net_device *dev)
2317 struct port_info *pi = netdev_priv(dev);
2318 struct adapter *adapter = pi->adapter;
2320 netif_carrier_off(dev);
2322 if (!(adapter->flags & FULL_INIT_DONE)) {
2323 err = cxgb_up(adapter);
2328 /* It's possible that the basic port information could have
2329 * changed since we first read it.
2331 err = t4_update_port_info(pi);
2335 err = link_start(dev);
2337 netif_tx_start_all_queues(dev);
2341 static int cxgb_close(struct net_device *dev)
2343 struct port_info *pi = netdev_priv(dev);
2344 struct adapter *adapter = pi->adapter;
2347 netif_tx_stop_all_queues(dev);
2348 netif_carrier_off(dev);
2349 ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2350 false, false, false);
2351 #ifdef CONFIG_CHELSIO_T4_DCB
2352 cxgb4_dcb_reset(dev);
2353 dcb_tx_queue_prio_enable(dev, false);
2358 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2359 __be32 sip, __be16 sport, __be16 vlan,
2360 unsigned int queue, unsigned char port, unsigned char mask)
2363 struct filter_entry *f;
2364 struct adapter *adap;
2368 adap = netdev2adap(dev);
2370 /* Adjust stid to correct filter index */
2371 stid -= adap->tids.sftid_base;
2372 stid += adap->tids.nftids;
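/* Illustrative example (hypothetical numbers, not read from hardware):
 * with sftid_base = 1024 and nftids = 192, a caller passing stid = 1030
 * lands at index (1030 - 1024) + 192 = 198, i.e. the 7th slot of the
 * server-filter region that sits directly above the regular filters in
 * ftid_tab[].
 */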
2374 /* Check to make sure the filter requested is writable ...
2376 f = &adap->tids.ftid_tab[stid];
2377 ret = writable_filter(f);
2381 /* Clear out any old resources being used by the filter before
2382 * we start constructing the new filter.
2385 clear_filter(adap, f);
2387 /* Clear out filter specifications */
2388 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2389 f->fs.val.lport = cpu_to_be16(sport);
2390 f->fs.mask.lport = ~0;
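/* Each filter field is a (value, mask) pair; a mask of all-ones requests
 * an exact match. Here, for example, sport = 80 matches only packets with
 * local port 80, whereas a mask of 0 would have matched any port.
 */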
2392 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2393 for (i = 0; i < 4; i++) {
2394 f->fs.val.lip[i] = val[i];
2395 f->fs.mask.lip[i] = ~0;
2397 if (adap->params.tp.vlan_pri_map & PORT_F) {
2398 f->fs.val.iport = port;
2399 f->fs.mask.iport = mask;
2403 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2404 f->fs.val.proto = IPPROTO_TCP;
2405 f->fs.mask.proto = ~0;
2410 /* Mark filter as locked */
2414 /* Save the actual tid. We need this to get the corresponding
2415 * filter entry structure in filter_rpl.
2417 f->tid = stid + adap->tids.ftid_base;
2418 ret = set_filter_wr(adap, stid);
2420 clear_filter(adap, f);
2426 EXPORT_SYMBOL(cxgb4_create_server_filter);
2428 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2429 unsigned int queue, bool ipv6)
2431 struct filter_entry *f;
2432 struct adapter *adap;
2434 adap = netdev2adap(dev);
2436 /* Adjust stid to correct filter index */
2437 stid -= adap->tids.sftid_base;
2438 stid += adap->tids.nftids;
2440 f = &adap->tids.ftid_tab[stid];
2441 /* Unlock the filter */
2444 return delete_filter(adap, stid);
2446 EXPORT_SYMBOL(cxgb4_remove_server_filter);
2448 static void cxgb_get_stats(struct net_device *dev,
2449 struct rtnl_link_stats64 *ns)
2451 struct port_stats stats;
2452 struct port_info *p = netdev_priv(dev);
2453 struct adapter *adapter = p->adapter;
2455 /* Block retrieving statistics during EEH error
2456 * recovery. Otherwise, the recovery might fail
2457 * and the PCI device will be removed permanently
2459 spin_lock(&adapter->stats_lock);
2460 if (!netif_device_present(dev)) {
2461 spin_unlock(&adapter->stats_lock);
2464 t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
2466 spin_unlock(&adapter->stats_lock);
2468 ns->tx_bytes = stats.tx_octets;
2469 ns->tx_packets = stats.tx_frames;
2470 ns->rx_bytes = stats.rx_octets;
2471 ns->rx_packets = stats.rx_frames;
2472 ns->multicast = stats.rx_mcast_frames;
2474 /* detailed rx_errors */
2475 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2477 ns->rx_over_errors = 0;
2478 ns->rx_crc_errors = stats.rx_fcs_err;
2479 ns->rx_frame_errors = stats.rx_symbol_err;
2480 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
2481 stats.rx_ovflow2 + stats.rx_ovflow3 +
2482 stats.rx_trunc0 + stats.rx_trunc1 +
2483 stats.rx_trunc2 + stats.rx_trunc3;
2484 ns->rx_missed_errors = 0;
2486 /* detailed tx_errors */
2487 ns->tx_aborted_errors = 0;
2488 ns->tx_carrier_errors = 0;
2489 ns->tx_fifo_errors = 0;
2490 ns->tx_heartbeat_errors = 0;
2491 ns->tx_window_errors = 0;
2493 ns->tx_errors = stats.tx_error_frames;
2494 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2495 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2498 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2501 int ret = 0, prtad, devad;
2502 struct port_info *pi = netdev_priv(dev);
2503 struct adapter *adapter = pi->adapter;
2504 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2508 if (pi->mdio_addr < 0)
2510 data->phy_id = pi->mdio_addr;
2514 if (mdio_phy_id_is_c45(data->phy_id)) {
2515 prtad = mdio_phy_id_prtad(data->phy_id);
2516 devad = mdio_phy_id_devad(data->phy_id);
2517 } else if (data->phy_id < 32) {
2518 prtad = data->phy_id;
2520 data->reg_num &= 0x1f;
2524 mbox = pi->adapter->pf;
2525 if (cmd == SIOCGMIIREG)
2526 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2527 data->reg_num, &data->val_out);
2529 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2530 data->reg_num, data->val_in);
2533 return copy_to_user(req->ifr_data, &pi->tstamp_config,
2534 sizeof(pi->tstamp_config)) ?
2537 if (copy_from_user(&pi->tstamp_config, req->ifr_data,
2538 sizeof(pi->tstamp_config)))
2541 if (!is_t4(adapter->params.chip)) {
2542 switch (pi->tstamp_config.tx_type) {
2543 case HWTSTAMP_TX_OFF:
2544 case HWTSTAMP_TX_ON:
2550 switch (pi->tstamp_config.rx_filter) {
2551 case HWTSTAMP_FILTER_NONE:
2552 pi->rxtstamp = false;
2554 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2555 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2556 cxgb4_ptprx_timestamping(pi, pi->port_id,
2559 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2560 cxgb4_ptprx_timestamping(pi, pi->port_id,
2563 case HWTSTAMP_FILTER_ALL:
2564 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2565 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2566 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2567 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2568 pi->rxtstamp = true;
2571 pi->tstamp_config.rx_filter =
2572 HWTSTAMP_FILTER_NONE;
2576 if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
2577 (pi->tstamp_config.rx_filter ==
2578 HWTSTAMP_FILTER_NONE)) {
2579 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
2580 pi->ptp_enable = false;
2583 if (pi->tstamp_config.rx_filter !=
2584 HWTSTAMP_FILTER_NONE) {
2585 if (cxgb4_ptp_redirect_rx_packet(adapter,
2587 pi->ptp_enable = true;
2590 /* For T4 Adapters */
2591 switch (pi->tstamp_config.rx_filter) {
2592 case HWTSTAMP_FILTER_NONE:
2593 pi->rxtstamp = false;
2595 case HWTSTAMP_FILTER_ALL:
2596 pi->rxtstamp = true;
2599 pi->tstamp_config.rx_filter =
2600 HWTSTAMP_FILTER_NONE;
2604 return copy_to_user(req->ifr_data, &pi->tstamp_config,
2605 sizeof(pi->tstamp_config)) ?
2613 static void cxgb_set_rxmode(struct net_device *dev)
2615 /* unfortunately we can't return errors to the stack */
2616 set_rxmode(dev, -1, false);
2619 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2622 struct port_info *pi = netdev_priv(dev);
2624 ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
2631 #ifdef CONFIG_PCI_IOV
2632 static int cxgb4_mgmt_open(struct net_device *dev)
2634 /* Turn carrier off since we don't have to transmit anything on this
2637 netif_carrier_off(dev);
2641 /* Fill MAC address that will be assigned by the FW */
2642 static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
2644 u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
2645 unsigned int i, vf, nvfs;
2650 adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
2652 err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
2656 na = adap->params.vpd.na;
2657 for (i = 0; i < ETH_ALEN; i++)
2658 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
2659 hex2val(na[2 * i + 1]));
2661 a = (hw_addr[0] << 8) | hw_addr[1];
2662 b = (hw_addr[1] << 8) | hw_addr[2];
2664 a |= 0x0200; /* locally assigned Ethernet MAC address */
2665 a &= ~0x0100; /* not a multicast Ethernet MAC address */
2666 macaddr[0] = a >> 8;
2667 macaddr[1] = a & 0xff;
2669 for (i = 2; i < 5; i++)
2670 macaddr[i] = hw_addr[i + 1];
2672 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
2674 macaddr[5] = adap->pf * 16 + vf;
2675 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
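/* Worked example (hypothetical values): if the folded 16-bit value is
 * a = 0x0143, then a |= 0x0200 gives 0x0343 and a &= ~0x0100 gives
 * 0x0243, so macaddr[0] = 0x02 (locally administered, unicast) and
 * macaddr[1] = 0x43. With PF 4 and VF 3 the last octet becomes
 * 4 * 16 + 3 = 0x43, keeping every PF/VF station MAC unique.
 */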
2679 static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
2681 struct port_info *pi = netdev_priv(dev);
2682 struct adapter *adap = pi->adapter;
2685 /* verify MAC addr is valid */
2686 if (!is_valid_ether_addr(mac)) {
2687 dev_err(pi->adapter->pdev_dev,
2688 "Invalid Ethernet address %pM for VF %d\n",
2693 dev_info(pi->adapter->pdev_dev,
2694 "Setting MAC %pM on VF %d\n", mac, vf);
2695 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
2697 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
2701 static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
2702 int vf, struct ifla_vf_info *ivi)
2704 struct port_info *pi = netdev_priv(dev);
2705 struct adapter *adap = pi->adapter;
2706 struct vf_info *vfinfo;
2708 if (vf >= adap->num_vfs)
2710 vfinfo = &adap->vfinfo[vf];
2713 ivi->max_tx_rate = vfinfo->tx_rate;
2714 ivi->min_tx_rate = 0;
2715 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
2716 ivi->vlan = vfinfo->vlan;
2720 static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
2721 struct netdev_phys_item_id *ppid)
2723 struct port_info *pi = netdev_priv(dev);
2724 unsigned int phy_port_id;
2726 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
2727 ppid->id_len = sizeof(phy_port_id);
2728 memcpy(ppid->id, &phy_port_id, ppid->id_len);
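/* Example: the second port (port_id 1) of the third adapter (adap_idx 2)
 * reports a physical port id of 2 * 10 + 1 = 21, so tooling such as
 * "ip -d link show" can tell ports of different adapters apart.
 */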
2732 static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
2733 int min_tx_rate, int max_tx_rate)
2735 struct port_info *pi = netdev_priv(dev);
2736 struct adapter *adap = pi->adapter;
2737 unsigned int link_ok, speed, mtu;
2738 u32 fw_pfvf, fw_class;
2743 if (vf >= adap->num_vfs)
2747 dev_err(adap->pdev_dev,
2748 "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
2753 ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
2754 if (ret != FW_SUCCESS) {
2755 dev_err(adap->pdev_dev,
2756 "Failed to get link information for VF %d\n", vf);
2761 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
2765 if (max_tx_rate > speed) {
2766 dev_err(adap->pdev_dev,
2767 "Max tx rate %d for VF %d can't be > link-speed %u",
2768 max_tx_rate, vf, speed);
2773 /* subtract ethhdr size and 4-byte CRC, since the f/w appends them */
2774 pktsize = pktsize - sizeof(struct ethhdr) - 4;
2775 /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
2776 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
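/* Worked example: for a link MTU of 1518 (Ethernet header and CRC
 * included), pktsize becomes 1518 - 14 - 4 = 1500 and then
 * 1500 - 20 - 20 = 1460, the classic IPv4 MSS used below when sizing
 * the rate-limiting traffic class.
 */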
2777 /* configure Traffic Class for rate-limiting */
2778 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
2779 SCHED_CLASS_LEVEL_CL_RL,
2780 SCHED_CLASS_MODE_CLASS,
2781 SCHED_CLASS_RATEUNIT_BITS,
2782 SCHED_CLASS_RATEMODE_ABS,
2783 pi->tx_chan, class_id, 0,
2784 max_tx_rate * 1000, 0, pktsize);
2786 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
2790 dev_info(adap->pdev_dev,
2791 "Class %d with MSS %u configured with rate %u\n",
2792 class_id, pktsize, max_tx_rate);
2794 /* bind VF to configured Traffic Class */
2795 fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2796 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
2797 fw_class = class_id;
2798 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
2801 dev_err(adap->pdev_dev,
2802 "Err %d in binding VF %d to Traffic Class %d\n",
2806 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
2807 adap->pf, vf, class_id);
2808 adap->vfinfo[vf].tx_rate = max_tx_rate;
2812 static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
2813 u16 vlan, u8 qos, __be16 vlan_proto)
2815 struct port_info *pi = netdev_priv(dev);
2816 struct adapter *adap = pi->adapter;
2819 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
2822 if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
2823 return -EPROTONOSUPPORT;
2825 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
2827 adap->vfinfo[vf].vlan = vlan;
2831 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
2832 ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
2835 #endif /* CONFIG_PCI_IOV */
2837 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2840 struct sockaddr *addr = p;
2841 struct port_info *pi = netdev_priv(dev);
2843 if (!is_valid_ether_addr(addr->sa_data))
2844 return -EADDRNOTAVAIL;
2846 ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
2847 pi->xact_addr_filt, addr->sa_data, true, true);
2851 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2852 pi->xact_addr_filt = ret;
2856 #ifdef CONFIG_NET_POLL_CONTROLLER
2857 static void cxgb_netpoll(struct net_device *dev)
2859 struct port_info *pi = netdev_priv(dev);
2860 struct adapter *adap = pi->adapter;
2862 if (adap->flags & USING_MSIX) {
2864 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2866 for (i = pi->nqsets; i; i--, rx++)
2867 t4_sge_intr_msix(0, &rx->rspq);
2869 t4_intr_handler(adap)(0, adap);
2873 static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
2875 struct port_info *pi = netdev_priv(dev);
2876 struct adapter *adap = pi->adapter;
2877 struct sched_class *e;
2878 struct ch_sched_params p;
2879 struct ch_sched_queue qe;
2883 if (!can_sched(dev))
2886 if (index < 0 || index > pi->nqsets - 1)
2889 if (!(adap->flags & FULL_INIT_DONE)) {
2890 dev_err(adap->pdev_dev,
2891 "Failed to rate limit on queue %d. Link Down?\n",
2896 /* Convert from Mbps to Kbps */
2897 req_rate = rate * 1000;
2899 /* Max rate is 100 Gbps */
2900 if (req_rate > SCHED_MAX_RATE_KBPS) {
2901 dev_err(adap->pdev_dev,
2902 "Invalid rate %u Mbps, Max rate is %u Mbps\n",
2903 rate, SCHED_MAX_RATE_KBPS / 1000);
2907 /* First unbind the queue from any existing class */
2908 memset(&qe, 0, sizeof(qe));
2910 qe.class = SCHED_CLS_NONE;
2912 err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
2914 dev_err(adap->pdev_dev,
2915 "Unbinding Queue %d on port %d fail. Err: %d\n",
2916 index, pi->port_id, err);
2920 /* Queue already unbound */
2924 /* Fetch any available unused or matching scheduling class */
2925 memset(&p, 0, sizeof(p));
2926 p.type = SCHED_CLASS_TYPE_PACKET;
2927 p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
2928 p.u.params.mode = SCHED_CLASS_MODE_CLASS;
2929 p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
2930 p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
2931 p.u.params.channel = pi->tx_chan;
2932 p.u.params.class = SCHED_CLS_NONE;
2933 p.u.params.minrate = 0;
2934 p.u.params.maxrate = req_rate;
2935 p.u.params.weight = 0;
2936 p.u.params.pktsize = dev->mtu;
2938 e = cxgb4_sched_class_alloc(dev, &p);
2942 /* Bind the queue to a scheduling class */
2943 memset(&qe, 0, sizeof(qe));
2947 err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
2949 dev_err(adap->pdev_dev,
2950 "Queue rate limiting failed. Err: %d\n", err);
2954 static int cxgb_setup_tc_flower(struct net_device *dev,
2955 struct tc_cls_flower_offload *cls_flower)
2957 switch (cls_flower->command) {
2958 case TC_CLSFLOWER_REPLACE:
2959 return cxgb4_tc_flower_replace(dev, cls_flower);
2960 case TC_CLSFLOWER_DESTROY:
2961 return cxgb4_tc_flower_destroy(dev, cls_flower);
2962 case TC_CLSFLOWER_STATS:
2963 return cxgb4_tc_flower_stats(dev, cls_flower);
2969 static int cxgb_setup_tc_cls_u32(struct net_device *dev,
2970 struct tc_cls_u32_offload *cls_u32)
2972 switch (cls_u32->command) {
2973 case TC_CLSU32_NEW_KNODE:
2974 case TC_CLSU32_REPLACE_KNODE:
2975 return cxgb4_config_knode(dev, cls_u32);
2976 case TC_CLSU32_DELETE_KNODE:
2977 return cxgb4_delete_knode(dev, cls_u32);
2983 static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2986 struct net_device *dev = cb_priv;
2987 struct port_info *pi = netdev2pinfo(dev);
2988 struct adapter *adap = netdev2adap(dev);
2990 if (!(adap->flags & FULL_INIT_DONE)) {
2991 dev_err(adap->pdev_dev,
2992 "Failed to setup tc on port %d. Link Down?\n",
2997 if (!tc_cls_can_offload_and_chain0(dev, type_data))
3001 case TC_SETUP_CLSU32:
3002 return cxgb_setup_tc_cls_u32(dev, type_data);
3003 case TC_SETUP_CLSFLOWER:
3004 return cxgb_setup_tc_flower(dev, type_data);
3010 static int cxgb_setup_tc_block(struct net_device *dev,
3011 struct tc_block_offload *f)
3013 struct port_info *pi = netdev2pinfo(dev);
3015 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3018 switch (f->command) {
3020 return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
3021 pi, dev, f->extack);
3022 case TC_BLOCK_UNBIND:
3023 tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
3030 static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
3034 case TC_SETUP_BLOCK:
3035 return cxgb_setup_tc_block(dev, type_data);
3041 static void cxgb_del_udp_tunnel(struct net_device *netdev,
3042 struct udp_tunnel_info *ti)
3044 struct port_info *pi = netdev_priv(netdev);
3045 struct adapter *adapter = pi->adapter;
3046 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
3047 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3050 if (chip_ver < CHELSIO_T6)
3054 case UDP_TUNNEL_TYPE_VXLAN:
3055 if (!adapter->vxlan_port_cnt ||
3056 adapter->vxlan_port != ti->port)
3057 return; /* Invalid VxLAN destination port */
3059 adapter->vxlan_port_cnt--;
3060 if (adapter->vxlan_port_cnt)
3063 adapter->vxlan_port = 0;
3064 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
3066 case UDP_TUNNEL_TYPE_GENEVE:
3067 if (!adapter->geneve_port_cnt ||
3068 adapter->geneve_port != ti->port)
3069 return; /* Invalid GENEVE destination port */
3071 adapter->geneve_port_cnt--;
3072 if (adapter->geneve_port_cnt)
3075 adapter->geneve_port = 0;
3076 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3081 /* Matchall mac entries can be deleted only after all tunnel ports
3082 * are brought down or removed.
3084 if (!adapter->rawf_cnt)
3086 for_each_port(adapter, i) {
3087 pi = adap2pinfo(adapter, i);
3088 ret = t4_free_raw_mac_filt(adapter, pi->viid,
3089 match_all_mac, match_all_mac,
3090 adapter->rawf_start +
3092 1, pi->port_id, false);
3094 netdev_info(netdev, "Failed to free mac filter entry for port %d\n",
3098 atomic_dec(&adapter->mps_encap[adapter->rawf_start +
3099 pi->port_id].refcnt);
3103 static void cxgb_add_udp_tunnel(struct net_device *netdev,
3104 struct udp_tunnel_info *ti)
3106 struct port_info *pi = netdev_priv(netdev);
3107 struct adapter *adapter = pi->adapter;
3108 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
3109 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3112 if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
3116 case UDP_TUNNEL_TYPE_VXLAN:
3117 /* Callback for adding vxlan port can be called with the same
3118 * port for both IPv4 and IPv6. We should not disable the
3119 * offloading when the same port for both protocols is added
3120 * and later one of them is removed.
3122 if (adapter->vxlan_port_cnt &&
3123 adapter->vxlan_port == ti->port) {
3124 adapter->vxlan_port_cnt++;
3128 /* We will support only one VxLAN port */
3129 if (adapter->vxlan_port_cnt) {
3130 netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
3131 be16_to_cpu(adapter->vxlan_port),
3132 be16_to_cpu(ti->port));
3136 adapter->vxlan_port = ti->port;
3137 adapter->vxlan_port_cnt = 1;
3139 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
3140 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3142 case UDP_TUNNEL_TYPE_GENEVE:
3143 if (adapter->geneve_port_cnt &&
3144 adapter->geneve_port == ti->port) {
3145 adapter->geneve_port_cnt++;
3149 /* We will support only one GENEVE port */
3150 if (adapter->geneve_port_cnt) {
3151 netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
3152 be16_to_cpu(adapter->geneve_port),
3153 be16_to_cpu(ti->port));
3157 adapter->geneve_port = ti->port;
3158 adapter->geneve_port_cnt = 1;
3160 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3161 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3166 /* Create a 'match all' mac filter entry for inner mac,
3167 * if raw mac interface is supported. Once the linux kernel provides
3168 * driver entry points for adding/deleting the inner mac addresses,
3169 * we will remove this 'match all' entry and fall back to adding
3170 * exact match filters.
3172 for_each_port(adapter, i) {
3173 pi = adap2pinfo(adapter, i);
3175 ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3178 adapter->rawf_start +
3180 1, pi->port_id, false);
3182 netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
3183 be16_to_cpu(ti->port));
3184 cxgb_del_udp_tunnel(netdev, ti);
3187 atomic_inc(&adapter->mps_encap[ret].refcnt);
3191 static netdev_features_t cxgb_features_check(struct sk_buff *skb,
3192 struct net_device *dev,
3193 netdev_features_t features)
3195 struct port_info *pi = netdev_priv(dev);
3196 struct adapter *adapter = pi->adapter;
3198 if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3201 /* Check if hw supports offload for this packet */
3202 if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
3205 /* Offload is not supported for this encapsulated packet */
3206 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3209 static netdev_features_t cxgb_fix_features(struct net_device *dev,
3210 netdev_features_t features)
3212 /* Disable GRO if RX_CSUM is disabled */
3213 if (!(features & NETIF_F_RXCSUM))
3214 features &= ~NETIF_F_GRO;
3219 static const struct net_device_ops cxgb4_netdev_ops = {
3220 .ndo_open = cxgb_open,
3221 .ndo_stop = cxgb_close,
3222 .ndo_start_xmit = t4_start_xmit,
3223 .ndo_select_queue = cxgb_select_queue,
3224 .ndo_get_stats64 = cxgb_get_stats,
3225 .ndo_set_rx_mode = cxgb_set_rxmode,
3226 .ndo_set_mac_address = cxgb_set_mac_addr,
3227 .ndo_set_features = cxgb_set_features,
3228 .ndo_validate_addr = eth_validate_addr,
3229 .ndo_do_ioctl = cxgb_ioctl,
3230 .ndo_change_mtu = cxgb_change_mtu,
3231 #ifdef CONFIG_NET_POLL_CONTROLLER
3232 .ndo_poll_controller = cxgb_netpoll,
3234 #ifdef CONFIG_CHELSIO_T4_FCOE
3235 .ndo_fcoe_enable = cxgb_fcoe_enable,
3236 .ndo_fcoe_disable = cxgb_fcoe_disable,
3237 #endif /* CONFIG_CHELSIO_T4_FCOE */
3238 .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
3239 .ndo_setup_tc = cxgb_setup_tc,
3240 .ndo_udp_tunnel_add = cxgb_add_udp_tunnel,
3241 .ndo_udp_tunnel_del = cxgb_del_udp_tunnel,
3242 .ndo_features_check = cxgb_features_check,
3243 .ndo_fix_features = cxgb_fix_features,
3246 #ifdef CONFIG_PCI_IOV
3247 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3248 .ndo_open = cxgb4_mgmt_open,
3249 .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
3250 .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
3251 .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
3252 .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
3253 .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
3257 static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
3258 struct ethtool_drvinfo *info)
3260 struct adapter *adapter = netdev2adap(dev);
3262 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3263 strlcpy(info->version, cxgb4_driver_version,
3264 sizeof(info->version));
3265 strlcpy(info->bus_info, pci_name(adapter->pdev),
3266 sizeof(info->bus_info));
3269 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
3270 .get_drvinfo = cxgb4_mgmt_get_drvinfo,
3273 static void notify_fatal_err(struct work_struct *work)
3275 struct adapter *adap;
3277 adap = container_of(work, struct adapter, fatal_err_notify_task);
3278 notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
3281 void t4_fatal_err(struct adapter *adap)
3285 if (pci_channel_offline(adap->pdev))
3288 /* Disable the SGE since ULDs are going to free resources that
3289 * could be exposed to the adapter. RDMA MWs for example...
3291 t4_shutdown_adapter(adap);
3292 for_each_port(adap, port) {
3293 struct net_device *dev = adap->port[port];
3295 /* If we get here in very early initialization the network
3296 * devices may not have been set up yet.
3301 netif_tx_stop_all_queues(dev);
3302 netif_carrier_off(dev);
3304 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3305 queue_work(adap->workq, &adap->fatal_err_notify_task);
3308 static void setup_memwin(struct adapter *adap)
3310 u32 nic_win_base = t4_get_util_window(adap);
3312 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3315 static void setup_memwin_rdma(struct adapter *adap)
3317 if (adap->vres.ocq.size) {
3321 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3322 start &= PCI_BASE_ADDRESS_MEM_MASK;
3323 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3324 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3326 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3327 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3329 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3330 adap->vres.ocq.start);
3332 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3336 /* HMA Definitions */
3338 /* The maximum number of addresses that can be sent in a single FW cmd */
3339 #define HMA_MAX_ADDR_IN_CMD 5
3341 #define HMA_PAGE_SIZE PAGE_SIZE
3343 #define HMA_MAX_NO_FW_ADDRESS (16 << 10) /* FW supports 16K addresses */
3345 #define HMA_PAGE_ORDER \
3346 ((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ? \
3347 ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
3349 /* The minimum and maximum possible HMA sizes that can be specified in the FW
3350 * configuration (in units of MB).
3352 #define HMA_MIN_TOTAL_SIZE 1
3353 #define HMA_MAX_TOTAL_SIZE \
3354 (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) * \
3355 HMA_MAX_NO_FW_ADDRESS) >> 20)
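/* Worked sizing example, assuming 4KB pages: HMA_PAGE_ORDER is
 * ilog2(16384 / 4096) = 2, so each scatterlist chunk spans
 * 4KB << 2 = 16KB, and HMA_MAX_TOTAL_SIZE evaluates to
 * (16KB * 16384) >> 20 = 256MB of host memory addressable by the FW.
 */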
3357 static void adap_free_hma_mem(struct adapter *adapter)
3359 struct scatterlist *iter;
3363 if (!adapter->hma.sgt)
3366 if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
3367 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
3368 adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
3369 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
3372 for_each_sg(adapter->hma.sgt->sgl, iter,
3373 adapter->hma.sgt->orig_nents, i) {
3374 page = sg_page(iter);
3376 __free_pages(page, HMA_PAGE_ORDER);
3379 kfree(adapter->hma.phy_addr);
3380 sg_free_table(adapter->hma.sgt);
3381 kfree(adapter->hma.sgt);
3382 adapter->hma.sgt = NULL;
3385 static int adap_config_hma(struct adapter *adapter)
3387 struct scatterlist *sgl, *iter;
3388 struct sg_table *sgt;
3389 struct page *newpage;
3390 unsigned int i, j, k;
3391 u32 param, hma_size;
3397 /* HMA is supported only for T6+ cards.
3398 * Avoid initializing HMA in kdump kernels.
3400 if (is_kdump_kernel() ||
3401 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3404 /* Get the HMA region size required by fw */
3405 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3406 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
3407 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3408 1, &param, &hma_size);
3409 /* An error means the card has its own memory or HMA is not supported by
3410 * the firmware. Return without any errors.
3412 if (ret || !hma_size)
3415 if (hma_size < HMA_MIN_TOTAL_SIZE ||
3416 hma_size > HMA_MAX_TOTAL_SIZE) {
3417 dev_err(adapter->pdev_dev,
3418 "HMA size %uMB beyond bounds(%u-%lu)MB\n",
3419 hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
3423 page_size = HMA_PAGE_SIZE;
3424 page_order = HMA_PAGE_ORDER;
3425 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
3426 if (unlikely(!adapter->hma.sgt)) {
3427 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
3430 sgt = adapter->hma.sgt;
3431 /* FW returns the size in MB; convert it to the number of sg chunks.
3433 sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
3434 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
3435 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
3436 kfree(adapter->hma.sgt);
3437 adapter->hma.sgt = NULL;
3441 sgl = adapter->hma.sgt->sgl;
3442 node = dev_to_node(adapter->pdev_dev);
3443 for_each_sg(sgl, iter, sgt->orig_nents, i) {
3444 newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
3445 __GFP_ZERO, page_order);
3447 dev_err(adapter->pdev_dev,
3448 "Not enough memory for HMA page allocation\n");
3452 sg_set_page(iter, newpage, page_size << page_order, 0);
3455 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
3458 dev_err(adapter->pdev_dev,
3459 "Not enough memory for HMA DMA mapping");
3463 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
3465 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
3467 if (unlikely(!adapter->hma.phy_addr))
3470 for_each_sg(sgl, iter, sgt->nents, i) {
3471 newpage = sg_page(iter);
3472 adapter->hma.phy_addr[i] = sg_dma_address(iter);
3475 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
3476 /* Pass on the addresses to firmware */
3477 for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
3478 struct fw_hma_cmd hma_cmd;
3479 u8 naddr = HMA_MAX_ADDR_IN_CMD;
3480 u8 soc = 0, eoc = 0;
3481 u8 hma_mode = 1; /* Presently we support only Page table mode */
3483 soc = (i == 0) ? 1 : 0;
3484 eoc = (i == ncmds - 1) ? 1 : 0;
3486 /* For last cmd, set naddr corresponding to remaining
3489 if (i == ncmds - 1) {
3490 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
3491 naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
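/* e.g. with sgt->nents = 12 mapped chunks: ncmds = DIV_ROUND_UP(12, 5)
 * = 3, the first two commands carry 5 addresses each, and this last
 * command carries 12 % 5 = 2.
 */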
3493 memset(&hma_cmd, 0, sizeof(hma_cmd));
3494 hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
3495 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3496 hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
3498 hma_cmd.mode_to_pcie_params =
3499 htonl(FW_HMA_CMD_MODE_V(hma_mode) |
3500 FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
3502 /* HMA cmd size is specified in MB */
3503 hma_cmd.naddr_size =
3504 htonl(FW_HMA_CMD_SIZE_V(hma_size) |
3505 FW_HMA_CMD_NADDR_V(naddr));
3507 /* Total Page size specified in units of 4K */
3508 hma_cmd.addr_size_pkd =
3509 htonl(FW_HMA_CMD_ADDR_SIZE_V
3510 ((page_size << page_order) >> 12));
3512 /* Fill up to HMA_MAX_ADDR_IN_CMD (5) addresses */
3513 for (j = 0; j < naddr; j++) {
3514 hma_cmd.phy_address[j] =
3515 cpu_to_be64(adapter->hma.phy_addr[j + k]);
3517 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
3518 sizeof(hma_cmd), &hma_cmd);
3520 dev_err(adapter->pdev_dev,
3521 "HMA FW command failed with err %d\n", ret);
3527 dev_info(adapter->pdev_dev,
3528 "Reserved %uMB host memory for HMA\n", hma_size);
3532 adap_free_hma_mem(adapter);
3536 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3541 /* Now that we've successfully configured and initialized the adapter,
3542 * we can ask the Firmware what resources it has provisioned for us.
3544 ret = t4_get_pfres(adap);
3546 dev_err(adap->pdev_dev,
3547 "Unable to retrieve resource provisioning information\n");
3551 /* get device capabilities */
3552 memset(c, 0, sizeof(*c));
3553 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3554 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3555 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
3556 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
3560 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3561 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3562 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
3566 ret = t4_config_glbl_rss(adap, adap->pf,
3567 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3568 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3569 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
3573 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
3574 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3581 /* tweak some settings */
3582 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
3583 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
3584 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3585 v = t4_read_reg(adap, TP_PIO_DATA_A);
3586 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
3588 /* first 4 Tx modulation queues point to consecutive Tx channels */
3589 adap->params.tp.tx_modq_map = 0xE4;
3590 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3591 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
3593 /* associate each Tx modulation queue with consecutive Tx channels */
3595 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3596 &v, 1, TP_TX_SCHED_HDR_A);
3597 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3598 &v, 1, TP_TX_SCHED_FIFO_A);
3599 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3600 &v, 1, TP_TX_SCHED_PCMD_A);
3602 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3603 if (is_offload(adap)) {
3604 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3605 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3606 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3607 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3608 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3609 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3610 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3611 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3612 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3613 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3616 /* get basic stuff going */
3617 return t4_early_init(adap, adap->pf);
3621 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
3623 #define MAX_ATIDS 8192U
3626 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3628 * If the firmware we're dealing with has Configuration File support, then
3629 * we use that to perform all configuration
3633 * Tweak configuration based on module parameters, etc. Most of these have
3634 * defaults assigned to them by Firmware Configuration Files (if we're using
3635 * them) but need to be explicitly set if we're using hard-coded
3636 * initialization. But even in the case of using Firmware Configuration
3637 * Files, we'd like to expose the ability to change these via module
3638 * parameters so these are essentially common tweaks/settings for
3639 * Configuration Files and hard-coded initialization ...
3641 static int adap_init0_tweaks(struct adapter *adapter)
3644 * Fix up various Host-Dependent Parameters like Page Size, Cache
3645 * Line Size, etc. The firmware default is for a 4KB Page Size and
3646 * 64B Cache Line Size ...
3648 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3651 * Process module parameters which affect early initialization.
3653 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3654 dev_err(&adapter->pdev->dev,
3655 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3659 t4_set_reg_field(adapter, SGE_CONTROL_A,
3660 PKTSHIFT_V(PKTSHIFT_M),
3661 PKTSHIFT_V(rx_dma_offset));
3664 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3665 * adds the pseudo header itself.
3667 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3668 CSUM_HAS_PSEUDO_HDR_F, 0);
3673 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
3674 * unto themselves, and they contain their own firmware to perform their tasks.
3677 static int phy_aq1202_version(const u8 *phy_fw_data,
3682 /* At offset 0x8 you're looking for the primary image's
3683 * starting offset which is 3 Bytes wide
3685 * At offset 0xa of the primary image, you look for the offset
3686 * of the DRAM segment which is 3 Bytes wide.
3688 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
3691 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
3692 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3693 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
3695 offset = le24(phy_fw_data + 0x8) << 12;
3696 offset = le24(phy_fw_data + offset + 0xa);
3697 return be16(phy_fw_data + offset + 0x27e);
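/* Illustrative walk (hypothetical image bytes): if the 3 little-endian
 * bytes at 0x8 are 01 00 00, the primary image starts at 0x1 << 12 =
 * 0x1000; if the 3 bytes at 0x1000 + 0xa are 00 20 00, the DRAM segment
 * starts at 0x2000, and the version is the big-endian 16-bit word at
 * 0x2000 + 0x27e.
 */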
3704 static struct info_10gbt_phy_fw {
3705 unsigned int phy_fw_id; /* PCI Device ID */
3706 char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */
3707 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
3708 int phy_flash; /* Has FLASH for PHY Firmware */
3709 } phy_info_array[] = {
3711 PHY_AQ1202_DEVICEID,
3712 PHY_AQ1202_FIRMWARE,
3717 PHY_BCM84834_DEVICEID,
3718 PHY_BCM84834_FIRMWARE,
3725 static struct info_10gbt_phy_fw *find_phy_info(int devid)
3729 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
3730 if (phy_info_array[i].phy_fw_id == devid)
3731 return &phy_info_array[i];
3736 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
3737 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
3738 * we return a negative error number. If we transfer new firmware we return 1
3739 * (from t4_load_phy_fw()). If we don't do anything we return 0.
3741 static int adap_init0_phy(struct adapter *adap)
3743 const struct firmware *phyf;
3745 struct info_10gbt_phy_fw *phy_info;
3747 /* Use the device ID to determine which PHY file to flash.
3749 phy_info = find_phy_info(adap->pdev->device);
3751 dev_warn(adap->pdev_dev,
3752 "No PHY Firmware file found for this PHY\n");
3756 /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
3757 * use that. The adapter firmware provides us with a memory buffer
3758 * where we can load a PHY firmware file from the host if we want to
3759 * override the PHY firmware File in flash.
3761 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
3764 /* For adapters without FLASH attached to PHY for their
3765 * firmware, it's obviously a fatal error if we can't get the
3766 * firmware to the adapter. For adapters with PHY firmware
3767 * FLASH storage, it's worth a warning if we can't find the
3768 * PHY Firmware but we'll neuter the error ...
3770 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3771 "/lib/firmware/%s, error %d\n",
3772 phy_info->phy_fw_file, -ret);
3773 if (phy_info->phy_flash) {
3774 int cur_phy_fw_ver = 0;
3776 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3777 dev_warn(adap->pdev_dev, "continuing with on-adapter "
3778 "FLASH copy, version %#x\n", cur_phy_fw_ver);
3785 /* Load PHY Firmware onto adapter.
3787 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3788 phy_info->phy_fw_version,
3789 (u8 *)phyf->data, phyf->size);
3791 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
3794 int new_phy_fw_ver = 0;
3796 if (phy_info->phy_fw_version)
3797 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3799 dev_info(adap->pdev_dev, "Successfully transferred PHY "
3800 "Firmware /lib/firmware/%s, version %#x\n",
3801 phy_info->phy_fw_file, new_phy_fw_ver);
3804 release_firmware(phyf);
3810 * Attempt to initialize the adapter via a Firmware Configuration File.
3812 static int adap_init0_config(struct adapter *adapter, int reset)
3814 struct fw_caps_config_cmd caps_cmd;
3815 const struct firmware *cf;
3816 unsigned long mtype = 0, maddr = 0;
3817 u32 finiver, finicsum, cfcsum;
3819 int config_issued = 0;
3820 char *fw_config_file, fw_config_file_path[256];
3821 char *config_name = NULL;
3824 * Reset device if necessary.
3827 ret = t4_fw_reset(adapter, adapter->mbox,
3828 PIORSTMODE_F | PIORST_F);
3833 /* If this is a 10Gb/s-BT adapter make sure the chip-external
3834 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
3835 * to be performed after any global adapter RESET above since some
3836 * PHYs only have local RAM copies of the PHY firmware.
3838 if (is_10gbt_device(adapter->pdev->device)) {
3839 ret = adap_init0_phy(adapter);
3844 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3845 * then use that. Otherwise, use the configuration file stored
3846 * in the adapter flash ...
3848 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
3850 fw_config_file = FW4_CFNAME;
3853 fw_config_file = FW5_CFNAME;
3856 fw_config_file = FW6_CFNAME;
3859 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3860 adapter->pdev->device);
3865 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
3867 config_name = "On FLASH";
3868 mtype = FW_MEMTYPE_CF_FLASH;
3869 maddr = t4_flash_cfg_addr(adapter);
3871 u32 params[7], val[7];
3873 sprintf(fw_config_file_path,
3874 "/lib/firmware/%s", fw_config_file);
3875 config_name = fw_config_file_path;
3877 if (cf->size >= FLASH_CFG_MAX_SIZE)
3880 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3881 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3882 ret = t4_query_params(adapter, adapter->mbox,
3883 adapter->pf, 0, 1, params, val);
3886 * For t4_memory_rw() below addresses and
3887 * sizes have to be in terms of multiples of 4
3888 * bytes. So, if the Configuration File isn't
3889 * a multiple of 4 bytes in length we'll have
3890 * to write that out separately since we can't
3891 * guarantee that the bytes following the
3892 * residual byte in the buffer returned by
3893 * request_firmware() are zeroed out ...
3895 size_t resid = cf->size & 0x3;
3896 size_t size = cf->size & ~0x3;
3897 __be32 *data = (__be32 *)cf->data;
3899 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3900 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
3902 spin_lock(&adapter->win0_lock);
3903 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3904 size, data, T4_MEMORY_WRITE);
3905 if (ret == 0 && resid != 0) {
3912 last.word = data[size >> 2];
3913 for (i = resid; i < 4; i++)
3915 ret = t4_memory_rw(adapter, 0, mtype,
3920 spin_unlock(&adapter->win0_lock);
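/* Example of the residual handling above: a 1030-byte Configuration
 * File gives size = 1030 & ~0x3 = 1028 and resid = 2, so the first
 * 1028 bytes go out via t4_memory_rw() and the final 2 bytes are
 * zero-padded into one last 32-bit word before being written.
 */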
3924 release_firmware(cf);
3930 * Issue a Capability Configuration command to the firmware to get it
3931 * to parse the Configuration File. We don't use t4_fw_config_file()
3932 * because we want the ability to modify various features after we've
3933 * processed the configuration file ...
3935 memset(&caps_cmd, 0, sizeof(caps_cmd));
3936 caps_cmd.op_to_write =
3937 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3940 caps_cmd.cfvalid_to_len16 =
3941 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3942 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3943 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
3944 FW_LEN16(caps_cmd));
3945 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3948 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
3949 * Configuration File in FLASH), our last gasp effort is to use the
3950 * Firmware Configuration File which is embedded in the firmware. A
3951 * very few early versions of the firmware didn't have one embedded
3952 * but we can ignore those.
3954 if (ret == -ENOENT) {
3955 memset(&caps_cmd, 0, sizeof(caps_cmd));
3956 caps_cmd.op_to_write =
3957 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3960 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3961 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3962 sizeof(caps_cmd), &caps_cmd);
3963 config_name = "Firmware Default";
3970 finiver = ntohl(caps_cmd.finiver);
3971 finicsum = ntohl(caps_cmd.finicsum);
3972 cfcsum = ntohl(caps_cmd.cfcsum);
3973 if (finicsum != cfcsum)
3974 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3975 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3979 * And now tell the firmware to use the configuration we just loaded.
3981 caps_cmd.op_to_write =
3982 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3985 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3986 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3992 * Tweak configuration based on system architecture, module
3995 ret = adap_init0_tweaks(adapter);
3999 /* We will proceed even if HMA init fails. */
4000 ret = adap_config_hma(adapter);
4002 dev_err(adapter->pdev_dev,
4003 "HMA configuration failed with error %d\n", ret);
4006 * And finally tell the firmware to initialize itself using the
4007 * parameters from the Configuration File.
4009 ret = t4_fw_initialize(adapter, adapter->mbox);
4013 /* Emit Firmware Configuration File information and return
4016 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4017 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4018 config_name, finiver, cfcsum);
4022 * Something bad happened. Return the error ... (If the "error"
4023 * is that there's no Configuration File on the adapter we don't
4024 * want to issue a warning since this is fairly common.)
4027 if (config_issued && ret != -ENOENT)
4028 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4033 static struct fw_info fw_info_array[] = {
4036 .fs_name = FW4_CFNAME,
4037 .fw_mod_name = FW4_FNAME,
4039 .chip = FW_HDR_CHIP_T4,
4040 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
4041 .intfver_nic = FW_INTFVER(T4, NIC),
4042 .intfver_vnic = FW_INTFVER(T4, VNIC),
4043 .intfver_ri = FW_INTFVER(T4, RI),
4044 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4045 .intfver_fcoe = FW_INTFVER(T4, FCOE),
4049 .fs_name = FW5_CFNAME,
4050 .fw_mod_name = FW5_FNAME,
4052 .chip = FW_HDR_CHIP_T5,
4053 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
4054 .intfver_nic = FW_INTFVER(T5, NIC),
4055 .intfver_vnic = FW_INTFVER(T5, VNIC),
4056 .intfver_ri = FW_INTFVER(T5, RI),
4057 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4058 .intfver_fcoe = FW_INTFVER(T5, FCOE),
4062 .fs_name = FW6_CFNAME,
4063 .fw_mod_name = FW6_FNAME,
4065 .chip = FW_HDR_CHIP_T6,
4066 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
4067 .intfver_nic = FW_INTFVER(T6, NIC),
4068 .intfver_vnic = FW_INTFVER(T6, VNIC),
4069 .intfver_ofld = FW_INTFVER(T6, OFLD),
4070 .intfver_ri = FW_INTFVER(T6, RI),
4071 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4072 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4073 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4074 .intfver_fcoe = FW_INTFVER(T6, FCOE),
4080 static struct fw_info *find_fw_info(int chip)
4084 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
4085 if (fw_info_array[i].chip == chip)
4086 return &fw_info_array[i];
4092 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4094 static int adap_init0(struct adapter *adap)
4098 enum dev_state state;
4099 u32 params[7], val[7];
4100 struct fw_caps_config_cmd caps_cmd;
4103 /* Grab Firmware Device Log parameters as early as possible so we have
4104 * access to it for debugging, etc.
4106 ret = t4_init_devlog_params(adap);
4110 /* Contact FW, advertising Master capability */
4111 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4112 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
4114 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4118 if (ret == adap->mbox)
4119 adap->flags |= MASTER_PF;
4122 * If we're the Master PF Driver and the device is uninitialized,
4123 * then let's consider upgrading the firmware ... (We always want
4124 * to check the firmware version number in order to A. get it for
4125 * later reporting and B. to warn if the currently loaded firmware
4126 * is excessively mismatched relative to the driver.)
4129 t4_get_version_info(adap);
4130 ret = t4_check_fw_version(adap);
4131 /* If firmware is too old (not supported by driver) force an update. */
4133 state = DEV_STATE_UNINIT;
4134 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4135 struct fw_info *fw_info;
4136 struct fw_hdr *card_fw;
4137 const struct firmware *fw;
4138 const u8 *fw_data = NULL;
4139 unsigned int fw_size = 0;
4141 /* This is the firmware whose headers the driver was compiled
4144 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4145 if (fw_info == NULL) {
4146 dev_err(adap->pdev_dev,
4147 "unable to get firmware info for chip %d.\n",
4148 CHELSIO_CHIP_VERSION(adap->params.chip));
4152 /* allocate memory to read the header of the firmware on the
4155 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
4161 /* Get FW from /lib/firmware/ */
4162 ret = request_firmware(&fw, fw_info->fw_mod_name,
4165 dev_err(adap->pdev_dev,
4166 "unable to load firmware image %s, error %d\n",
4167 fw_info->fw_mod_name, ret);
4173 /* upgrade FW logic */
4174 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4178 release_firmware(fw);
4185 /* If the firmware is initialized already, emit a simple note to that
4186 * effect. Otherwise, it's time to try initializing the adapter.
4188 if (state == DEV_STATE_INIT) {
4189 ret = adap_config_hma(adap);
4191 dev_err(adap->pdev_dev,
4192 "HMA configuration failed with error %d\n",
4194 dev_info(adap->pdev_dev, "Coming up as %s: "\
4195 "Adapter already initialized\n",
4196 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4198 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4199 "Initializing adapter\n");
4201 /* Find out whether we're dealing with a version of the
4202 * firmware which has configuration file support.
4204 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4205 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4206 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4209 /* If the firmware doesn't support Configuration Files,
4213 dev_err(adap->pdev_dev, "firmware doesn't support "
4214 "Firmware Configuration Files\n");
4218 /* The firmware provides us with a memory buffer where we can
4219 * load a Configuration File from the host if we want to
4220 * override the Configuration File in flash.
4222 ret = adap_init0_config(adap, reset);
4223 if (ret == -ENOENT) {
4224 dev_err(adap->pdev_dev, "no Configuration File "
4225 "present on adapter.\n");
4229 dev_err(adap->pdev_dev, "could not initialize "
4230 "adapter, error %d\n", -ret);
4235 /* Now that we've successfully configured and initialized the adapter
4236 * (or found it already initialized), we can ask the Firmware what
4237 * resources it has provisioned for us.
4239 ret = t4_get_pfres(adap);
4241 dev_err(adap->pdev_dev,
4242 "Unable to retrieve resource provisioning information\n");
4246 /* Grab VPD parameters. This should be done after we establish a
4247 * connection to the firmware since some of the VPD parameters
4248 * (notably the Core Clock frequency) are retrieved via requests to
4249 * the firmware. On the other hand, we need these fairly early on
4250 * so we do this right after getting ahold of the firmware.
4252 * We need to do this after initializing the adapter because someone
4253 * could have FLASHed a new VPD which won't be read by the firmware
4254 * until we do the RESET ...
4256 ret = t4_get_vpd_params(adap, &adap->params.vpd);
4260 /* Find out what ports are available to us. Note that we need to do
4261 * this before calling adap_init0_no_config() since it needs nports
4265 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4266 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
4267 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4271 adap->params.nports = hweight32(port_vec);
4272 adap->params.portvec = port_vec;
4274 /* Give the SGE code a chance to pull in anything that it needs ...
4275 * Note that this must be called after we retrieve our VPD parameters
4276 * in order to know how to convert core ticks to seconds, etc.
4278 ret = t4_sge_init(adap);
4282 if (is_bypass_device(adap->pdev->device))
4283 adap->params.bypass = 1;
4286 * Grab some of our basic fundamental operating parameters.
4288 #define FW_PARAM_DEV(param) \
4289 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
4290 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
4292 #define FW_PARAM_PFVF(param) \
4293 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
4294 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
4295 FW_PARAMS_PARAM_Y_V(0) | \
4296 FW_PARAMS_PARAM_Z_V(0)
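/* A composed parameter is a single 32-bit selector; for instance,
 * FW_PARAM_PFVF(L2T_START) packs the PFVF mnemonic and the L2T_START
 * index into one word that t4_query_params() hands to the firmware,
 * which replies with the matching value in the val[] array.
 */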
4298 params[0] = FW_PARAM_PFVF(EQ_START);
4299 params[1] = FW_PARAM_PFVF(L2T_START);
4300 params[2] = FW_PARAM_PFVF(L2T_END);
4301 params[3] = FW_PARAM_PFVF(FILTER_START);
4302 params[4] = FW_PARAM_PFVF(FILTER_END);
4303 params[5] = FW_PARAM_PFVF(IQFLINT_START);
4304 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4307 adap->sge.egr_start = val[0];
4308 adap->l2t_start = val[1];
4309 adap->l2t_end = val[2];
4310 adap->tids.ftid_base = val[3];
4311 adap->tids.nftids = val[4] - val[3] + 1;
4312 adap->sge.ingr_start = val[5];
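/* The *_START/*_END values are inclusive, hence the "+ 1" above: e.g.
 * FILTER_START = 1120 and FILTER_END = 1343 (hypothetical) yield
 * nftids = 1343 - 1120 + 1 = 224 usable filter TIDs.
 */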
4314 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4315 /* Read the raw mps entries. In T6, the last 2 tcam entries
4316 * are reserved for raw mac addresses (rawf = 2, one per port).
4318 params[0] = FW_PARAM_PFVF(RAWF_START);
4319 params[1] = FW_PARAM_PFVF(RAWF_END);
4320 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4323 adap->rawf_start = val[0];
4324 adap->rawf_cnt = val[1] - val[0] + 1;
4328 /* qids (ingress/egress) returned from firmware can be anywhere
4329 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
4330 * Hence the driver needs to allocate memory for this range to
4331 * store the queue info. Get the highest IQFLINT/EQ index returned
4332 * in FW_EQ_*_CMD.alloc command.
4334 params[0] = FW_PARAM_PFVF(EQ_END);
4335 params[1] = FW_PARAM_PFVF(IQFLINT_END);
4336 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4339 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
4340 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
4342 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
4343 sizeof(*adap->sge.egr_map), GFP_KERNEL);
4344 if (!adap->sge.egr_map) {
4349 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
4350 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
4351 if (!adap->sge.ingr_map) {
4356 /* Allocate the memory for the various egress queue bitmaps,
4357 * i.e. starving_fl, txq_maperr and blocked_fl.
4359 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4360 sizeof(long), GFP_KERNEL);
4361 if (!adap->sge.starving_fl) {
4366 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4367 sizeof(long), GFP_KERNEL);
4368 if (!adap->sge.txq_maperr) {
4373 #ifdef CONFIG_DEBUG_FS
4374 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4375 sizeof(long), GFP_KERNEL);
4376 if (!adap->sge.blocked_fl) {
4382 params[0] = FW_PARAM_PFVF(CLIP_START);
4383 params[1] = FW_PARAM_PFVF(CLIP_END);
4384 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4387 adap->clipt_start = val[0];
4388 adap->clipt_end = val[1];
4390 /* We don't yet have a PARAMs call to retrieve the number of Traffic
4391 * Classes supported by the hardware/firmware so we hard code it here
4394 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
4396 /* query params related to active filter region */
4397 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4398 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4399 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4400 /* If the Active filter region size is set, we enable establishing
4401 * offload connections through firmware work requests
4403 if ((val[0] != val[1]) && (ret >= 0)) {
4404 adap->flags |= FW_OFLD_CONN;
4405 adap->tids.aftid_base = val[0];
4406 adap->tids.aftid_end = val[1];
4409 /* If we're running on newer firmware, let it know that we're
4410 * prepared to deal with encapsulated CPL messages. Older
4411 * firmware won't understand this and we'll just get
4412 * unencapsulated messages ...
4414 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4416 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
4419 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
4420 * capability. Earlier versions of the firmware didn't have the
4421 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
4422 * permission to use ULPTX MEMWRITE DSGL.
4424 if (is_t4(adap->params.chip)) {
4425 adap->params.ulptx_memwrite_dsgl = false;
4427 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
4428 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4430 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
4433 /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
4434 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
4435 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4437 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
4439 /* See if FW supports FW_FILTER2 work request */
4440 if (is_t4(adap->params.chip)) {
4441 adap->params.filter2_wr_support = 0;
4443 params[0] = FW_PARAM_DEV(FILTER2_WR);
4444 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4446 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
4450 * Get device capabilities so we can determine what resources we need
4453 memset(&caps_cmd, 0, sizeof(caps_cmd));
4454 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4455 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4456 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4457 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4462 if (caps_cmd.ofldcaps ||
4463 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
4464 /* query offload-related parameters */
4465 params[0] = FW_PARAM_DEV(NTID);
4466 params[1] = FW_PARAM_PFVF(SERVER_START);
4467 params[2] = FW_PARAM_PFVF(SERVER_END);
4468 params[3] = FW_PARAM_PFVF(TDDP_START);
4469 params[4] = FW_PARAM_PFVF(TDDP_END);
4470 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4471 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4475 adap->tids.ntids = val[0];
4476 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4477 adap->tids.stid_base = val[1];
4478 adap->tids.nstids = val[2] - val[1] + 1;
4480 * Setup server filter region. Divide the available filter
4481 * region into two parts. Regular filters get 1/3rd and server
4482 * filters get 2/3rd part. This is only enabled if the workaround
4483 * path is enabled.
4484 * 1. For regular filters.
4485 * 2. Server filters: these are special filters which are used
4486 * to redirect SYN packets to the offload queue.
4488 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4489 adap->tids.sftid_base = adap->tids.ftid_base +
4490 DIV_ROUND_UP(adap->tids.nftids, 3);
4491 adap->tids.nsftids = adap->tids.nftids -
4492 DIV_ROUND_UP(adap->tids.nftids, 3);
4493 adap->tids.nftids = adap->tids.sftid_base -
4494 adap->tids.ftid_base;
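/* For example (illustrative numbers): with nftids == 300, the split
 * above yields DIV_ROUND_UP(300, 3) == 100 regular filters starting at
 * ftid_base, and nsftids == 300 - 100 == 200 server filters starting
 * at sftid_base == ftid_base + 100.
 */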
4496 adap->vres.ddp.start = val[3];
4497 adap->vres.ddp.size = val[4] - val[3] + 1;
4498 adap->params.ofldq_wr_cred = val[5];
4500 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
4501 ret = init_hash_filter(adap);
4505 adap->params.offload = 1;
4506 adap->num_ofld_uld += 1;
4509 if (caps_cmd.rdmacaps) {
4510 params[0] = FW_PARAM_PFVF(STAG_START);
4511 params[1] = FW_PARAM_PFVF(STAG_END);
4512 params[2] = FW_PARAM_PFVF(RQ_START);
4513 params[3] = FW_PARAM_PFVF(RQ_END);
4514 params[4] = FW_PARAM_PFVF(PBL_START);
4515 params[5] = FW_PARAM_PFVF(PBL_END);
4516 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4520 adap->vres.stag.start = val[0];
4521 adap->vres.stag.size = val[1] - val[0] + 1;
4522 adap->vres.rq.start = val[2];
4523 adap->vres.rq.size = val[3] - val[2] + 1;
4524 adap->vres.pbl.start = val[4];
4525 adap->vres.pbl.size = val[5] - val[4] + 1;
4527 params[0] = FW_PARAM_PFVF(SRQ_START);
4528 params[1] = FW_PARAM_PFVF(SRQ_END);
4529 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4532 adap->vres.srq.start = val[0];
4533 adap->vres.srq.size = val[1] - val[0] + 1;
4535 if (adap->vres.srq.size) {
4536 adap->srq = t4_init_srq(adap->vres.srq.size);
4538 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
4541 params[0] = FW_PARAM_PFVF(SQRQ_START);
4542 params[1] = FW_PARAM_PFVF(SQRQ_END);
4543 params[2] = FW_PARAM_PFVF(CQ_START);
4544 params[3] = FW_PARAM_PFVF(CQ_END);
4545 params[4] = FW_PARAM_PFVF(OCQ_START);
4546 params[5] = FW_PARAM_PFVF(OCQ_END);
4547 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
4551 adap->vres.qp.start = val[0];
4552 adap->vres.qp.size = val[1] - val[0] + 1;
4553 adap->vres.cq.start = val[2];
4554 adap->vres.cq.size = val[3] - val[2] + 1;
4555 adap->vres.ocq.start = val[4];
4556 adap->vres.ocq.size = val[5] - val[4] + 1;
4558 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
4559 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4560 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
4563 adap->params.max_ordird_qp = 8;
4564 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
4567 adap->params.max_ordird_qp = val[0];
4568 adap->params.max_ird_adapter = val[1];
4570 dev_info(adap->pdev_dev,
4571 "max_ordird_qp %d max_ird_adapter %d\n",
4572 adap->params.max_ordird_qp,
4573 adap->params.max_ird_adapter);
4575 /* Enable write_with_immediate if FW supports it */
4576 params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
4577 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
4579 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
4581 /* Enable write_cmpl if FW supports it */
4582 params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
4583 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
4585 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
4586 adap->num_ofld_uld += 2;
4588 if (caps_cmd.iscsicaps) {
4589 params[0] = FW_PARAM_PFVF(ISCSI_START);
4590 params[1] = FW_PARAM_PFVF(ISCSI_END);
4591 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4595 adap->vres.iscsi.start = val[0];
4596 adap->vres.iscsi.size = val[1] - val[0] + 1;
4597 /* LIO target and cxgb4i initiator */
4598 adap->num_ofld_uld += 2;
4600 if (caps_cmd.cryptocaps) {
4601 if (ntohs(caps_cmd.cryptocaps) &
4602 FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
4603 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
4604 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4610 adap->vres.ncrypto_fc = val[0];
4612 adap->num_ofld_uld += 1;
4614 if (ntohs(caps_cmd.cryptocaps) &
4615 FW_CAPS_CONFIG_TLS_INLINE) {
4616 params[0] = FW_PARAM_PFVF(TLS_START);
4617 params[1] = FW_PARAM_PFVF(TLS_END);
4618 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4622 adap->vres.key.start = val[0];
4623 adap->vres.key.size = val[1] - val[0] + 1;
4626 adap->params.crypto = ntohs(caps_cmd.cryptocaps);
4628 #undef FW_PARAM_PFVF
4631 /* The MTU/MSS Table is initialized by now, so load its values. If
4632 * we're initializing the adapter, then we'll make any modifications
4633 * we want to the MTU/MSS Table and also initialize the congestion
4636 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4637 if (state != DEV_STATE_INIT) {
4640 /* The default MTU Table contains values 1492 and 1500.
4641 * However, for TCP, it's better to have two values which are
4642 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
4643 * This allows us to have a TCP Data Payload which is a
4644 * multiple of 8 regardless of what combination of TCP Options
4645 * are in use (always a multiple of 4 bytes) which is
4646 * important for performance reasons. For instance, if no
4647 * options are in use, then we have a 20-byte IP header and a
4648 * 20-byte TCP header. In this case, a 1500-byte MSS would
4649 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
4650 * which is not a multiple of 8. So using an MSS of 1488 in
4651 * this case results in a TCP Data Payload of 1448 bytes which
4652 * is a multiple of 8. On the other hand, if 12-byte TCP Time
4653 * Stamps have been negotiated, then an MTU of 1500 bytes
4654 * results in a TCP Data Payload of 1448 bytes which, as
4655 * above, is a multiple of 8 bytes ...
4657 for (i = 0; i < NMTUS; i++)
4658 if (adap->params.mtus[i] == 1492) {
4659 adap->params.mtus[i] = 1488;
4663 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4664 adap->params.b_wnd);
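/* Purely illustrative sketch of the MSS arithmetic described above
 * (BUILD_BUG_ON() is used here only as a compile-time assertion; this
 * block is not part of the driver):
 */
#if 0
/* 20-byte IPv4 header + 20-byte base TCP header:
 *	1500 - 40      == 1460	(not a multiple of 8)
 *	1488 - 40      == 1448	(multiple of 8)
 *	1500 - 40 - 12 == 1448	(12-byte TCP Time Stamps)
 */
BUILD_BUG_ON((1488 - 40) % 8 != 0);
BUILD_BUG_ON((1500 - 40 - 12) % 8 != 0);
#endif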
4666 t4_init_sge_params(adap);
4667 adap->flags |= FW_OK;
4668 t4_init_tp_params(adap, true);
4672 * Something bad happened. If a command timed out or failed with EIO,
4673 * the FW is not operating within its spec or something catastrophic
4674 * happened to the HW/FW; stop issuing commands.
4677 adap_free_hma_mem(adap);
4678 kfree(adap->sge.egr_map);
4679 kfree(adap->sge.ingr_map);
4680 kfree(adap->sge.starving_fl);
4681 kfree(adap->sge.txq_maperr);
4682 #ifdef CONFIG_DEBUG_FS
4683 kfree(adap->sge.blocked_fl);
4685 if (ret != -ETIMEDOUT && ret != -EIO)
4686 t4_fw_bye(adap, adap->mbox);
4692 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4693 pci_channel_state_t state)
4696 struct adapter *adap = pci_get_drvdata(pdev);
4702 adap->flags &= ~FW_OK;
4703 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4704 spin_lock(&adap->stats_lock);
4705 for_each_port(adap, i) {
4706 struct net_device *dev = adap->port[i];
4708 netif_device_detach(dev);
4709 netif_carrier_off(dev);
4712 spin_unlock(&adap->stats_lock);
4713 disable_interrupts(adap);
4714 if (adap->flags & FULL_INIT_DONE)
4717 if ((adap->flags & DEV_ENABLED)) {
4718 pci_disable_device(pdev);
4719 adap->flags &= ~DEV_ENABLED;
4721 out: return state == pci_channel_io_perm_failure ?
4722 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4725 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4728 struct fw_caps_config_cmd c;
4729 struct adapter *adap = pci_get_drvdata(pdev);
4732 pci_restore_state(pdev);
4733 pci_save_state(pdev);
4734 return PCI_ERS_RESULT_RECOVERED;
4737 if (!(adap->flags & DEV_ENABLED)) {
4738 if (pci_enable_device(pdev)) {
4739 dev_err(&pdev->dev,
4740 "Cannot reenable PCI device after reset\n");
4741 return PCI_ERS_RESULT_DISCONNECT;
4743 adap->flags |= DEV_ENABLED;
4746 pci_set_master(pdev);
4747 pci_restore_state(pdev);
4748 pci_save_state(pdev);
4749 pci_cleanup_aer_uncorrect_error_status(pdev);
4751 if (t4_wait_dev_ready(adap->regs) < 0)
4752 return PCI_ERS_RESULT_DISCONNECT;
4753 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
4754 return PCI_ERS_RESULT_DISCONNECT;
4755 adap->flags |= FW_OK;
4756 if (adap_init1(adap, &c))
4757 return PCI_ERS_RESULT_DISCONNECT;
4759 for_each_port(adap, i) {
4760 struct port_info *p = adap2pinfo(adap, i);
4762 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
4765 return PCI_ERS_RESULT_DISCONNECT;
4767 p->xact_addr_filt = -1;
4770 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4771 adap->params.b_wnd);
4774 return PCI_ERS_RESULT_DISCONNECT;
4775 return PCI_ERS_RESULT_RECOVERED;
4778 static void eeh_resume(struct pci_dev *pdev)
4781 struct adapter *adap = pci_get_drvdata(pdev);
4787 for_each_port(adap, i) {
4788 struct net_device *dev = adap->port[i];
4790 if (netif_running(dev)) {
4792 cxgb_set_rxmode(dev);
4794 netif_device_attach(dev);
4800 static const struct pci_error_handlers cxgb4_eeh = {
4801 .error_detected = eeh_err_detected,
4802 .slot_reset = eeh_slot_reset,
4803 .resume = eeh_resume,
4806 /* Return true if the Link Configuration supports "High Speeds" (those greater than 1Gb/s).
4809 static inline bool is_x_10g_port(const struct link_config *lc)
4811 unsigned int speeds, high_speeds;
4813 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
4814 high_speeds = speeds &
4815 ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
4817 return high_speeds != 0;
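/* For example, a port whose pcaps advertise 100M, 1G and 25G is left
 * with high_speeds == FW_PORT_CAP32_SPEED_25G and so counts as a
 * "high speed" port; a 100M/1G-only port does not.
 */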
4821 * Perform default configuration of DMA queues depending on the number and type
4822 * of ports we found and the number of available CPUs. Most settings can be
4823 * modified by the admin prior to actual use.
4825 static int cfg_queues(struct adapter *adap)
4827 struct sge *s = &adap->sge;
4828 int i, n10g = 0, qidx = 0;
4829 int niqflint, neq, avail_eth_qsets;
4830 int max_eth_qsets = 32;
4831 #ifndef CONFIG_CHELSIO_T4_DCB
4835 /* Reduce memory usage in the kdump environment by disabling all offload.
4837 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
4838 adap->params.offload = 0;
4839 adap->params.crypto = 0;
4842 /* Calculate the number of Ethernet Queue Sets available based on
4843 * resources provisioned for us. We always have an Asynchronous
4844 * Firmware Event Ingress Queue. If we're operating in MSI or Legacy
4845 * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
4846 * Ingress Queue. Meanwhile, we need two Egress Queues for each
4847 * Queue Set: one for the Free List and one for the Ethernet TX Queue.
4849 * Note that we should also take into account all of the various
4850 * Offload Queues. But, in any situation where we're operating in
4851 * a Resource Constrained Provisioning environment, doing any Offload
4852 * at all is problematic ...
4854 niqflint = adap->params.pfres.niqflint - 1;
4855 if (!(adap->flags & USING_MSIX))
4857 neq = adap->params.pfres.neq / 2;
4858 avail_eth_qsets = min(niqflint, neq);
4860 if (avail_eth_qsets > max_eth_qsets)
4861 avail_eth_qsets = max_eth_qsets;
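/* For example (illustrative provisioning): with pfres.niqflint == 40
 * and pfres.neq == 64 under MSI-X, niqflint == 39 and neq == 32, so
 * avail_eth_qsets == min(39, 32) == 32, which also happens to be the
 * max_eth_qsets cap.
 */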
4863 if (avail_eth_qsets < adap->params.nports) {
4864 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
4865 avail_eth_qsets, adap->params.nports);
4869 /* Count the number of 10Gb/s or better ports */
4870 for_each_port(adap, i)
4871 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4873 #ifdef CONFIG_CHELSIO_T4_DCB
4874 /* For Data Center Bridging support we need to be able to support up
4875 * to 8 Traffic Priorities; each of which will be assigned to its
4876 * own TX Queue in order to prevent Head-Of-Line Blocking.
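/* For example, a 4-port adapter needs 4 * 8 == 32 Ethernet Queue Sets
 * to give every port its 8 Traffic Priority TX Queues.
 */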
4878 if (adap->params.nports * 8 > avail_eth_qsets) {
4879 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
4880 avail_eth_qsets, adap->params.nports * 8);
4884 for_each_port(adap, i) {
4885 struct port_info *pi = adap2pinfo(adap, i);
4887 pi->first_qset = qidx;
4888 pi->nqsets = is_kdump_kernel() ? 1 : 8;
4891 #else /* !CONFIG_CHELSIO_T4_DCB */
4893 * We default to 1 queue per non-10G port and up to # of cores queues per 10G port.
4897 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
4898 if (q10g > netif_get_num_default_rss_queues())
4899 q10g = netif_get_num_default_rss_queues();
4901 if (is_kdump_kernel())
4904 for_each_port(adap, i) {
4905 struct port_info *pi = adap2pinfo(adap, i);
4907 pi->first_qset = qidx;
4908 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
4911 #endif /* !CONFIG_CHELSIO_T4_DCB */
4914 s->max_ethqsets = qidx; /* MSI-X may lower it later */
4918 * For offload we use 1 queue/channel if all ports are up to 1G,
4919 * otherwise we divide all available queues amongst the channels
4920 * capped by the number of available cores.
4923 i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
4924 s->ofldqsets = roundup(i, adap->params.nports);
4926 s->ofldqsets = adap->params.nports;
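/* For example, with 6 online CPUs and 4 ports (and MAX_OFLD_QSETS
 * permitting), roundup(6, 4) == 8, i.e. two offload queue sets per
 * channel.
 */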
4930 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4931 struct sge_eth_rxq *r = &s->ethrxq[i];
4933 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
4937 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4938 s->ethtxq[i].q.size = 1024;
4940 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4941 s->ctrlq[i].q.size = 512;
4943 if (!is_t4(adap->params.chip))
4944 s->ptptxq.q.size = 8;
4946 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4947 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
4953 * Reduce the number of Ethernet queues across all ports to at most n.
4954 * n provides at least one queue per port.
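/* For example, 4 ports with 4 queues each (16 total) reduced to
 * n == 10 are trimmed round-robin to nqsets of {2, 2, 3, 3}.
 */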
4956 static void reduce_ethqs(struct adapter *adap, int n)
4959 struct port_info *pi;
4961 while (n < adap->sge.ethqsets)
4962 for_each_port(adap, i) {
4963 pi = adap2pinfo(adap, i);
4964 if (pi->nqsets > 1) {
4966 adap->sge.ethqsets--;
4967 if (adap->sge.ethqsets <= n)
4973 for_each_port(adap, i) {
4974 pi = adap2pinfo(adap, i);
4980 static int get_msix_info(struct adapter *adap)
4982 struct uld_msix_info *msix_info;
4983 unsigned int max_ingq = 0;
4985 if (is_offload(adap))
4986 max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
4987 if (is_pci_uld(adap))
4988 max_ingq += MAX_OFLD_QSETS * adap->num_uld;
4993 msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
4997 adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
4998 sizeof(long), GFP_KERNEL);
4999 if (!adap->msix_bmap_ulds.msix_bmap) {
5003 spin_lock_init(&adap->msix_bmap_ulds.lock);
5004 adap->msix_info_ulds = msix_info;
5009 static void free_msix_info(struct adapter *adap)
5011 if (!(adap->num_uld && adap->num_ofld_uld))
5014 kfree(adap->msix_info_ulds);
5015 kfree(adap->msix_bmap_ulds.msix_bmap);
5018 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5019 #define EXTRA_VECS 2
5021 static int enable_msix(struct adapter *adap)
5023 int ofld_need = 0, uld_need = 0;
5024 int i, j, want, need, allocated;
5025 struct sge *s = &adap->sge;
5026 unsigned int nchan = adap->params.nports;
5027 struct msix_entry *entries;
5028 int max_ingq = MAX_INGQ;
5030 if (is_pci_uld(adap))
5031 max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
5032 if (is_offload(adap))
5033 max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
5034 entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
5040 if (get_msix_info(adap)) {
5041 adap->params.offload = 0;
5042 adap->params.crypto = 0;
5045 for (i = 0; i < max_ingq + 1; ++i)
5046 entries[i].entry = i;
5048 want = s->max_ethqsets + EXTRA_VECS;
5049 if (is_offload(adap)) {
5050 want += adap->num_ofld_uld * s->ofldqsets;
5051 ofld_need = adap->num_ofld_uld * nchan;
5053 if (is_pci_uld(adap)) {
5054 want += adap->num_uld * s->ofldqsets;
5055 uld_need = adap->num_uld * nchan;
5057 #ifdef CONFIG_CHELSIO_T4_DCB
5058 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for each port. */
5061 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
5063 need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
5065 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5066 if (allocated < 0) {
5067 dev_info(adap->pdev_dev,
5068 "not enough MSI-X vectors left, not using MSI-X\n");
5073 /* Distribute available vectors to the various queue groups.
5074 * Every group gets its minimum requirement and NIC gets top
5075 * priority for leftovers.
5077 i = allocated - EXTRA_VECS - ofld_need - uld_need;
5078 if (i < s->max_ethqsets) {
5079 s->max_ethqsets = i;
5080 if (i < s->ethqsets)
5081 reduce_ethqs(adap, i);
5084 if (allocated < want)
5085 s->nqs_per_uld = nchan;
5087 s->nqs_per_uld = s->ofldqsets;
5090 for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
5091 adap->msix_info[i].vec = entries[i].vector;
5093 for (j = 0 ; i < allocated; ++i, j++) {
5094 adap->msix_info_ulds[j].vec = entries[i].vector;
5095 adap->msix_info_ulds[j].idx = i;
5097 adap->msix_bmap_ulds.mapsize = j;
5099 dev_info(adap->pdev_dev,
5100 "%d MSI-X vectors allocated, nic %d per uld %d\n",
5101 allocated, s->max_ethqsets, s->nqs_per_uld);
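/* Worked example (illustrative numbers): a 2-port adapter with
 * max_ethqsets == 16, two offload ULDs and ofldqsets == 8 asks for
 * want == 16 + 2 + 2*8 == 34 vectors but, in the non-DCB case, can
 * make do with need == 2 + 2 + 2*2 == 8, trimming the Ethernet and
 * per-ULD queue sets to whatever was actually granted.
 */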
5109 static int init_rss(struct adapter *adap)
5114 err = t4_init_rss_mode(adap, adap->mbox);
5118 for_each_port(adap, i) {
5119 struct port_info *pi = adap2pinfo(adap, i);
5121 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5128 /* Dump basic information about the adapter */
5129 static void print_adapter_info(struct adapter *adapter)
5131 /* Hardware/Firmware/etc. Version/Revision IDs */
5132 t4_dump_version_info(adapter);
5134 /* Software/Hardware configuration */
5135 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
5136 is_offload(adapter) ? "R" : "",
5137 ((adapter->flags & USING_MSIX) ? "MSI-X" :
5138 (adapter->flags & USING_MSI) ? "MSI" : ""),
5139 is_offload(adapter) ? "Offload" : "non-Offload");
5142 static void print_port_info(const struct net_device *dev)
5146 const char *spd = "";
5147 const struct port_info *pi = netdev_priv(dev);
5148 const struct adapter *adap = pi->adapter;
5150 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5152 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5154 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
5157 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
5158 bufp += sprintf(bufp, "100M/");
5159 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
5160 bufp += sprintf(bufp, "1G/");
5161 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
5162 bufp += sprintf(bufp, "10G/");
5163 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
5164 bufp += sprintf(bufp, "25G/");
5165 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
5166 bufp += sprintf(bufp, "40G/");
5167 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
5168 bufp += sprintf(bufp, "50G/");
5169 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
5170 bufp += sprintf(bufp, "100G/");
5171 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
5172 bufp += sprintf(bufp, "200G/");
5173 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
5174 bufp += sprintf(bufp, "400G/");
5177 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
5179 netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
5180 dev->name, adap->params.vpd.id, adap->name, buf);
5184 * Free the following resources:
5185 * - memory used for tables
5188 * - resources FW is holding for us
5190 static void free_some_resources(struct adapter *adapter)
5194 kvfree(adapter->mps_encap);
5195 kvfree(adapter->smt);
5196 kvfree(adapter->l2t);
5197 kvfree(adapter->srq);
5198 t4_cleanup_sched(adapter);
5199 kvfree(adapter->tids.tid_tab);
5200 cxgb4_cleanup_tc_flower(adapter);
5201 cxgb4_cleanup_tc_u32(adapter);
5202 kfree(adapter->sge.egr_map);
5203 kfree(adapter->sge.ingr_map);
5204 kfree(adapter->sge.starving_fl);
5205 kfree(adapter->sge.txq_maperr);
5206 #ifdef CONFIG_DEBUG_FS
5207 kfree(adapter->sge.blocked_fl);
5209 disable_msi(adapter);
5211 for_each_port(adapter, i)
5212 if (adapter->port[i]) {
5213 struct port_info *pi = adap2pinfo(adapter, i);
5216 t4_free_vi(adapter, adapter->mbox, adapter->pf,
5218 kfree(adap2pinfo(adapter, i)->rss);
5219 free_netdev(adapter->port[i]);
5221 if (adapter->flags & FW_OK)
5222 t4_fw_bye(adapter, adapter->pf);
5225 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
5226 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
5227 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5228 #define SEGMENT_SIZE 128
5230 static int t4_get_chip_type(struct adapter *adap, int ver)
5232 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
5236 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
5238 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
5240 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
5247 #ifdef CONFIG_PCI_IOV
5248 static void cxgb4_mgmt_setup(struct net_device *dev)
5250 dev->type = ARPHRD_NONE;
5252 dev->hard_header_len = 0;
5254 dev->tx_queue_len = 0;
5255 dev->flags |= IFF_NOARP;
5256 dev->priv_flags |= IFF_NO_QUEUE;
5258 /* Initialize the device structure. */
5259 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
5260 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
5263 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
5265 struct adapter *adap = pci_get_drvdata(pdev);
5267 int current_vfs = pci_num_vf(pdev);
5270 pcie_fw = readl(adap->regs + PCIE_FW_A);
5271 /* Check if fw is initialized */
5272 if (!(pcie_fw & PCIE_FW_INIT_F)) {
5273 dev_warn(&pdev->dev, "Device not initialized\n");
5277 /* If any of the VFs is already assigned to a Guest OS, then
5278 * SR-IOV for the adapter cannot be modified.
5280 if (current_vfs && pci_vfs_assigned(pdev)) {
5282 "Cannot modify SR-IOV while VFs are assigned\n");
5285 /* Note that the upper-level code ensures that we're never called with
5286 * a non-zero "num_vfs" when we already have VFs instantiated. But
5287 * it never hurts to code defensively.
5289 if (num_vfs != 0 && current_vfs != 0)
5292 /* Nothing to do for no change. */
5293 if (num_vfs == current_vfs)
5296 /* Disable SR-IOV when zero is passed. */
5298 pci_disable_sriov(pdev);
5299 /* free VF Management Interface */
5300 unregister_netdev(adap->port[0]);
5301 free_netdev(adap->port[0]);
5302 adap->port[0] = NULL;
5304 /* free VF resources */
5306 kfree(adap->vfinfo);
5307 adap->vfinfo = NULL;
5312 struct fw_pfvf_cmd port_cmd, port_rpl;
5313 struct net_device *netdev;
5314 unsigned int pmask, port;
5315 struct pci_dev *pbridge;
5316 struct port_info *pi;
5317 char name[IFNAMSIZ];
5322 /* If we want to instantiate Virtual Functions, then our
5323 * parent bridge's PCI-E needs to support Alternative Routing-ID
5324 * Interpretation (ARI) because our VFs will show up at function offset 8 and above.
5327 pbridge = pdev->bus->self;
5328 pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
5329 pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
5330 pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
5332 if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
5333 !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
5334 /* Our parent bridge does not support ARI so issue a
5335 * warning and skip instantiating the VFs. They
5336 * won't be reachable.
5338 dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
5339 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
5340 PCI_FUNC(pbridge->devfn));
5343 memset(&port_cmd, 0, sizeof(port_cmd));
5344 port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
5347 FW_PFVF_CMD_PFN_V(adap->pf) |
5348 FW_PFVF_CMD_VFN_V(0));
5349 port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
5350 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
5354 pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
5355 port = ffs(pmask) - 1;
5356 /* Allocate VF Management Interface. */
5357 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
5359 netdev = alloc_netdev(sizeof(struct port_info),
5360 name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
5364 pi = netdev_priv(netdev);
5368 SET_NETDEV_DEV(netdev, &pdev->dev);
5370 adap->port[0] = netdev;
5373 err = register_netdev(adap->port[0]);
5375 pr_info("Unable to register VF mgmt netdev %s\n", name);
5376 free_netdev(adap->port[0]);
5377 adap->port[0] = NULL;
5380 /* Allocate and set up VF Information. */
5381 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
5382 sizeof(struct vf_info), GFP_KERNEL);
5383 if (!adap->vfinfo) {
5384 unregister_netdev(adap->port[0]);
5385 free_netdev(adap->port[0]);
5386 adap->port[0] = NULL;
5389 cxgb4_mgmt_fill_vf_station_mac_addr(adap);
5391 /* Instantiate the requested number of VFs. */
5392 err = pci_enable_sriov(pdev, num_vfs);
5394 pr_info("Unable to instantiate %d VFs\n", num_vfs);
5396 unregister_netdev(adap->port[0]);
5397 free_netdev(adap->port[0]);
5398 adap->port[0] = NULL;
5399 kfree(adap->vfinfo);
5400 adap->vfinfo = NULL;
5405 adap->num_vfs = num_vfs;
5408 #endif /* CONFIG_PCI_IOV */
5410 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5412 struct net_device *netdev;
5413 struct adapter *adapter;
5414 static int adap_idx = 1;
5415 int s_qpp, qpp, num_seg;
5416 struct port_info *pi;
5417 bool highdma = false;
5418 enum chip_type chip;
5425 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5427 err = pci_request_regions(pdev, KBUILD_MODNAME);
5429 /* Just info, some other driver may have claimed the device. */
5430 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5434 err = pci_enable_device(pdev);
5436 dev_err(&pdev->dev, "cannot enable PCI device\n");
5437 goto out_release_regions;
5440 regs = pci_ioremap_bar(pdev, 0);
5442 dev_err(&pdev->dev, "cannot map device registers\n");
5444 goto out_disable_device;
5447 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5450 goto out_unmap_bar0;
5453 adapter->regs = regs;
5454 err = t4_wait_dev_ready(regs);
5456 goto out_free_adapter;
5458 /* We control everything through one PF */
5459 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5460 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
5461 chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
5463 dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
5465 goto out_free_adapter;
5467 chip_ver = CHELSIO_CHIP_VERSION(chip);
5468 func = chip_ver <= CHELSIO_T5 ?
5469 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5471 adapter->pdev = pdev;
5472 adapter->pdev_dev = &pdev->dev;
5473 adapter->name = pci_name(pdev);
5474 adapter->mbox = func;
5476 adapter->params.chip = chip;
5477 adapter->adap_idx = adap_idx;
5478 adapter->msg_enable = DFLT_MSG_ENABLE;
5479 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
5480 (sizeof(struct mbox_cmd) *
5481 T4_OS_LOG_MBOX_CMDS),
5483 if (!adapter->mbox_log) {
5485 goto out_free_adapter;
5487 spin_lock_init(&adapter->mbox_lock);
5488 INIT_LIST_HEAD(&adapter->mlist.list);
5489 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
5490 pci_set_drvdata(pdev, adapter);
5492 if (func != ent->driver_data) {
5493 pci_disable_device(pdev);
5494 pci_save_state(pdev); /* to restore SR-IOV later */
5498 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5500 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5502 dev_err(&pdev->dev,
5503 "unable to obtain 64-bit DMA for coherent allocations\n");
5504 goto out_free_adapter;
5507 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5509 dev_err(&pdev->dev, "no usable DMA configuration\n");
5510 goto out_free_adapter;
5514 pci_enable_pcie_error_reporting(pdev);
5515 pci_set_master(pdev);
5516 pci_save_state(pdev);
5518 adapter->workq = create_singlethread_workqueue("cxgb4");
5519 if (!adapter->workq) {
5521 goto out_free_adapter;
5524 /* PCI device has been enabled */
5525 adapter->flags |= DEV_ENABLED;
5526 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
5528 /* If possible, we use the PCIe Relaxed Ordering Attribute to deliver
5529 * Ingress Packet Data to Free List Buffers in order to allow for
5530 * chipset performance optimizations between the Root Complex and
5531 * Memory Controllers. (Messages to the associated Ingress Queue
5532 * notifying new Packet Placement in the Free List Buffers will be
5533 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
5534 * all preceding PCIe Transaction Layer Packets will be processed
5535 * first.) But some Root Complexes have various issues with Upstream
5536 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
5537 * PCIe devices under such Root Complexes will have the Relaxed
5538 * Ordering bit cleared in their configuration space, so we check our
5539 * PCIe configuration space to see if it's flagged with advice against
5540 * using Relaxed Ordering.
5542 if (!pcie_relaxed_ordering_enabled(pdev))
5543 adapter->flags |= ROOT_NO_RELAXED_ORDERING;
5545 spin_lock_init(&adapter->stats_lock);
5546 spin_lock_init(&adapter->tid_release_lock);
5547 spin_lock_init(&adapter->win0_lock);
5549 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
5550 INIT_WORK(&adapter->db_full_task, process_db_full);
5551 INIT_WORK(&adapter->db_drop_task, process_db_drop);
5552 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
5554 err = t4_prep_adapter(adapter);
5556 goto out_free_adapter;
5558 if (is_kdump_kernel()) {
5559 /* Collect hardware state and append to /proc/vmcore */
5560 err = cxgb4_cudbg_vmcore_add_dump(adapter);
5562 dev_warn(adapter->pdev_dev,
5563 "Fail collecting vmcore device dump, err: %d. Continuing\n",
5569 if (!is_t4(adapter->params.chip)) {
5570 s_qpp = (QUEUESPERPAGEPF0_S +
5571 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
5573 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
5574 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
5575 num_seg = PAGE_SIZE / SEGMENT_SIZE;
5577 /* Each segment is 128B in size. Write coalescing is enabled only
5578 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF value for the queue is
5579 * less than the number of segments that fit in a page.
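/* For example, with 4KB pages num_seg == 4096 / 128 == 32, so a
 * queues-per-page value above 32 would defeat write coalescing and is
 * rejected below.
 */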
5582 if (qpp > num_seg) {
5584 "Incorrect number of egress queues per page\n");
5586 goto out_free_adapter;
5588 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
5589 pci_resource_len(pdev, 2));
5590 if (!adapter->bar2) {
5591 dev_err(&pdev->dev, "cannot map device bar2 region\n");
5593 goto out_free_adapter;
5597 setup_memwin(adapter);
5598 err = adap_init0(adapter);
5599 #ifdef CONFIG_DEBUG_FS
5600 bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
5602 setup_memwin_rdma(adapter);
5606 /* configure SGE_STAT_CFG_A to read WC stats */
5607 if (!is_t4(adapter->params.chip))
5608 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
5609 (is_t5(adapter->params.chip) ? STATMODE_V(0) :
5612 for_each_port(adapter, i) {
5613 netdev = alloc_etherdev_mq(sizeof(struct port_info),
5620 SET_NETDEV_DEV(netdev, &pdev->dev);
5622 adapter->port[i] = netdev;
5623 pi = netdev_priv(netdev);
5624 pi->adapter = adapter;
5625 pi->xact_addr_filt = -1;
5627 netdev->irq = pdev->irq;
5629 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
5630 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5631 NETIF_F_RXCSUM | NETIF_F_RXHASH |
5632 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
5635 if (chip_ver > CHELSIO_T5) {
5636 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
5639 NETIF_F_GSO_UDP_TUNNEL |
5640 NETIF_F_TSO | NETIF_F_TSO6;
5642 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
5646 netdev->hw_features |= NETIF_F_HIGHDMA;
5647 netdev->features |= netdev->hw_features;
5648 netdev->vlan_features = netdev->features & VLAN_FEAT;
5650 netdev->priv_flags |= IFF_UNICAST_FLT;
5652 /* MTU range: 81 - 9600 */
5653 netdev->min_mtu = 81; /* accommodate SACK */
5654 netdev->max_mtu = MAX_MTU;
5656 netdev->netdev_ops = &cxgb4_netdev_ops;
5657 #ifdef CONFIG_CHELSIO_T4_DCB
5658 netdev->dcbnl_ops = &cxgb4_dcb_ops;
5659 cxgb4_dcb_state_init(netdev);
5661 cxgb4_set_ethtool_ops(netdev);
5664 cxgb4_init_ethtool_dump(adapter);
5666 pci_set_drvdata(pdev, adapter);
5668 if (adapter->flags & FW_OK) {
5669 err = t4_port_init(adapter, func, func, 0);
5672 } else if (adapter->params.nports == 1) {
5673 /* If we don't have a connection to the firmware -- possibly
5674 * because of an error -- grab the raw VPD parameters so we
5675 * can set the proper MAC Address on the debug network
5676 * interface that we've created.
5678 u8 hw_addr[ETH_ALEN];
5679 u8 *na = adapter->params.vpd.na;
5681 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
5683 for (i = 0; i < ETH_ALEN; i++)
5684 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
5685 hex2val(na[2 * i + 1]));
5686 t4_set_hw_addr(adapter, 0, hw_addr);
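/* For instance, a VPD network address string of "000743000123"
 * (illustrative value) decodes to the MAC address 00:07:43:00:01:23.
 */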
5690 if (!(adapter->flags & FW_OK))
5691 goto fw_attach_fail;
5693 /* Configure queues and allocate tables now; they can be needed as
5694 * soon as the first register_netdev completes.
5696 err = cfg_queues(adapter);
5700 adapter->smt = t4_init_smt();
5701 if (!adapter->smt) {
5702 /* We tolerate a lack of SMT, giving up some functionality */
5703 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
5706 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
5707 if (!adapter->l2t) {
5708 /* We tolerate a lack of L2T, giving up some functionality */
5709 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
5710 adapter->params.offload = 0;
5713 adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size,
5714 sizeof(struct mps_encap_entry),
5716 if (!adapter->mps_encap)
5717 dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");
5719 #if IS_ENABLED(CONFIG_IPV6)
5720 if (chip_ver <= CHELSIO_T5 &&
5721 (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
5722 /* CLIP functionality is not present in the hardware,
5723 * so disable all offload features.
5725 dev_warn(&pdev->dev,
5726 "CLIP not enabled in hardware, continuing\n");
5727 adapter->params.offload = 0;
5729 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
5730 adapter->clipt_end);
5731 if (!adapter->clipt) {
5732 /* We tolerate a lack of clip_table, giving up
5733 * some functionality
5735 dev_warn(&pdev->dev,
5736 "could not allocate Clip table, continuing\n");
5737 adapter->params.offload = 0;
5742 for_each_port(adapter, i) {
5743 pi = adap2pinfo(adapter, i);
5744 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
5746 dev_warn(&pdev->dev,
5747 "could not activate scheduling on port %d\n",
5751 if (tid_init(&adapter->tids) < 0) {
5752 dev_warn(&pdev->dev, "could not allocate TID table, continuing\n");
5754 adapter->params.offload = 0;
5756 adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
5757 if (!adapter->tc_u32)
5758 dev_warn(&pdev->dev,
5759 "could not offload tc u32, continuing\n");
5761 if (cxgb4_init_tc_flower(adapter))
5762 dev_warn(&pdev->dev,
5763 "could not offload tc flower, continuing\n");
5766 if (is_offload(adapter) || is_hashfilter(adapter)) {
5767 if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
5768 u32 hash_base, hash_reg;
5770 if (chip_ver <= CHELSIO_T5) {
5771 hash_reg = LE_DB_TID_HASHBASE_A;
5772 hash_base = t4_read_reg(adapter, hash_reg);
5773 adapter->tids.hash_base = hash_base / 4;
5775 hash_reg = T6_LE_DB_HASH_TID_BASE_A;
5776 hash_base = t4_read_reg(adapter, hash_reg);
5777 adapter->tids.hash_base = hash_base;
5782 /* See what interrupts we'll be using */
5783 if (msi > 1 && enable_msix(adapter) == 0)
5784 adapter->flags |= USING_MSIX;
5785 else if (msi > 0 && pci_enable_msi(pdev) == 0) {
5786 adapter->flags |= USING_MSI;
5788 free_msix_info(adapter);
5791 /* check for PCI Express bandwidth capabilities */
5792 pcie_print_link_status(pdev);
5794 err = init_rss(adapter);
5798 err = setup_fw_sge_queues(adapter);
5800 dev_err(adapter->pdev_dev,
5801 "FW sge queue allocation failed, err %d", err);
5807 * The card is now ready to go. If any errors occur during device
5808 * registration, we do not fail the whole card but rather proceed only
5809 * with the ports we manage to register successfully. However, we must
5810 * register at least one net device.
5812 for_each_port(adapter, i) {
5813 pi = adap2pinfo(adapter, i);
5814 adapter->port[i]->dev_port = pi->lport;
5815 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
5816 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
5818 netif_carrier_off(adapter->port[i]);
5820 err = register_netdev(adapter->port[i]);
5823 adapter->chan_map[pi->tx_chan] = i;
5824 print_port_info(adapter->port[i]);
5827 dev_err(&pdev->dev, "could not register any net devices\n");
5831 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
5835 if (cxgb4_debugfs_root) {
5836 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
5837 cxgb4_debugfs_root);
5838 setup_debugfs(adapter);
5841 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5842 pdev->needs_freset = 1;
5844 if (is_uld(adapter)) {
5845 mutex_lock(&uld_mutex);
5846 list_add_tail(&adapter->list_node, &adapter_list);
5847 mutex_unlock(&uld_mutex);
5850 if (!is_t4(adapter->params.chip))
5851 cxgb4_ptp_init(adapter);
5853 print_adapter_info(adapter);
5857 t4_free_sge_resources(adapter);
5858 free_some_resources(adapter);
5859 if (adapter->flags & USING_MSIX)
5860 free_msix_info(adapter);
5861 if (adapter->num_uld || adapter->num_ofld_uld)
5862 t4_uld_mem_free(adapter);
5864 if (!is_t4(adapter->params.chip))
5865 iounmap(adapter->bar2);
5868 destroy_workqueue(adapter->workq);
5870 kfree(adapter->mbox_log);
5875 pci_disable_pcie_error_reporting(pdev);
5876 pci_disable_device(pdev);
5877 out_release_regions:
5878 pci_release_regions(pdev);
5882 static void remove_one(struct pci_dev *pdev)
5884 struct adapter *adapter = pci_get_drvdata(pdev);
5887 pci_release_regions(pdev);
5891 adapter->flags |= SHUTTING_DOWN;
5893 if (adapter->pf == 4) {
5896 /* Tear down per-adapter Work Queue first since it can contain
5897 * references to our adapter data structure.
5899 destroy_workqueue(adapter->workq);
5901 if (is_uld(adapter)) {
5902 detach_ulds(adapter);
5903 t4_uld_clean_up(adapter);
5906 adap_free_hma_mem(adapter);
5908 disable_interrupts(adapter);
5910 for_each_port(adapter, i)
5911 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5912 unregister_netdev(adapter->port[i]);
5914 debugfs_remove_recursive(adapter->debugfs_root);
5916 if (!is_t4(adapter->params.chip))
5917 cxgb4_ptp_stop(adapter);
5919 /* If we allocated filters, free up state associated with any valid filters. */
5922 clear_all_filters(adapter);
5924 if (adapter->flags & FULL_INIT_DONE)
5927 if (adapter->flags & USING_MSIX)
5928 free_msix_info(adapter);
5929 if (adapter->num_uld || adapter->num_ofld_uld)
5930 t4_uld_mem_free(adapter);
5931 free_some_resources(adapter);
5932 #if IS_ENABLED(CONFIG_IPV6)
5933 t4_cleanup_clip_tbl(adapter);
5935 if (!is_t4(adapter->params.chip))
5936 iounmap(adapter->bar2);
5938 #ifdef CONFIG_PCI_IOV
5940 cxgb4_iov_configure(adapter->pdev, 0);
5943 iounmap(adapter->regs);
5944 pci_disable_pcie_error_reporting(pdev);
5945 if ((adapter->flags & DEV_ENABLED)) {
5946 pci_disable_device(pdev);
5947 adapter->flags &= ~DEV_ENABLED;
5949 pci_release_regions(pdev);
5950 kfree(adapter->mbox_log);
5955 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
5956 * delivery. This is essentially a stripped down version of the PCI remove()
5957 * function where we do the minimal amount of work necessary to shutdown any
5960 static void shutdown_one(struct pci_dev *pdev)
5962 struct adapter *adapter = pci_get_drvdata(pdev);
5964 /* As with remove_one() above (see extended comment), we only want to
5965 * do cleanup on PCI Devices which went all the way through init_one()
5969 pci_release_regions(pdev);
5973 adapter->flags |= SHUTTING_DOWN;
5975 if (adapter->pf == 4) {
5978 for_each_port(adapter, i)
5979 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5980 cxgb_close(adapter->port[i]);
5982 if (is_uld(adapter)) {
5983 detach_ulds(adapter);
5984 t4_uld_clean_up(adapter);
5987 disable_interrupts(adapter);
5988 disable_msi(adapter);
5990 t4_sge_stop(adapter);
5991 if (adapter->flags & FW_OK)
5992 t4_fw_bye(adapter, adapter->mbox);
5996 static struct pci_driver cxgb4_driver = {
5997 .name = KBUILD_MODNAME,
5998 .id_table = cxgb4_pci_tbl,
6000 .remove = remove_one,
6001 .shutdown = shutdown_one,
6002 #ifdef CONFIG_PCI_IOV
6003 .sriov_configure = cxgb4_iov_configure,
6005 .err_handler = &cxgb4_eeh,
6008 static int __init cxgb4_init_module(void)
6012 /* Debugfs support is optional, just warn if this fails */
6013 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6014 if (!cxgb4_debugfs_root)
6015 pr_warn("could not create debugfs entry, continuing\n");
6017 ret = pci_register_driver(&cxgb4_driver);
6019 debugfs_remove(cxgb4_debugfs_root);
6021 #if IS_ENABLED(CONFIG_IPV6)
6022 if (!inet6addr_registered) {
6023 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6024 inet6addr_registered = true;
6031 static void __exit cxgb4_cleanup_module(void)
6033 #if IS_ENABLED(CONFIG_IPV6)
6034 if (inet6addr_registered) {
6035 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6036 inet6addr_registered = false;
6039 pci_unregister_driver(&cxgb4_driver);
6040 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6043 module_init(cxgb4_init_module);
6044 module_exit(cxgb4_cleanup_module);