/*************************************************************************
 * myri10ge.c: Myricom Myri-10G Ethernet driver.
 *
 * Copyright (C) 2005 - 2011 Myricom, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Myricom, Inc. nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * If the eeprom on your board is not recent enough, you will need to get a
 * newer firmware image at:
 *   http://www.myri.com/scs/download-Myri10GE.html
 *
 * Contact Information:
 *   <help@myri.com>
 *   Myricom, Inc., 325 N. Santa Anita Avenue, Arcadia, CA 91006
 *************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>

#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"

#define MYRI10GE_VERSION_STR "1.5.3-1.534"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");

#define MYRI10GE_MAX_ETHER_MTU 9014

#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4

#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)

#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff

#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
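
/*
 * Worked example (assuming 4 KiB pages): with MYRI10GE_ALLOC_ORDER 0,
 * MYRI10GE_ALLOC_SIZE is 4096, so a maximal 9014-byte frame spans
 * 9014/4096 + 1 = 3 receive fragments per frame.
 */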

#define MYRI10GE_MAX_SLICES 32

struct myri10ge_rx_buffer_state {
	struct page *page;
	int page_offset;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_tx_buffer_state {
	struct sk_buff *skb;
	int last;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_cmd {
	u32 data0;
	u32 data1;
	u32 data2;
};

struct myri10ge_rx_buf {
	struct mcp_kreq_ether_recv __iomem *lanai;	/* lanai ptr for recv ring */
	struct mcp_kreq_ether_recv *shadow;	/* host shadow of recv ring */
	struct myri10ge_rx_buffer_state *info;
	struct page *page;
	dma_addr_t bus;
	int page_offset;
	int cnt;
	int fill_cnt;
	int alloc_fail;
	int mask;		/* number of rx slots -1 */
	int watchdog_needed;
};

struct myri10ge_tx_buf {
	struct mcp_kreq_ether_send __iomem *lanai;	/* lanai ptr for sendq */
	__be32 __iomem *send_go;	/* "go" doorbell ptr */
	__be32 __iomem *send_stop;	/* "stop" doorbell ptr */
	struct mcp_kreq_ether_send *req_list;	/* host shadow of sendq */
	char *req_bytes;
	struct myri10ge_tx_buffer_state *info;
	int mask;		/* number of transmit slots -1 */
	int req ____cacheline_aligned;	/* transmit slots submitted */
	int pkt_start;		/* packets started */
	int stop_queue;
	int linearized;
	int done ____cacheline_aligned;	/* transmit slots completed */
	int pkt_done;		/* packets completed */
	int wake_queue;
	int queue_active;
};

struct myri10ge_rx_done {
	struct mcp_slot *entry;
	dma_addr_t bus;
	int cnt;
	int idx;
};

struct myri10ge_slice_netstats {
	unsigned long rx_packets;
	unsigned long tx_packets;
	unsigned long rx_bytes;
	unsigned long tx_bytes;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

struct myri10ge_slice_state {
	struct myri10ge_tx_buf tx;	/* transmit ring */
	struct myri10ge_rx_buf rx_small;
	struct myri10ge_rx_buf rx_big;
	struct myri10ge_rx_done rx_done;
	struct net_device *dev;
	struct napi_struct napi;
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_netstats stats;
	__be32 __iomem *irq_claim;
	struct mcp_irq_data *fw_stats;
	dma_addr_t fw_stats_bus;
	int watchdog_tx_done;
	int watchdog_rx_done;
#ifdef CONFIG_MYRI10GE_DCA
	int cached_dca_tag;
	int cpu;
	__be32 __iomem *dca_tag;
#endif
	char irq_desc[32];
};

struct myri10ge_priv {
	struct myri10ge_slice_state *ss;
	int tx_boundary;	/* boundary transmits cannot cross */
	int num_slices;
	int running;		/* running? */
	int small_bytes;
	int big_bytes;
	int max_intr_slots;
	struct net_device *dev;
	u8 __iomem *sram;
	int sram_size;
	unsigned long board_span;
	unsigned long iomem_base;
	__be32 __iomem *irq_deassert;
	char *mac_addr_string;
	struct mcp_cmd_response *cmd;
	dma_addr_t cmd_bus;
	struct pci_dev *pdev;
	int msi_enabled;
	int msix_enabled;
	struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
	int dca_enabled;
	int relaxed_order;
#endif
	u32 link_state;
	unsigned int rdma_tags_available;
	int intr_coal_delay;
	__be32 __iomem *intr_coal_delay_ptr;
	int down_cnt;
	wait_queue_head_t down_wq;
	struct work_struct watchdog_work;
	struct timer_list watchdog_timer;
	int watchdog_resets;
	int pause;
	bool fw_name_allocated;
	char *fw_name;
	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
	char *product_code_string;
	char fw_version[128];
	int fw_ver_major;
	int fw_ver_minor;
	int fw_ver_tiny;
	int adopted_rx_filter_bug;
	u8 mac_addr[ETH_ALEN];	/* eeprom mac address */
	unsigned long serial_number;
	int vendor_specific_offset;
	int fw_multicast_support;
	u32 features;
	u32 max_tso6;
	u32 read_dma;
	u32 write_dma;
	u32 read_write_dma;
	u32 link_changes;
	u32 msg_enable;
	unsigned int board_number;
};

static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");

/* Careful: must be accessed under kernel_param_lock() */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, 0644);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");

#define MYRI10GE_MAX_BOARDS 8
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
    {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
			 0444);
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
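
/*
 * Usage sketch (illustrative, not from the original source): per-board
 * firmware names can be supplied at module load time, e.g.
 *
 *	modprobe myri10ge \
 *	    myri10ge_fw_names=myri10ge_eth_z8e.dat,myri10ge_rss_eth_z8e.dat
 *
 * Entries left unset stay NULL (see the initializer above), and such
 * boards fall back to the driver's normal firmware selection.
 */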

static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, 0444);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");

static int myri10ge_small_bytes = -1;	/* -1 == auto */
module_param(myri10ge_small_bytes, int, 0644);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");

static int myri10ge_msi = 1;	/* enable msi by default */
module_param(myri10ge_msi, int, 0644);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");

static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, 0444);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");

static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, 0444);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");

static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, 0644);
MODULE_PARM_DESC(myri10ge_deassert_wait,
		 "Wait when deasserting legacy interrupts");

static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, 0444);
MODULE_PARM_DESC(myri10ge_force_firmware,
		 "Force firmware to assume aligned completions");

static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, 0444);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");

static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, 0444);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");

static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, 0444);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");

static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, 0444);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
		 "Set stuck legacy IRQ detection threshold");

#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK

static int myri10ge_debug = -1;	/* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");

static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, 0644);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");

static int myri10ge_reset_recover = 1;

static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, 0444);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");

static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, 0444);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");

static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, 0444);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");

#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))

#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
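
/*
 * Worked example: a 64-bit DMA address 0x0000000123456789 splits into
 * MYRI10GE_HIGHPART_TO_U32() = 0x00000001 and MYRI10GE_LOWPART_TO_U32()
 * = 0x23456789; on a 32-bit dma_addr_t the high part is simply 0.
 * myri10ge_pio_copy() moves size/8 aligned 64-bit words over PIO.
 */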

static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev);

static inline void put_be32(__be32 val, __be32 __iomem * p)
{
	__raw_writel((__force __u32) val, (__force void __iomem *)p);
}

static void myri10ge_get_stats(struct net_device *dev,
			       struct rtnl_link_stats64 *stats);

static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
	if (mgp->fw_name_allocated)
		kfree(mgp->fw_name);
	mgp->fw_name = name;
	mgp->fw_name_allocated = allocated;
}

static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
		  struct myri10ge_cmd *data, int atomic)
{
	struct mcp_cmd *buf;
	char buf_bytes[sizeof(*buf) + 8];
	struct mcp_cmd_response *response = mgp->cmd;
	char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
	u32 dma_low, dma_high, result, value;
	int sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);

	buf->data0 = htonl(data->data0);
	buf->data1 = htonl(data->data1);
	buf->data2 = htonl(data->data2);
	buf->cmd = htonl(cmd);
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf->response_addr.low = htonl(dma_low);
	buf->response_addr.high = htonl(dma_high);
	response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
	mb();
	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));

	/* wait up to 15ms. Longest command is the DMA benchmark,
	 * which is capped at 5ms, but runs from a timeout handler
	 * that runs every 7.8ms. So a 15ms timeout leaves us with
	 * a 2.2ms margin
	 */
	if (atomic) {
		/* if atomic is set, do not sleep,
		 * and try to get the completion quickly
		 * (1ms will be enough for those commands) */
		for (sleep_total = 0;
		     sleep_total < 1000 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total += 10) {
			udelay(10);
			mb();
		}
	} else {
		/* use msleep for most commands */
		for (sleep_total = 0;
		     sleep_total < 15 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total++)
			msleep(1);
	}

	result = ntohl(response->result);
	value = ntohl(response->data);
	if (result != MYRI10GE_NO_RESPONSE_RESULT) {
		if (result == 0) {
			data->data0 = value;
			return 0;
		} else if (result == MXGEFW_CMD_UNKNOWN) {
			return -ENOSYS;
		} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
			return -E2BIG;
		} else if (result == MXGEFW_CMD_ERROR_RANGE &&
			   cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
			   (data->
			    data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
			   0) {
			return -ERANGE;
		} else {
			dev_err(&mgp->pdev->dev,
				"command %d failed, result = %d\n",
				cmd, result);
			return -ENXIO;
		}
	}

	dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
		cmd, result);
	return -EAGAIN;
}

/*
 * The eeprom strings on the lanaiX have the format
 * SN=x\0
 * MAC=x:x:x:x:x:x\0
 * PT:ddd mmm xx xx:xx:xx xx\0
 * PV:ddd mmm xx xx:xx:xx xx\0
 */
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
	char *ptr, *limit;
	int i;

	ptr = mgp->eeprom_strings;
	limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;

	while (*ptr != '\0' && ptr < limit) {
		if (memcmp(ptr, "MAC=", 4) == 0) {
			ptr += 4;
			mgp->mac_addr_string = ptr;
			for (i = 0; i < 6; i++) {
				if ((ptr + 2) > limit)
					goto abort;
				mgp->mac_addr[i] =
				    simple_strtoul(ptr, &ptr, 16);
				ptr += 1;
			}
		}
		if (memcmp(ptr, "PC=", 3) == 0) {
			ptr += 3;
			mgp->product_code_string = ptr;
		}
		if (memcmp((const void *)ptr, "SN=", 3) == 0) {
			ptr += 3;
			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
		}
		while (ptr < limit && *ptr++) ;
	}

	return 0;

abort:
	dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
	return -ENXIO;
}
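
/*
 * Illustrative example (all values hypothetical): an EEPROM string block
 * such as "SN=123456\0MAC=00:60:dd:47:87:2f\0PC=10G-PCIE-8A-C\0" would be
 * parsed above into mgp->serial_number, mgp->mac_addr/mac_addr_string and
 * mgp->product_code_string respectively.
 */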

/*
 * Enable or disable periodic RDMAs from the host to make certain
 * chipsets resend dropped PCIe messages
 */

static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high;
	int i;

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a rdma command to the PCIe engine, and wait for the
	 * response in the confirmation address. The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
	buf[3] = htonl(dma_high);	/* dummy addr MSW */
	buf[4] = htonl(dma_low);	/* dummy addr LSW */
	buf[5] = htonl(enable);	/* enable? */

	submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
		msleep(1);
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
		dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
			(enable ? "enable" : "disable"));
}

static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
			   struct mcp_gen_header *hdr)
{
	struct device *dev = &mgp->pdev->dev;

	/* check firmware type */
	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
		dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
		return -EINVAL;
	}

	/* save firmware version for ethtool */
	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
	mgp->fw_version[sizeof(mgp->fw_version) - 1] = '\0';

	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);

	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
	      mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
			MXGEFW_VERSION_MINOR);
		return -EINVAL;
	}
	return 0;
}

static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
	unsigned crc, reread_crc;
	const struct firmware *fw;
	struct device *dev = &mgp->pdev->dev;
	unsigned char *fw_readback;
	struct mcp_gen_header *hdr;
	size_t hdr_offset;
	int status;
	unsigned i;

	if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
		dev_err(dev, "Unable to load %s firmware image via hotplug\n",
			mgp->fw_name);
		status = -EINVAL;
		goto abort_with_nothing;
	}

	/* check size */
	if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
	    fw->size < MCP_HEADER_PTR_OFFSET + 4) {
		dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
		status = -EINVAL;
		goto abort_with_fw;
	}

	/* check id */
	hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
		dev_err(dev, "Bad firmware file\n");
		status = -EINVAL;
		goto abort_with_fw;
	}
	hdr = (void *)(fw->data + hdr_offset);

	status = myri10ge_validate_firmware(mgp, hdr);
	if (status != 0)
		goto abort_with_fw;

	crc = crc32(~0, fw->data, fw->size);
	for (i = 0; i < fw->size; i += 256) {
		myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
				  fw->data + i,
				  min(256U, (unsigned)(fw->size - i)));
		mb();
		readb(mgp->sram);
	}
	fw_readback = vmalloc(fw->size);
	if (!fw_readback) {
		status = -ENOMEM;
		goto abort_with_fw;
	}
	/* corruption checking is good for parity recovery and buggy chipset */
	memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
	reread_crc = crc32(~0, fw_readback, fw->size);
	vfree(fw_readback);
	if (crc != reread_crc) {
		dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
			(unsigned)fw->size, reread_crc, crc);
		status = -ENXIO;
		goto abort_with_fw;
	}
	*size = (u32) fw->size;

abort_with_fw:
	release_firmware(fw);

abort_with_nothing:
	return status;
}

static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	const size_t bytes = sizeof(struct mcp_gen_header);
	size_t hdr_offset;
	int status;

	/* find running firmware header */
	hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));

	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_offset);
		return -EIO;
	}

	/* copy header of running firmware from SRAM to host memory to
	 * validate firmware */
	hdr = kmalloc(bytes, GFP_KERNEL);
	if (hdr == NULL)
		return -ENOMEM;

	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
	status = myri10ge_validate_firmware(mgp, hdr);
	kfree(hdr);

	/* check to see if adopted firmware has bug where adopting
	 * it will cause broadcasts to be filtered unless the NIC
	 * is kept in ALLMULTI mode */
	if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
	    mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
		mgp->adopted_rx_filter_bug = 1;
		dev_warn(dev, "Adopting fw %d.%d.%d: "
			 "working around rx filter bug\n",
			 mgp->fw_ver_major, mgp->fw_ver_minor,
			 mgp->fw_ver_tiny);
	}
	return status;
}

static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	int status;

	/* probe for IPv6 TSO support */
	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
				   &cmd, 0);
	if (status == 0) {
		mgp->max_tso6 = cmd.data0;
		mgp->features |= NETIF_F_TSO6;
	}

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev,
			"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
		return -ENXIO;
	}

	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));

	return 0;
}

static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high, size;
	int status, i;

	size = 0;
	status = myri10ge_load_hotplug_firmware(mgp, &size);
	if (status) {
		if (!adopt)
			return status;
		dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");

		/* Do not attempt to adopt firmware if there
		 * was a bad crc */
		if (status == -EIO)
			return status;

		status = myri10ge_adopt_running_firmware(mgp);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to adopt running firmware\n");
			return status;
		}
		dev_info(&mgp->pdev->dev,
			 "Successfully adopted running firmware\n");
		if (mgp->tx_boundary == 4096) {
			dev_warn(&mgp->pdev->dev,
				 "Using firmware currently running on NIC"
				 ".  For optimal\n");
			dev_warn(&mgp->pdev->dev,
				 "performance consider loading optimized "
				 "firmware\n");
			dev_warn(&mgp->pdev->dev, "via hotplug\n");
		}

		set_fw_name(mgp, "adopted", false);
		mgp->tx_boundary = 2048;
		myri10ge_dummy_rdma(mgp, 1);
		status = myri10ge_get_firmware_capabilities(mgp);
		return status;
	}

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a reload command to the bootstrap MCP, and wait for the
	 * response in the confirmation address. The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */

	/* FIX: All newest firmware should un-protect the bottom of
	 * the sram before handoff. However, the very first interfaces
	 * do not. Therefore the handoff copy must skip the first 8 bytes
	 */
	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8);	/* where the code starts */
	buf[4] = htonl(size - 8);	/* length of code */
	buf[5] = htonl(8);	/* where to copy to */
	buf[6] = htonl(0);	/* where to jump to */

	submit = mgp->sram + MXGEFW_BOOT_HANDOFF;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	mb();
	msleep(1);
	mb();
	i = 0;
	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
		msleep(1 << i);
		i++;
	}
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
		dev_err(&mgp->pdev->dev, "handoff failed\n");
		return -ENXIO;
	}
	myri10ge_dummy_rdma(mgp, 1);
	status = myri10ge_get_firmware_capabilities(mgp);

	return status;
}

static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
{
	struct myri10ge_cmd cmd;
	int status;

	cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
		     | (addr[2] << 8) | addr[3]);

	cmd.data1 = ((addr[4] << 8) | (addr[5]));

	status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
	return status;
}

static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
	struct myri10ge_cmd cmd;
	u32 ctl;
	int status;

	ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
	if (status) {
		netdev_err(mgp->dev, "Failed to set flow control mode\n");
		return status;
	}
	mgp->pause = pause;
	return 0;
}

static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
	if (status)
		netdev_err(mgp->dev, "Failed to set promisc mode\n");
}

static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
	struct myri10ge_cmd cmd;
	int status;
	u32 len;
	struct page *dmatest_page;
	dma_addr_t dmatest_bus;
	char *test = " ";

	dmatest_page = alloc_page(GFP_KERNEL);
	if (!dmatest_page)
		return -ENOMEM;
	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
				   DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
		__free_page(dmatest_page);
		return -ENOMEM;
	}

	/* Run a small DMA test.
	 * The magic multipliers to the length tell the firmware
	 * to do DMA read, write, or read+write tests.  The
	 * results are returned in cmd.data0.  The upper 16
	 * bits of the return are the number of transfers completed.
	 * The lower 16 bits are the time in 0.5us ticks that the
	 * transfers took to complete.
	 */

	len = mgp->tx_boundary;

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10000;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read";
		goto abort;
	}
	mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x1;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "write";
		goto abort;
	}
	mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10001;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read/write";
		goto abort;
	}
	mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
	    (cmd.data0 & 0xffff);

abort:
	pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
	put_page(dmatest_page);

	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
		dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
			 test, status);
	return status;
}
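
/*
 * Worked example (values illustrative only): if the read test returns
 * cmd.data0 = 0x01900400 with len = 4096, then 0x190 (400) transfers
 * completed in 0x400 (1024) ticks of 0.5us, i.e. 512us, giving
 * (400 * 4096 * 2) / 1024 = 3200 MB/s; the factor of 2 converts the
 * 0.5us tick count into microseconds.
 */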

static int myri10ge_reset(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int i, status;
	size_t bytes;
#ifdef CONFIG_MYRI10GE_DCA
	unsigned long dca_tag_off;
#endif

	/* try to send a reset command to the card to see if it
	 * is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		return -ENXIO;
	}

	(void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
	/*
	 * Use non-ndis mcp_slot (eg, 4 bytes total,
	 * no toeplitz hash value returned.  Older firmware will
	 * not understand this command, but will use the correct
	 * sized mcp_slot, so we ignore error returns
	 */
	cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
	(void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);

	/* Now exchange information about interrupts */

	bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
	cmd.data0 = (u32) bytes;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);

	/*
	 * Even though we already know how many slices are supported
	 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
	 * has magic side effects, and must be called after a reset.
	 * It must be called prior to calling any RSS related cmds,
	 * including assigning an interrupt queue for anything but
	 * slice 0.  It must also be called *after*
	 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
	 * the firmware to compute offsets.
	 */

	if (mgp->num_slices > 1) {

		/* ask the maximum number of slices it supports */
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to get number of slices\n");
		}

		/*
		 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
		 * to setting up the interrupt queue DMA
		 */

		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);

		/* Firmware older than 1.4.32 only supports multiple
		 * RX queues, so if we get an error, first retry using a
		 * single TX queue before giving up */
		if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
			netif_set_real_num_tx_queues(mgp->dev, 1);
			cmd.data0 = mgp->num_slices;
			cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
			status = myri10ge_send_cmd(mgp,
						   MXGEFW_CMD_ENABLE_RSS_QUEUES,
						   &cmd, 0);
		}

		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to set number of slices\n");
			return status;
		}
	}
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
		cmd.data2 = i;
		status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
					    &cmd, 0);
	}

	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		ss->irq_claim =
		    (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
	}
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
				    &cmd, 0);
	mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);

	status |= myri10ge_send_cmd
	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
	mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
		return status;
	}
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);

#ifdef CONFIG_MYRI10GE_DCA
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
	dca_tag_off = cmd.data0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (status == 0) {
			ss->dca_tag = (__iomem __be32 *)
			    (mgp->sram + dca_tag_off + 4 * i);
		} else {
			ss->dca_tag = NULL;
		}
	}
#endif				/* CONFIG_MYRI10GE_DCA */

	/* reset mcp/driver shared state back to 0 */

	mgp->link_changes = 0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];

		memset(ss->rx_done.entry, 0, bytes);
		ss->tx.req = 0;
		ss->tx.done = 0;
		ss->tx.pkt_start = 0;
		ss->tx.pkt_done = 0;
		ss->rx_big.cnt = 0;
		ss->rx_small.cnt = 0;
		ss->rx_done.idx = 0;
		ss->rx_done.cnt = 0;
		ss->tx.wake_queue = 0;
		ss->tx.stop_queue = 0;
	}

	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
	myri10ge_change_pause(mgp, mgp->pause);
	myri10ge_set_multicast_list(mgp->dev);
	return status;
}

#ifdef CONFIG_MYRI10GE_DCA
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
	int ret;
	u16 ctl;

	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl);

	ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
	if (ret != on) {
		ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		ctl |= (on << 4);
		pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl);
	}
	return ret;
}

static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
	ss->cached_dca_tag = tag;
	put_be32(htonl(tag), ss->dca_tag);
}

static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
	int cpu = get_cpu();
	int tag;

	if (cpu != ss->cpu) {
		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
		if (ss->cached_dca_tag != tag)
			myri10ge_write_dca(ss, cpu, tag);
		ss->cpu = cpu;
	}
	put_cpu();
}

static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
	int err, i;
	struct pci_dev *pdev = mgp->pdev;

	if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
		return;
	if (!myri10ge_dca) {
		dev_err(&pdev->dev, "dca disabled by administrator\n");
		return;
	}
	err = dca_add_requester(&pdev->dev);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev,
				"dca_add_requester() failed, err=%d\n", err);
		return;
	}
	mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
	mgp->dca_enabled = 1;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->ss[i].cpu = -1;
		mgp->ss[i].cached_dca_tag = -1;
		myri10ge_update_dca(&mgp->ss[i]);
	}
}

static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;

	if (!mgp->dca_enabled)
		return;
	mgp->dca_enabled = 0;
	if (mgp->relaxed_order)
		myri10ge_toggle_relaxed(pdev, 1);
	dca_remove_requester(&pdev->dev);
}

static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
	struct myri10ge_priv *mgp;
	unsigned long event;

	mgp = dev_get_drvdata(dev);
	event = *(unsigned long *)data;

	if (event == DCA_PROVIDER_ADD)
		myri10ge_setup_dca(mgp);
	else if (event == DCA_PROVIDER_REMOVE)
		myri10ge_teardown_dca(mgp);
	return 0;
}
#endif				/* CONFIG_MYRI10GE_DCA */

static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
		    struct mcp_kreq_ether_recv *src)
{
	__be32 low;

	low = src->addr_low;
	src->addr_low = htonl(DMA_BIT_MASK(32));
	myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
	mb();
	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
	mb();
	src->addr_low = low;
	put_be32(low, &dst->addr_low);
	mb();
}
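
/*
 * Design note: the first descriptor's addr_low is parked at the invalid
 * value DMA_BIT_MASK(32) while the block of 8 descriptors is PIO-copied,
 * and the real low address is written last, so the firmware cannot start
 * consuming a half-written group of receive descriptors.
 */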

static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
{
	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
		skb->csum = hw_csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}

static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
			int bytes, int watchdog)
{
	struct page *page;
	dma_addr_t bus;
	int idx;
#if MYRI10GE_ALLOC_SIZE > 4096
	int end_offset;
#endif

	if (unlikely(rx->watchdog_needed && !watchdog))
		return;

	/* try to refill entire ring */
	while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
		idx = rx->fill_cnt & rx->mask;
		if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
			/* we can use part of previous page */
			get_page(rx->page);
		} else {
			/* we need a new page */
			page =
			    alloc_pages(GFP_ATOMIC | __GFP_COMP,
					MYRI10GE_ALLOC_ORDER);
			if (unlikely(page == NULL)) {
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}

			bus = pci_map_page(mgp->pdev, page, 0,
					   MYRI10GE_ALLOC_SIZE,
					   PCI_DMA_FROMDEVICE);
			if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
				__free_pages(page, MYRI10GE_ALLOC_ORDER);
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}

			rx->page = page;
			rx->page_offset = 0;
			rx->bus = bus;
		}
		rx->info[idx].page = rx->page;
		rx->info[idx].page_offset = rx->page_offset;
		/* note that this is the address of the start of the
		 * page */
		dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
		rx->shadow[idx].addr_low =
		    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
		rx->shadow[idx].addr_high =
		    htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));

		/* start next packet on a cacheline boundary */
		rx->page_offset += SKB_DATA_ALIGN(bytes);

#if MYRI10GE_ALLOC_SIZE > 4096
		/* don't cross a 4KB boundary */
		end_offset = rx->page_offset + bytes - 1;
		if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
			rx->page_offset = end_offset & ~4095;
#endif
		rx->fill_cnt++;

		/* copy 8 descriptors to the firmware at a time */
		if ((idx & 7) == 7) {
			myri10ge_submit_8rx(&rx->lanai[idx - 7],
					    &rx->shadow[idx - 7]);
		}
	}
}

static inline void
myri10ge_unmap_rx_page(struct pci_dev *pdev,
		       struct myri10ge_rx_buffer_state *info, int bytes)
{
	/* unmap the recvd page if we're the only or last user of it */
	if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
	    (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
		pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
				      & ~(MYRI10GE_ALLOC_SIZE - 1)),
			       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
	}
}
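
/*
 * Note: several receive buffers may share one MYRI10GE_ALLOC_SIZE page
 * (each holder took a page reference in myri10ge_alloc_rx_pages), so the
 * PCI unmap above is only issued by the buffer that is the sole or final
 * user of the page; earlier users simply drop their page reference.
 */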

/*
 * GRO does not support acceleration of tagged vlan frames, and
 * this NIC does not support vlan tag offload, so we must pop
 * the tag ourselves to be able to achieve GRO performance that
 * is comparable to LRO.
 */
static inline void
myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
{
	u8 *va;
	struct vlan_ethhdr *veh;
	struct skb_frag_struct *frag;
	__wsum vsum;

	va = addr;
	va += MXGEFW_PAD;
	veh = (struct vlan_ethhdr *)va;
	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
	    NETIF_F_HW_VLAN_CTAG_RX &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q)) {
		/* fixup csum if needed */
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0);
			skb->csum = csum_sub(skb->csum, vsum);
		}
		/* pop tag */
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs(veh->h_vlan_TCI));
		memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
		skb->len -= VLAN_HLEN;
		skb->data_len -= VLAN_HLEN;
		frag = skb_shinfo(skb)->frags;
		frag->page_offset += VLAN_HLEN;
		skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
	}
}

#define MYRI10GE_HLEN 64	/* Bytes to copy from page to skb linear memory */

static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct skb_frag_struct *rx_frags;
	struct myri10ge_rx_buf *rx;
	int i, idx, remainder, bytes;
	struct pci_dev *pdev = mgp->pdev;
	struct net_device *dev = mgp->dev;
	u8 *va;

	if (len <= mgp->small_bytes) {
		rx = &ss->rx_small;
		bytes = mgp->small_bytes;
	} else {
		rx = &ss->rx_big;
		bytes = mgp->big_bytes;
	}

	len += MXGEFW_PAD;
	idx = rx->cnt & rx->mask;
	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
	prefetch(va);

	skb = napi_get_frags(&ss->napi);
	if (unlikely(skb == NULL)) {
		ss->stats.rx_dropped++;
		for (i = 0, remainder = len; remainder > 0; i++) {
			myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
			put_page(rx->info[idx].page);
			rx->cnt++;
			idx = rx->cnt & rx->mask;
			remainder -= MYRI10GE_ALLOC_SIZE;
		}
		return 0;
	}
	rx_frags = skb_shinfo(skb)->frags;
	/* Fill skb_frag_struct(s) with data from our receive */
	for (i = 0, remainder = len; remainder > 0; i++) {
		myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
		skb_fill_page_desc(skb, i, rx->info[idx].page,
				   rx->info[idx].page_offset,
				   remainder < MYRI10GE_ALLOC_SIZE ?
				   remainder : MYRI10GE_ALLOC_SIZE);
		rx->cnt++;
		idx = rx->cnt & rx->mask;
		remainder -= MYRI10GE_ALLOC_SIZE;
	}

	/* remove padding */
	rx_frags[0].page_offset += MXGEFW_PAD;
	rx_frags[0].size -= MXGEFW_PAD;
	len -= MXGEFW_PAD;

	skb->len = len;
	skb->data_len = len;
	skb->truesize += len;
	if (dev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum;
	}
	myri10ge_vlan_rx(mgp->dev, va, skb);
	skb_record_rx_queue(skb, ss - &mgp->ss[0]);

	napi_gro_frags(&ss->napi);

	return 1;
}
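
/*
 * Receive path summary: frames are assembled fragment-by-fragment
 * straight from the page-based receive buffers into an skb obtained via
 * napi_get_frags(), the MXGEFW_PAD firmware padding is trimmed from the
 * first fragment, and the result is handed to GRO via napi_gro_frags().
 */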

static inline void
myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
{
	struct pci_dev *pdev = ss->mgp->pdev;
	struct myri10ge_tx_buf *tx = &ss->tx;
	struct netdev_queue *dev_queue;
	struct sk_buff *skb;
	int idx, len;

	while (tx->pkt_done != mcp_index) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		if (tx->info[idx].last) {
			tx->pkt_done++;
			tx->info[idx].last = 0;
		}
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			ss->stats.tx_bytes += skb->len;
			ss->stats.tx_packets++;
			dev_consume_skb_irq(skb);
			if (len)
				pci_unmap_single(pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}

	dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
	/*
	 * Make a minimal effort to prevent the NIC from polling an
	 * idle tx queue.  If we can't get the lock we leave the queue
	 * active. In this case, either a thread was about to start
	 * using the queue anyway, or we lost a race and the NIC will
	 * waste some of its resources polling an inactive queue for a
	 * while.
	 */
	if ((ss->mgp->dev->real_num_tx_queues > 1) &&
	    __netif_tx_trylock(dev_queue)) {
		if (tx->req == tx->done) {
			tx->queue_active = 0;
			put_be32(htonl(1), tx->send_stop);
			mb();
		}
		__netif_tx_unlock(dev_queue);
	}

	/* start the queue if we've stopped it */
	if (netif_tx_queue_stopped(dev_queue) &&
	    tx->req - tx->done < (tx->mask >> 1) &&
	    ss->mgp->running == MYRI10GE_ETH_RUNNING) {
		tx->wake_queue++;
		netif_tx_wake_queue(dev_queue);
	}
}

static int
myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
	struct myri10ge_rx_done *rx_done = &ss->rx_done;
	struct myri10ge_priv *mgp = ss->mgp;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long rx_ok;
	int idx = rx_done->idx;
	int cnt = rx_done->cnt;
	int work_done = 0;
	u16 length;
	__wsum checksum;

	while (rx_done->entry[idx].length != 0 && work_done < budget) {
		length = ntohs(rx_done->entry[idx].length);
		rx_done->entry[idx].length = 0;
		checksum = csum_unfold(rx_done->entry[idx].checksum);
		rx_ok = myri10ge_rx_done(ss, length, checksum);
		rx_packets += rx_ok;
		rx_bytes += rx_ok * (unsigned long)length;
		cnt++;
		idx = cnt & (mgp->max_intr_slots - 1);
		work_done++;
	}
	rx_done->idx = idx;
	rx_done->cnt = cnt;
	ss->stats.rx_packets += rx_packets;
	ss->stats.rx_bytes += rx_bytes;

	/* restock receive rings if needed */
	if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);

	return work_done;
}

static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
{
	struct mcp_irq_data *stats = mgp->ss[0].fw_stats;

	if (unlikely(stats->stats_updated)) {
		unsigned link_up = ntohl(stats->link_up);

		if (mgp->link_state != link_up) {
			mgp->link_state = link_up;

			if (mgp->link_state == MXGEFW_LINK_UP) {
				netif_info(mgp, link, mgp->dev, "link up\n");
				netif_carrier_on(mgp->dev);
				mgp->link_changes++;
			} else {
				netif_info(mgp, link, mgp->dev, "link %s\n",
					   (link_up == MXGEFW_LINK_MYRINET ?
					    "mismatch (Myrinet detected)" :
					    "down"));
				netif_carrier_off(mgp->dev);
				mgp->link_changes++;
			}
		}
		if (mgp->rdma_tags_available !=
		    ntohl(stats->rdma_tags_available)) {
			mgp->rdma_tags_available =
			    ntohl(stats->rdma_tags_available);
			netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
				    mgp->rdma_tags_available);
		}
		mgp->down_cnt += stats->link_down;
		if (stats->link_down)
			wake_up(&mgp->down_wq);
	}
}

static int myri10ge_poll(struct napi_struct *napi, int budget)
{
	struct myri10ge_slice_state *ss =
	    container_of(napi, struct myri10ge_slice_state, napi);
	int work_done;

#ifdef CONFIG_MYRI10GE_DCA
	if (ss->mgp->dca_enabled)
		myri10ge_update_dca(ss);
#endif
	/* process as many rx events as NAPI will allow */
	work_done = myri10ge_clean_rx_done(ss, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		put_be32(htonl(3), ss->irq_claim);
	}
	return work_done;
}
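
/*
 * The put_be32(htonl(3), ss->irq_claim) write above tells the firmware
 * that this slice has consumed its pending events; it is how the NAPI
 * handler re-arms interrupt delivery once it has drained fewer events
 * than its budget.
 */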

static irqreturn_t myri10ge_intr(int irq, void *arg)
{
	struct myri10ge_slice_state *ss = arg;
	struct myri10ge_priv *mgp = ss->mgp;
	struct mcp_irq_data *stats = ss->fw_stats;
	struct myri10ge_tx_buf *tx = &ss->tx;
	u32 send_done_count;
	int i;

	/* an interrupt on a non-zero receive-only slice is implicitly
	 * valid since MSI-X irqs are not shared */
	if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
		napi_schedule(&ss->napi);
		return IRQ_HANDLED;
	}

	/* make sure it is our IRQ, and that the DMA has finished */
	if (unlikely(!stats->valid))
		return IRQ_NONE;

	/* low bit indicates receives are present, so schedule
	 * napi poll handler */
	if (stats->valid & 1)
		napi_schedule(&ss->napi);

	if (!mgp->msi_enabled && !mgp->msix_enabled) {
		put_be32(0, mgp->irq_deassert);
		if (!myri10ge_deassert_wait)
			stats->valid = 0;
		mb();
	} else
		stats->valid = 0;

	/* Wait for IRQ line to go low, if using INTx */
	i = 0;
	while (1) {
		i++;
		/* check for transmit completes and receives */
		send_done_count = ntohl(stats->send_done_count);
		if (send_done_count != tx->pkt_done)
			myri10ge_tx_done(ss, (int)send_done_count);
		if (unlikely(i > myri10ge_max_irq_loops)) {
			netdev_warn(mgp->dev, "irq stuck?\n");
			stats->valid = 0;
			schedule_work(&mgp->watchdog_work);
		}
		if (likely(stats->valid == 0))
			break;
		cpu_relax();
		barrier();
	}

	/* Only slice 0 updates stats */
	if (ss == mgp->ss)
		myri10ge_check_statblock(mgp);

	put_be32(htonl(3), ss->irq_claim + 1);
	return IRQ_HANDLED;
}

static int
myri10ge_get_link_ksettings(struct net_device *netdev,
			    struct ethtool_link_ksettings *cmd)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	char *ptr;
	int i;

	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.speed = SPEED_10000;
	cmd->base.duplex = DUPLEX_FULL;

	/*
	 * parse the product code to determine the interface type
	 * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
	 * after the 3rd dash in the driver's cached copy of the
	 * EEPROM's product code string.
	 */
	ptr = mgp->product_code_string;
	if (ptr == NULL) {
		netdev_err(netdev, "Missing product code\n");
		return 0;
	}
	for (i = 0; i < 3; i++, ptr++) {
		ptr = strchr(ptr, '-');
		if (ptr == NULL) {
			netdev_err(netdev, "Invalid product code %s\n",
				   mgp->product_code_string);
			return 0;
		}
	}
	if (*ptr == '2')
		ptr++;
	if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
		/* We've found either an XFP, quad ribbon fiber, or SFP+ */
		cmd->base.port = PORT_FIBRE;
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	} else {
		cmd->base.port = PORT_OTHER;
	}

	return 0;
}

static void
myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	strlcpy(info->driver, "myri10ge", sizeof(info->driver));
	strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
	strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}

static int
myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	coal->rx_coalesce_usecs = mgp->intr_coal_delay;
	return 0;
}

static int
myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	mgp->intr_coal_delay = coal->rx_coalesce_usecs;
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
	return 0;
}

static void
myri10ge_get_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	pause->autoneg = 0;
	pause->rx_pause = mgp->pause;
	pause->tx_pause = mgp->pause;
}

static int
myri10ge_set_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	if (pause->tx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->tx_pause);
	if (pause->rx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->rx_pause);
	if (pause->autoneg != 0)
		return -EINVAL;
	return 0;
}

static void
myri10ge_get_ringparam(struct net_device *netdev,
		       struct ethtool_ringparam *ring)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
	ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
	ring->rx_mini_pending = ring->rx_mini_max_pending;
	ring->rx_pending = ring->rx_max_pending;
	ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_boundary", "irq", "MSI", "MSIX",
	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
	"serial_number", "watchdog_resets",
#ifdef CONFIG_MYRI10GE_DCA
	"dca_capable_firmware", "dca_device_present",
#endif
	"link_changes", "link_up", "dropped_link_overflow",
	"dropped_link_error_or_filtered",
	"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
	"dropped_unicast_filtered", "dropped_multicast_filtered",
	"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
	"dropped_no_big_buffer"
};

static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
	"----------- slice ---------",
	"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
	"rx_small_cnt", "rx_big_cnt",
	"wake_queue", "stop_queue", "tx_linearized",
};

#define MYRI10GE_NET_STATS_LEN 21
#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
#define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)

static void
myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *myri10ge_gstrings_main_stats,
		       sizeof(myri10ge_gstrings_main_stats));
		data += sizeof(myri10ge_gstrings_main_stats);
		for (i = 0; i < mgp->num_slices; i++) {
			memcpy(data, *myri10ge_gstrings_slice_stats,
			       sizeof(myri10ge_gstrings_slice_stats));
			data += sizeof(myri10ge_gstrings_slice_stats);
		}
		break;
	}
}

static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return MYRI10GE_MAIN_STATS_LEN +
		    mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void
myri10ge_get_ethtool_stats(struct net_device *netdev,
			   struct ethtool_stats *stats, u64 * data)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	struct myri10ge_slice_state *ss;
	struct rtnl_link_stats64 link_stats;
	int slice;
	int i;

	/* force stats update */
	memset(&link_stats, 0, sizeof(link_stats));
	(void)myri10ge_get_stats(netdev, &link_stats);
	for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
		data[i] = ((u64 *)&link_stats)[i];

	data[i++] = (unsigned int)mgp->tx_boundary;
	data[i++] = (unsigned int)mgp->pdev->irq;
	data[i++] = (unsigned int)mgp->msi_enabled;
	data[i++] = (unsigned int)mgp->msix_enabled;
	data[i++] = (unsigned int)mgp->read_dma;
	data[i++] = (unsigned int)mgp->write_dma;
	data[i++] = (unsigned int)mgp->read_write_dma;
	data[i++] = (unsigned int)mgp->serial_number;
	data[i++] = (unsigned int)mgp->watchdog_resets;
#ifdef CONFIG_MYRI10GE_DCA
	data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
	data[i++] = (unsigned int)(mgp->dca_enabled);
#endif
	data[i++] = (unsigned int)mgp->link_changes;

	/* firmware stats are useful only in the first slice */
	ss = &mgp->ss[0];
	data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
	data[i++] =
	    (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
	data[i++] =
	    (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);

	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];
		data[i++] = slice;
		data[i++] = (unsigned int)ss->tx.pkt_start;
		data[i++] = (unsigned int)ss->tx.pkt_done;
		data[i++] = (unsigned int)ss->tx.req;
		data[i++] = (unsigned int)ss->tx.done;
		data[i++] = (unsigned int)ss->rx_small.cnt;
		data[i++] = (unsigned int)ss->rx_big.cnt;
		data[i++] = (unsigned int)ss->tx.wake_queue;
		data[i++] = (unsigned int)ss->tx.stop_queue;
		data[i++] = (unsigned int)ss->tx.linearized;
	}
}

static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	mgp->msg_enable = value;
}

static u32 myri10ge_get_msglevel(struct net_device *netdev)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	return mgp->msg_enable;
}

/*
 * Use a low-level command to change the LED behavior. Rather than
 * blinking (which is the normal case), when identify is used, the
 * yellow LED turns solid.
 */
static int myri10ge_led(struct myri10ge_priv *mgp, int on)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	size_t hdr_off, pattern_off, hdr_len;
	u32 pattern = 0xfffffffe;

	/* find running firmware header */
	hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
	if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_off);
		return -EIO;
	}
	hdr_len = swab32(readl(mgp->sram + hdr_off +
			       offsetof(struct mcp_gen_header,
					header_length)));
	pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern);
	if (pattern_off >= (hdr_len + hdr_off)) {
		dev_info(dev, "Firmware does not support LED identification\n");
		return -EINVAL;
	}
	if (!on)
		pattern = swab32(readl(mgp->sram + pattern_off + 4));
	writel(swab32(pattern), mgp->sram + pattern_off);
	return 0;
}

static int
myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	int rc;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		rc = myri10ge_led(mgp, 1);
		break;

	case ETHTOOL_ID_INACTIVE:
		rc = myri10ge_led(mgp, 0);
		break;

	default:
		rc = -EINVAL;
	}

	return rc;
}

static const struct ethtool_ops myri10ge_ethtool_ops = {
	.get_drvinfo = myri10ge_get_drvinfo,
	.get_coalesce = myri10ge_get_coalesce,
	.set_coalesce = myri10ge_set_coalesce,
	.get_pauseparam = myri10ge_get_pauseparam,
	.set_pauseparam = myri10ge_set_pauseparam,
	.get_ringparam = myri10ge_get_ringparam,
	.get_link = ethtool_op_get_link,
	.get_strings = myri10ge_get_strings,
	.get_sset_count = myri10ge_get_sset_count,
	.get_ethtool_stats = myri10ge_get_ethtool_stats,
	.set_msglevel = myri10ge_set_msglevel,
	.get_msglevel = myri10ge_get_msglevel,
	.set_phys_id = myri10ge_phys_id,
	.get_link_ksettings = myri10ge_get_link_ksettings,
};

static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct myri10ge_cmd cmd;
	struct net_device *dev = mgp->dev;
	int tx_ring_size, rx_ring_size;
	int tx_ring_entries, rx_ring_entries;
	int i, slice, status;
	size_t bytes;

	/* get ring sizes */
	slice = ss - mgp->ss;
	cmd.data0 = slice;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
	tx_ring_size = cmd.data0;
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0)
		return status;
	rx_ring_size = cmd.data0;

	tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
	rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
	ss->tx.mask = tx_ring_entries - 1;
	ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;

	status = -ENOMEM;

	/* allocate the host shadow rings */

	bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
	    * sizeof(*ss->tx.req_list);
	ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.req_bytes == NULL)
		goto abort_with_nothing;

	/* ensure req_list entries are aligned to 8 bytes */
	ss->tx.req_list = (struct mcp_kreq_ether_send *)
	    ALIGN((unsigned long)ss->tx.req_bytes, 8);
	ss->tx.queue_active = 0;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
	ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.shadow == NULL)
		goto abort_with_tx_req_bytes;

	bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
	ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_big.shadow == NULL)
		goto abort_with_rx_small_shadow;

	/* allocate the host info rings */

	bytes = tx_ring_entries * sizeof(*ss->tx.info);
	ss->tx.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.info == NULL)
		goto abort_with_rx_big_shadow;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
	ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.info == NULL)
		goto abort_with_tx_info;

	bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
	ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_big.info == NULL)
		goto abort_with_rx_small_info;

	/* Fill the receive rings */
	ss->rx_big.cnt = 0;
	ss->rx_small.cnt = 0;
	ss->rx_big.fill_cnt = 0;
	ss->rx_small.fill_cnt = 0;
	ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
	ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
	ss->rx_small.watchdog_needed = 0;
	ss->rx_big.watchdog_needed = 0;
	if (mgp->small_bytes == 0) {
		ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
	} else {
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	}

	if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
		netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
			   slice, ss->rx_small.fill_cnt);
		goto abort_with_rx_small_ring;
	}

	myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
	if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
		netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
			   slice, ss->rx_big.fill_cnt);
		goto abort_with_rx_big_ring;
	}

	return 0;

abort_with_rx_big_ring:
	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
		int idx = i & ss->rx_big.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(ss->rx_big.info[idx].page);
	}

abort_with_rx_small_ring:
	if (mgp->small_bytes == 0)
		ss->rx_small.fill_cnt = ss->rx_small.cnt;
	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
		int idx = i & ss->rx_small.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(ss->rx_small.info[idx].page);
	}

	kfree(ss->rx_big.info);

abort_with_rx_small_info:
	kfree(ss->rx_small.info);

abort_with_tx_info:
	kfree(ss->tx.info);

abort_with_rx_big_shadow:
	kfree(ss->rx_big.shadow);

abort_with_rx_small_shadow:
	kfree(ss->rx_small.shadow);

abort_with_tx_req_bytes:
	kfree(ss->tx.req_bytes);
	ss->tx.req_bytes = NULL;
	ss->tx.req_list = NULL;

abort_with_nothing:
	return status;
}

static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct myri10ge_tx_buf *tx;
	int i, len, idx;

	/* If not allocated, skip it */
	if (ss->tx.req_list == NULL)
		return;

	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
		idx = i & ss->rx_big.mask;
		if (i == ss->rx_big.fill_cnt - 1)
			ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(ss->rx_big.info[idx].page);
	}

	if (mgp->small_bytes == 0)
		ss->rx_small.fill_cnt = ss->rx_small.cnt;
	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
		idx = i & ss->rx_small.mask;
		if (i == ss->rx_small.fill_cnt - 1)
			ss->rx_small.info[idx].page_offset =
			    MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(ss->rx_small.info[idx].page);
	}
	tx = &ss->tx;
	while (tx->done != tx->req) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			ss->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			if (len)
				pci_unmap_single(mgp->pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(mgp->pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}
	kfree(ss->rx_big.info);

	kfree(ss->rx_small.info);

	kfree(ss->tx.info);

	kfree(ss->rx_big.shadow);

	kfree(ss->rx_small.shadow);

	kfree(ss->tx.req_bytes);
	ss->tx.req_bytes = NULL;
	ss->tx.req_list = NULL;
}
static int myri10ge_request_irq(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	struct myri10ge_slice_state *ss;
	struct net_device *netdev = mgp->dev;
	int i;
	int status;

	mgp->msi_enabled = 0;
	mgp->msix_enabled = 0;
	status = 0;
	if (myri10ge_msi) {
		if (mgp->num_slices > 1) {
			status = pci_enable_msix_range(pdev, mgp->msix_vectors,
					mgp->num_slices, mgp->num_slices);
			if (status < 0) {
				dev_err(&pdev->dev,
					"Error %d setting up MSI-X\n", status);
				return status;
			}
			mgp->msix_enabled = 1;
		}
		if (mgp->msix_enabled == 0) {
			status = pci_enable_msi(pdev);
			if (status != 0) {
				dev_err(&pdev->dev,
					"Error %d setting up MSI; falling back to xPIC\n",
					status);
			} else {
				mgp->msi_enabled = 1;
			}
		}
	}
	if (mgp->msix_enabled) {
		for (i = 0; i < mgp->num_slices; i++) {
			ss = &mgp->ss[i];
			snprintf(ss->irq_desc, sizeof(ss->irq_desc),
				 "%s:slice-%d", netdev->name, i);
			status = request_irq(mgp->msix_vectors[i].vector,
					     myri10ge_intr, 0, ss->irq_desc,
					     ss);
			if (status != 0) {
				dev_err(&pdev->dev,
					"slice %d failed to allocate IRQ\n", i);
				i--;
				while (i >= 0) {
					free_irq(mgp->msix_vectors[i].vector,
						 &mgp->ss[i]);
					i--;
				}
				pci_disable_msix(pdev);
				return status;
			}
		}
	} else {
		status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
				     mgp->dev->name, &mgp->ss[0]);
		if (status != 0) {
			dev_err(&pdev->dev, "failed to allocate IRQ\n");
			if (mgp->msi_enabled)
				pci_disable_msi(pdev);
		}
	}
	return status;
}
static void myri10ge_free_irq(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	int i;

	if (mgp->msix_enabled) {
		for (i = 0; i < mgp->num_slices; i++)
			free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
	} else {
		free_irq(pdev->irq, &mgp->ss[0]);
	}
	if (mgp->msi_enabled)
		pci_disable_msi(pdev);
	if (mgp->msix_enabled)
		pci_disable_msix(pdev);
}
static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int status;

	ss = &mgp->ss[slice];
	status = 0;
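	/* The GET_*_OFFSET commands below return, in cmd.data0, the offset
	 * of each ring's submission area within the NIC's SRAM aperture;
	 * the __iomem ring pointers computed here are simply mgp->sram
	 * plus that offset. */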
	if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
		cmd.data0 = slice;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
					   &cmd, 0);
		ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
		    (mgp->sram + cmd.data0);
	}
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
				    &cmd, 0);
	ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
	    (mgp->sram + cmd.data0);
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
	ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
	    (mgp->sram + cmd.data0);

	ss->tx.send_go = (__iomem __be32 *)
	    (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
	ss->tx.send_stop = (__iomem __be32 *)
	    (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
	return status;
}
static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int status;

	ss = &mgp->ss[slice];
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
	cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
	if (status == -ENOSYS) {
		dma_addr_t bus = ss->fw_stats_bus;
		if (slice != 0)
			return -EINVAL;
		bus += offsetof(struct mcp_irq_data, send_done_count);
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
		status = myri10ge_send_cmd(mgp,
					   MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
					   &cmd, 0);
		/* Firmware cannot support multicast without STATS_DMA_V2 */
		mgp->fw_multicast_support = 0;
	} else {
		mgp->fw_multicast_support = 1;
	}
	return 0;
}
static int myri10ge_open(struct net_device *dev)
{
	struct myri10ge_slice_state *ss;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	int i, status, big_pow2, slice;
	u8 __iomem *itable;

	if (mgp->running != MYRI10GE_ETH_STOPPED)
		return -EBUSY;

	mgp->running = MYRI10GE_ETH_STARTING;
	status = myri10ge_reset(mgp);
	if (status != 0) {
		netdev_err(dev, "failed reset\n");
		goto abort_with_nothing;
	}

	if (mgp->num_slices > 1) {
		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to set number of slices\n");
			goto abort_with_nothing;
		}
		/* setup the indirection table */
		cmd.data0 = mgp->num_slices;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
					   &cmd, 0);

		status |= myri10ge_send_cmd(mgp,
					    MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
					    &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to setup rss tables\n");
			goto abort_with_nothing;
		}

		/* just enable an identity mapping */
		itable = mgp->sram + cmd.data0;
		for (i = 0; i < mgp->num_slices; i++)
			__raw_writeb(i, &itable[i]);

		cmd.data0 = 1;
		cmd.data1 = myri10ge_rss_hash;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
					   &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to enable slices\n");
			goto abort_with_nothing;
		}
	}

	status = myri10ge_request_irq(mgp);
	if (status != 0)
		goto abort_with_nothing;

	/* decide what small buffer size to use.  For good TCP rx
	 * performance, it is important to not receive 1514 byte
	 * frames into jumbo buffers, as it confuses the socket buffer
	 * accounting code, leading to drops and erratic performance.
	 */

	if (dev->mtu <= ETH_DATA_LEN)
		/* enough for a TCP header */
		mgp->small_bytes = (128 > SMP_CACHE_BYTES)
		    ? (128 - MXGEFW_PAD)
		    : (SMP_CACHE_BYTES - MXGEFW_PAD);
	else
		/* enough for a vlan encapsulated ETH_DATA_LEN frame */
		mgp->small_bytes = VLAN_ETH_FRAME_LEN;
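	/* Worked example: on a host with 64-byte cache lines and a
	 * standard 1500-byte MTU, the expression above evaluates to
	 * 128 - MXGEFW_PAD = 126 bytes, so full 1514-byte frames go to
	 * the big ring while ACKs and other small frames land in the
	 * small ring. */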

	/* Override the small buffer size? */
	if (myri10ge_small_bytes >= 0)
		mgp->small_bytes = myri10ge_small_bytes;

	/* Firmware needs the big buff size as a power of 2.  Lie and
	 * tell it the buffer is larger, because we only use 1
	 * buffer/pkt, and the mtu will prevent overruns.
	 */
	big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
	if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
		while (!is_power_of_2(big_pow2))
			big_pow2++;
		mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
	} else {
		big_pow2 = MYRI10GE_ALLOC_SIZE;
		mgp->big_bytes = big_pow2;
	}
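	/* Worked example: with a 1500-byte MTU the true receive size is
	 * 1500 + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD = 1520 bytes; the loop
	 * above advertises the next power of two (2048) to the firmware
	 * while big_bytes stays 1520, so each received frame still only
	 * consumes 1520 bytes of its page. */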

	/* setup the per-slice data structures */
	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];

		status = myri10ge_get_txrx(mgp, slice);
		if (status != 0) {
			netdev_err(dev, "failed to get ring sizes or locations\n");
			goto abort_with_rings;
		}
		status = myri10ge_allocate_rings(ss);
		if (status != 0)
			goto abort_with_rings;

		/* only firmware which supports multiple TX queues
		 * supports setting up the tx stats on non-zero
		 * slices */
		if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
			status = myri10ge_set_stats(mgp, slice);
		if (status) {
			netdev_err(dev, "Couldn't set stats DMA\n");
			goto abort_with_rings;
		}

		/* must happen prior to any irq */
		napi_enable(&(ss)->napi);
	}

	/* now give firmware buffers sizes, and MTU */
	cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
	cmd.data0 = mgp->small_bytes;
	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
	cmd.data0 = big_pow2;
	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
	if (status) {
		netdev_err(dev, "Couldn't set buffer sizes\n");
		goto abort_with_rings;
	}

	/*
	 * Set Linux style TSO mode; this is needed only on newer
	 * firmware versions.  Older versions default to Linux
	 * style TSO
	 */
	cmd.data0 = 0;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
	if (status && status != -ENOSYS) {
		netdev_err(dev, "Couldn't set TSO mode\n");
		goto abort_with_rings;
	}

	mgp->link_state = ~0U;
	mgp->rdma_tags_available = 15;

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
	if (status) {
		netdev_err(dev, "Couldn't bring up link\n");
		goto abort_with_rings;
	}

	mgp->running = MYRI10GE_ETH_RUNNING;
	mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
	add_timer(&mgp->watchdog_timer);
	netif_tx_wake_all_queues(dev);

	return 0;

abort_with_rings:
	while (slice) {
		slice--;
		napi_disable(&mgp->ss[slice].napi);
	}
	for (i = 0; i < mgp->num_slices; i++)
		myri10ge_free_rings(&mgp->ss[i]);

	myri10ge_free_irq(mgp);

abort_with_nothing:
	mgp->running = MYRI10GE_ETH_STOPPED;
	return -ENOMEM;
}
static int myri10ge_close(struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	int status, old_down_cnt;
	int i;

	if (mgp->running != MYRI10GE_ETH_RUNNING)
		return 0;

	if (mgp->ss[0].tx.req_bytes == NULL)
		return 0;

	del_timer_sync(&mgp->watchdog_timer);
	mgp->running = MYRI10GE_ETH_STOPPING;
	for (i = 0; i < mgp->num_slices; i++)
		napi_disable(&mgp->ss[i].napi);

	netif_carrier_off(dev);

	netif_tx_stop_all_queues(dev);
	if (mgp->rebooted == 0) {
		old_down_cnt = mgp->down_cnt;
		mb();
		status =
		    myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
		if (status)
			netdev_err(dev, "Couldn't bring down link\n");

		wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
				   HZ);
		if (old_down_cnt == mgp->down_cnt)
			netdev_err(dev, "never got down irq\n");
	}
	netif_tx_disable(dev);
	myri10ge_free_irq(mgp);
	for (i = 0; i < mgp->num_slices; i++)
		myri10ge_free_rings(&mgp->ss[i]);

	mgp->running = MYRI10GE_ETH_STOPPED;
	return 0;
}
/* copy an array of struct mcp_kreq_ether_send's to the mcp.  Copy
 * backwards one at a time and handle ring wraps */
static inline void
myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
			      struct mcp_kreq_ether_send *src, int cnt)
{
	int idx, starting_slot;
	starting_slot = tx->req;
	while (cnt > 1) {
		cnt--;
		idx = (starting_slot + cnt) & tx->mask;
		myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
		mb();
	}
}
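
/* Note: copying in reverse order guarantees that by the time any given
 * request becomes visible to the NIC, every request that follows it in
 * the chain has already been written, so a ring wrap can never expose a
 * partially written chain.  The first request is deliberately skipped
 * here and written by the caller (see myri10ge_submit_req() below). */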

/*
 * copy an array of struct mcp_kreq_ether_send's to the mcp.  Copy
 * at most 32 bytes at a time, so as to avoid involving the software
 * pio handler in the nic.  We re-write the first segment's flags
 * to mark them valid only after writing the entire chain.
 */
static inline void
myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
		    int cnt)
{
	int idx, i;
	struct mcp_kreq_ether_send __iomem *dstp, *dst;
	struct mcp_kreq_ether_send *srcp;
	u8 last_flags;

	idx = tx->req & tx->mask;

	last_flags = src->flags;
	src->flags = 0;
	mb();
	dst = dstp = &tx->lanai[idx];
	srcp = src;

	if ((idx + cnt) < tx->mask) {
		for (i = 0; i < (cnt - 1); i += 2) {
			myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
			mb();	/* force write every 32 bytes */
			srcp += 2;
			dstp += 2;
		}
	} else {
		/* submit all but the first request, and ensure
		 * that it is submitted below */
		myri10ge_submit_req_backwards(tx, src, cnt);
		i = 0;
	}
	if (i < cnt) {
		/* submit the first request */
		myri10ge_pio_copy(dstp, srcp, sizeof(*src));
		mb();	/* barrier before setting valid flag */
	}

	/* re-write the last 32-bits with the valid flags */
	src->flags = last_flags;
	put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
	tx->req += cnt;
	mb();
}
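
/* Each mcp_kreq_ether_send is a 16-byte (four 32-bit word) descriptor
 * whose last word carries the flags byte.  The chain is first copied
 * with the leading request's flags forced to zero, so the NIC treats it
 * as not yet valid; the final put_be32() above then rewrites just that
 * last word with the saved flags, handing the entire chain to the
 * hardware with a single aligned 32-bit store. */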

static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
				  struct myri10ge_tx_buf *tx, int idx)
{
	unsigned int len;
	int last_idx;

	/* Free any DMA resources we've alloced and clear out the skb slot */
	last_idx = (idx + 1) & tx->mask;
	idx = tx->req & tx->mask;
	do {
		len = dma_unmap_len(&tx->info[idx], len);
		if (len) {
			if (tx->info[idx].skb != NULL)
				pci_unmap_single(mgp->pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(mgp->pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
			dma_unmap_len_set(&tx->info[idx], len, 0);
			tx->info[idx].skb = NULL;
		}
		idx = (idx + 1) & tx->mask;
	} while (idx != last_idx);
}

/*
 * Transmit a packet.  We need to split the packet so that a single
 * segment does not cross myri10ge->tx_boundary, so this makes segment
 * counting tricky.  So rather than try to count segments up front, we
 * just give up if there are too few segments to hold a reasonably
 * fragmented packet currently available.  If we run
 * out of segments while preparing a packet for DMA, we just linearize
 * the skb and try again.
 */
static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_slice_state *ss;
	struct mcp_kreq_ether_send *req;
	struct myri10ge_tx_buf *tx;
	struct skb_frag_struct *frag;
	struct netdev_queue *netdev_queue;
	dma_addr_t bus;
	u32 low;
	__be32 high_swapped;
	unsigned int len;
	int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
	u16 pseudo_hdr_offset, cksum_offset, queue;
	int cum_len, seglen, boundary, rdma_count;
	u8 flags, odd_flag;

	queue = skb_get_queue_mapping(skb);
	ss = &mgp->ss[queue];
	netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
	tx = &ss->tx;

again:
	req = tx->req_list;
	avail = tx->mask - 1 - (tx->req - tx->done);

	mss = 0;
	max_segments = MXGEFW_MAX_SEND_DESC;

	if (skb_is_gso(skb)) {
		mss = skb_shinfo(skb)->gso_size;
		max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
	}

	if ((unlikely(avail < max_segments))) {
		/* we are out of transmit resources */
		tx->stop_queue++;
		netif_tx_stop_queue(netdev_queue);
		return NETDEV_TX_BUSY;
	}

	/* Setup checksum offloading, if needed */
	cksum_offset = 0;
	pseudo_hdr_offset = 0;
	odd_flag = 0;
	flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		cksum_offset = skb_checksum_start_offset(skb);
		pseudo_hdr_offset = cksum_offset + skb->csum_offset;
		/* If the headers are excessively large, then we must
		 * fall back to a software checksum */
		if (unlikely(!mss && (cksum_offset > 255 ||
				      pseudo_hdr_offset > 127))) {
			if (skb_checksum_help(skb))
				goto drop;
			cksum_offset = 0;
			pseudo_hdr_offset = 0;
		} else {
			odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
			flags |= MXGEFW_FLAGS_CKSUM;
		}
	}

	cum_len = 0;

	if (mss) {		/* TSO */
		/* this removes any CKSUM flag from before */
		flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);

		/* negative cum_len signifies to the
		 * send loop that we are still in the
		 * header portion of the TSO packet.
		 * TSO header can be at most 1KB long */
		cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));

		/* for IPv6 TSO, the checksum offset stores the
		 * TCP header length, to save the firmware from
		 * the need to parse the headers */
		if (skb_is_gso_v6(skb)) {
			cksum_offset = tcp_hdrlen(skb);
			/* Can only handle headers <= max_tso6 long */
			if (unlikely(-cum_len > mgp->max_tso6))
				return myri10ge_sw_tso(skb, dev);
		}
		/* for TSO, pseudo_hdr_offset holds mss.
		 * The firmware figures out where to put
		 * the checksum by parsing the header. */
		pseudo_hdr_offset = mss;
	} else {
		/* Mark small packets, and pad out tiny packets */
		if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
			flags |= MXGEFW_FLAGS_SMALL;

			/* pad frames to at least ETH_ZLEN bytes */
			if (eth_skb_pad(skb)) {
				/* The packet is gone, so we must
				 * return 0 */
				ss->stats.tx_dropped += 1;
				return NETDEV_TX_OK;
			}
		}
	}

	/* map the skb for DMA */
	len = skb_headlen(skb);
	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
		goto drop;

	idx = tx->req & tx->mask;
	tx->info[idx].skb = skb;
	dma_unmap_addr_set(&tx->info[idx], bus, bus);
	dma_unmap_len_set(&tx->info[idx], len, len);

	frag_cnt = skb_shinfo(skb)->nr_frags;
	frag_idx = 0;
	count = 0;
	rdma_count = 0;
2743 /* "rdma_count" is the number of RDMAs belonging to the
2744 * current packet BEFORE the current send request. For
2745 * non-TSO packets, this is equal to "count".
2746 * For TSO packets, rdma_count needs to be reset
2747 * to 0 after a segment cut.
2749 * The rdma_count field of the send request is
2750 * the number of RDMAs of the packet starting at
2751 * that request. For TSO send requests with one ore more cuts
2752 * in the middle, this is the number of RDMAs starting
2753 * after the last cut in the request. All previous
2754 * segments before the last cut implicitly have 1 RDMA.
2756 * Since the number of RDMAs is not known beforehand,
2757 * it must be filled-in retroactively - after each
2758 * segmentation cut or at the end of the entire packet.
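	/* Illustrative example: a TSO packet emitted as six requests with
	 * one segment cut after the third:
	 *
	 *	req0 req1 req2 | req3 req4 req5
	 *
	 * Requests before the cut implicitly carry rdma_count = 1; when
	 * the packet completes, the retroactive store
	 * "(req - rdma_count)->rdma_count = rdma_count" below writes 3
	 * into req3, the number of RDMAs from the last cut to the end. */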

	while (1) {
		/* Break the SKB or Fragment up into pieces which
		 * do not cross mgp->tx_boundary */
		low = MYRI10GE_LOWPART_TO_U32(bus);
		high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
		while (len) {
			u8 flags_next;
			int cum_len_next;

			if (unlikely(count == max_segments))
				goto abort_linearize;

			boundary =
			    (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
			seglen = boundary - low;
			if (seglen > len)
				seglen = len;
			flags_next = flags & ~MXGEFW_FLAGS_FIRST;
			cum_len_next = cum_len + seglen;
			if (mss) {	/* TSO */
				(req - rdma_count)->rdma_count = rdma_count + 1;

				if (likely(cum_len >= 0)) {	/* payload */
					int next_is_first, chop;

					chop = (cum_len_next > mss);
					cum_len_next = cum_len_next % mss;
					next_is_first = (cum_len_next == 0);
					flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
					flags_next |= next_is_first *
					    MXGEFW_FLAGS_FIRST;
					rdma_count |= -(chop | next_is_first);
					rdma_count += chop & ~next_is_first;
				} else if (likely(cum_len_next >= 0)) {	/* header ends */
					int small;

					rdma_count = -1;
					cum_len_next = 0;
					seglen = -cum_len;
					small = (mss <= MXGEFW_SEND_SMALL_SIZE);
					flags_next = MXGEFW_FLAGS_TSO_PLD |
					    MXGEFW_FLAGS_FIRST |
					    (small * MXGEFW_FLAGS_SMALL);
				}
			}
			req->addr_high = high_swapped;
			req->addr_low = htonl(low);
			req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
			req->pad = 0;	/* complete solid 16-byte block; does this matter? */
			req->rdma_count = 1;
			req->length = htons(seglen);
			req->cksum_offset = cksum_offset;
			req->flags = flags | ((cum_len & 1) * odd_flag);

			low += seglen;
			len -= seglen;
			cum_len = cum_len_next;
			flags = flags_next;
			req++;
			count++;
			rdma_count++;
			if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
				if (unlikely(cksum_offset > seglen))
					cksum_offset -= seglen;
				else
					cksum_offset = 0;
			}
		}
		if (frag_idx == frag_cnt)
			break;

		/* map next fragment for DMA */
		frag = &skb_shinfo(skb)->frags[frag_idx];
		frag_idx++;
		len = skb_frag_size(frag);
		bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
			myri10ge_unmap_tx_dma(mgp, tx, idx);
			goto abort_linearize;
		}
		idx = (count + tx->req) & tx->mask;
		dma_unmap_addr_set(&tx->info[idx], bus, bus);
		dma_unmap_len_set(&tx->info[idx], len, len);
	}

	(req - rdma_count)->rdma_count = rdma_count;
	if (mss)
		do {
			req--;
			req->flags |= MXGEFW_FLAGS_TSO_LAST;
		} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
					 MXGEFW_FLAGS_FIRST)));
	idx = ((count - 1) + tx->req) & tx->mask;
	tx->info[idx].last = 1;
	myri10ge_submit_req(tx, tx->req_list, count);
	/* if using multiple tx queues, make sure NIC polls the
	 * current slice */
	if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
		tx->queue_active = 1;
		put_be32(htonl(1), tx->send_go);
		mb();
	}
	tx->pkt_start++;
	if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
		tx->stop_queue++;
		netif_tx_stop_queue(netdev_queue);
	}
	return NETDEV_TX_OK;

abort_linearize:
	myri10ge_unmap_tx_dma(mgp, tx, idx);

	if (skb_is_gso(skb)) {
		netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
		goto drop;
	}

	if (skb_linearize(skb))
		goto drop;

	tx->linearized++;
	goto again;

drop:
	dev_kfree_skb_any(skb);
	ss->stats.tx_dropped += 1;
	return NETDEV_TX_OK;
}
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct sk_buff *segs, *curr;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_slice_state *ss;
	netdev_tx_t status;

	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
	if (IS_ERR(segs))
		goto drop;

	while (segs) {
		curr = segs;
		segs = segs->next;
		curr->next = NULL;
		status = myri10ge_xmit(curr, dev);
		if (status != 0) {
			dev_kfree_skb_any(curr);
			if (segs != NULL) {
				curr = segs;
				segs = segs->next;
				curr->next = NULL;
				dev_kfree_skb_any(segs);
			}
			goto drop;
		}
	}
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

drop:
	ss = &mgp->ss[skb_get_queue_mapping(skb)];
	dev_kfree_skb_any(skb);
	ss->stats.tx_dropped += 1;
	return NETDEV_TX_OK;
}
static void myri10ge_get_stats(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	const struct myri10ge_priv *mgp = netdev_priv(dev);
	const struct myri10ge_slice_netstats *slice_stats;
	int i;

	for (i = 0; i < mgp->num_slices; i++) {
		slice_stats = &mgp->ss[i].stats;
		stats->rx_packets += slice_stats->rx_packets;
		stats->tx_packets += slice_stats->tx_packets;
		stats->rx_bytes += slice_stats->rx_bytes;
		stats->tx_bytes += slice_stats->tx_bytes;
		stats->rx_dropped += slice_stats->rx_dropped;
		stats->tx_dropped += slice_stats->tx_dropped;
	}
}
static void myri10ge_set_multicast_list(struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	struct netdev_hw_addr *ha;
	__be32 data[2] = { 0, 0 };
	int err;

	/* can be called from atomic contexts,
	 * pass 1 to force atomicity in myri10ge_send_cmd() */
	myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);

	/* This firmware is known to not support multicast */
	if (!mgp->fw_multicast_support)
		return;

	/* Disable multicast filtering */

	err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
	if (err != 0) {
		netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
			   err);
		goto abort;
	}

	if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
		/* request to disable multicast filtering, so quit here */
		return;
	}

	/* Flush the filters */

	err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
				&cmd, 1);
	if (err != 0) {
		netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
			   err);
		goto abort;
	}

	/* Walk the multicast list, and add each address */
	netdev_for_each_mc_addr(ha, dev) {
		memcpy(data, &ha->addr, ETH_ALEN);
		cmd.data0 = ntohl(data[0]);
		cmd.data1 = ntohl(data[1]);
		err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
					&cmd, 1);

		if (err != 0) {
			netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
				   err, ha->addr);
			goto abort;
		}
	}
	/* Enable multicast filtering */
	err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
	if (err != 0) {
		netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
			   err);
		goto abort;
	}

	return;

abort:
	return;
}
static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	int status;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	status = myri10ge_update_mac_address(mgp, sa->sa_data);
	if (status != 0) {
		netdev_err(dev, "changing mac address failed with %d\n",
			   status);
		return status;
	}

	/* change the dev structure */
	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
	return 0;
}

static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	int error = 0;

	netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
	if (mgp->running) {
		/* if we change the mtu on an active device, we must
		 * reset the device so the firmware sees the change */
		myri10ge_close(dev);
		dev->mtu = new_mtu;
		myri10ge_open(dev);
	} else
		dev->mtu = new_mtu;

	return error;
}

/*
 * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
 * Only do it if the bridge is a root port since we don't want to disturb
 * any other device, except if forced with myri10ge_ecrc_enable > 1.
 */
static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
{
	struct pci_dev *bridge = mgp->pdev->bus->self;
	struct device *dev = &mgp->pdev->dev;
	int cap;
	unsigned err_cap;
	int ret;

	if (!myri10ge_ecrc_enable || !bridge)
		return;

	/* check that the bridge is a root port */
	if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) {
		if (myri10ge_ecrc_enable > 1) {
			struct pci_dev *prev_bridge, *old_bridge = bridge;

			/* Walk the hierarchy up to the root port
			 * where ECRC has to be enabled */
			do {
				prev_bridge = bridge;
				bridge = bridge->bus->self;
				if (!bridge || prev_bridge == bridge) {
					dev_err(dev,
						"Failed to find root port"
						" to force ECRC\n");
					return;
				}
			} while (pci_pcie_type(bridge) !=
				 PCI_EXP_TYPE_ROOT_PORT);

			dev_info(dev,
				 "Forcing ECRC on non-root port %s"
				 " (enabling on root port %s)\n",
				 pci_name(old_bridge), pci_name(bridge));
		} else {
			dev_err(dev,
				"Not enabling ECRC on non-root port %s\n",
				pci_name(bridge));
			return;
		}
	}

	cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
	if (!cap)
		return;

	ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
	if (ret) {
		dev_err(dev, "failed reading ext-conf-space of %s\n",
			pci_name(bridge));
		dev_err(dev, "\t pci=nommconf in use? "
			"or buggy/incomplete/absent ACPI MCFG attr?\n");
		return;
	}
	if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
		return;

	err_cap |= PCI_ERR_CAP_ECRC_GENE;
	pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
	dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
}

/*
 * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
 * when the PCI-E Completion packets are aligned on an 8-byte
 * boundary.  Some PCI-E chip sets always align Completion packets; on
 * the ones that do not, the alignment can be enforced by enabling
 * ECRC generation (if supported).
 *
 * When PCI-E Completion packets are not aligned, it is actually more
 * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
 *
 * If the driver can neither enable ECRC nor verify that it has
 * already been enabled, then it must use a firmware image which works
 * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
 * should also ensure that it never gives the device a Read-DMA which is
 * larger than 2KB by setting the tx_boundary to 2KB.  If ECRC is
 * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
 * firmware image, and set tx_boundary to 4KB.
 */
static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	struct device *dev = &pdev->dev;
	int status;

	mgp->tx_boundary = 4096;
	/*
	 * Verify the max read request size was set to 4KB
	 * before trying the test with 4KB.
	 */
	status = pcie_get_readrq(pdev);
	if (status < 0) {
		dev_err(dev, "Couldn't read max read req size: %d\n", status);
		goto abort;
	}
	if (status != 4096) {
		dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
		mgp->tx_boundary = 2048;
	}
	/*
	 * load the optimized firmware (which assumes aligned PCIe
	 * completions) in order to see if it works on this host.
	 */
	set_fw_name(mgp, myri10ge_fw_aligned, false);
	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0)
		goto abort;

	/*
	 * Enable ECRC if possible
	 */
	myri10ge_enable_ecrc(mgp);

	/*
	 * Run a DMA test which watches for unaligned completions and
	 * aborts on the first one seen.
	 */

	status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
	if (status == 0)
		return;		/* keep the aligned firmware */

	if (status != -E2BIG)
		dev_warn(dev, "DMA test failed: %d\n", status);
	if (status == -ENOSYS)
		dev_warn(dev, "Falling back to ethp! "
			 "Please install up to date fw\n");
abort:
	/* fall back to using the unaligned firmware */
	mgp->tx_boundary = 2048;
	set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
{
	int overridden = 0;

	if (myri10ge_force_firmware == 0) {
		int link_width;
		u16 lnk;

		pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk);
		link_width = (lnk >> 4) & 0x3f;

		/* Check to see if Link is less than 8 or if the
		 * upstream bridge is known to provide aligned
		 * completions */
		if (link_width < 8) {
			dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
				 link_width);
			mgp->tx_boundary = 4096;
			set_fw_name(mgp, myri10ge_fw_aligned, false);
		} else {
			myri10ge_firmware_probe(mgp);
		}
	} else {
		if (myri10ge_force_firmware == 1) {
			dev_info(&mgp->pdev->dev,
				 "Assuming aligned completions (forced)\n");
			mgp->tx_boundary = 4096;
			set_fw_name(mgp, myri10ge_fw_aligned, false);
		} else {
			dev_info(&mgp->pdev->dev,
				 "Assuming unaligned completions (forced)\n");
			mgp->tx_boundary = 2048;
			set_fw_name(mgp, myri10ge_fw_unaligned, false);
		}
	}

	kernel_param_lock(THIS_MODULE);
	if (myri10ge_fw_name != NULL) {
		char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
		if (fw_name) {
			overridden = 1;
			set_fw_name(mgp, fw_name, true);
		}
	}
	kernel_param_unlock(THIS_MODULE);

	if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
	    myri10ge_fw_names[mgp->board_number] != NULL &&
	    strlen(myri10ge_fw_names[mgp->board_number])) {
		set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
		overridden = 1;
	}
	if (overridden)
		dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
			 mgp->fw_name);
}
static void myri10ge_mask_surprise_down(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pdev->bus->self;
	int cap;
	u32 mask;

	if (bridge == NULL)
		return;

	cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
	if (cap) {
		/* a sram parity error can cause a surprise link
		 * down; since we expect and can recover from sram
		 * parity errors, mask surprise link down events */
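		/* (0x20 below is the Surprise Down Error bit,
		 * PCI_ERR_UNC_SURPDN, of the AER Uncorrectable Error
		 * Mask register.) */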
		pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask);
		mask |= 0x20;
		pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask);
	}
}

#ifdef CONFIG_PM
static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return -EINVAL;
	netdev = mgp->dev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		netdev_info(netdev, "closing\n");
		rtnl_lock();
		myri10ge_close(netdev);
		rtnl_unlock();
	}
	myri10ge_dummy_rdma(mgp, 0);
	pci_save_state(pdev);
	pci_disable_device(pdev);

	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int myri10ge_resume(struct pci_dev *pdev)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;
	int status;
	u16 vendor;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return -EINVAL;
	netdev = mgp->dev;
	pci_set_power_state(pdev, PCI_D0);	/* zeros conf space as a side effect */
	msleep(5);		/* give card time to respond */
	pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
	if (vendor == 0xffff) {
		netdev_err(mgp->dev, "device disappeared!\n");
		return -EIO;
	}

	pci_restore_state(pdev);

	status = pci_enable_device(pdev);
	if (status) {
		dev_err(&pdev->dev, "failed to enable device\n");
		return status;
	}

	pci_set_master(pdev);

	myri10ge_reset(mgp);
	myri10ge_dummy_rdma(mgp, 1);

	/* Save configuration space to be restored if the
	 * nic resets due to a parity error */
	pci_save_state(pdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		status = myri10ge_open(netdev);
		rtnl_unlock();
		if (status != 0)
			goto abort_with_enabled;
	}
	netif_device_attach(netdev);

	return 0;

abort_with_enabled:
	pci_disable_device(pdev);
	return -EIO;
}
#endif /* CONFIG_PM */
static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	int vs = mgp->vendor_specific_offset;
	u32 reboot;

	/* enter read32 mode */
	pci_write_config_byte(pdev, vs + 0x10, 0x3);

	/* read REBOOT_STATUS (0xfffffff0) */
	pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
	pci_read_config_dword(pdev, vs + 0x14, &reboot);
	return reboot;
}
static void
myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
		     int *busy_slice_cnt, u32 rx_pause_cnt)
{
	struct myri10ge_priv *mgp = ss->mgp;
	int slice = ss - mgp->ss;

	if (ss->tx.req != ss->tx.done &&
	    ss->tx.done == ss->watchdog_tx_done &&
	    ss->watchdog_tx_req != ss->watchdog_tx_done) {
		/* nic seems like it might be stuck.. */
		if (rx_pause_cnt != mgp->watchdog_pause) {
			if (net_ratelimit())
				netdev_warn(mgp->dev, "slice %d: TX paused, "
					    "check link partner\n", slice);
		} else {
			netdev_warn(mgp->dev,
				    "slice %d: TX stuck %d %d %d %d %d %d\n",
				    slice, ss->tx.queue_active, ss->tx.req,
				    ss->tx.done, ss->tx.pkt_start,
				    ss->tx.pkt_done,
				    (int)ntohl(mgp->ss[slice].fw_stats->
					       send_done_count));
			*reset_needed = 1;
			ss->stuck = 1;
		}
	}
	if (ss->watchdog_tx_done != ss->tx.done ||
	    ss->watchdog_rx_done != ss->rx_done.cnt) {
		*busy_slice_cnt += 1;
	}
	ss->watchdog_tx_done = ss->tx.done;
	ss->watchdog_tx_req = ss->tx.req;
	ss->watchdog_rx_done = ss->rx_done.cnt;
}
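
/* A slice is flagged as stuck only when all three conditions above hold:
 * there is posted tx work (req != done), completions have not advanced
 * since the last watchdog tick, and work was already pending at that
 * tick.  If the firmware's dropped_pause counter moved instead, the link
 * partner is pausing us; that is reported but not treated as a hang. */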

/*
 * This watchdog is used to check whether the board has suffered
 * from a parity error and needs to be recovered.
 */
static void myri10ge_watchdog(struct work_struct *work)
{
	struct myri10ge_priv *mgp =
	    container_of(work, struct myri10ge_priv, watchdog_work);
	struct myri10ge_slice_state *ss;
	u32 reboot, rx_pause_cnt;
	int status, rebooted;
	int i;
	u16 cmd, vendor;
	int reset_needed = 0;
	int busy_slice_cnt = 0;

	mgp->watchdog_resets++;
	pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
	rebooted = 0;
	if ((cmd & PCI_COMMAND_MASTER) == 0) {
		/* Bus master DMA disabled?  Check to see
		 * if the card rebooted due to a parity error
		 * For now, just report it */
		reboot = myri10ge_read_reboot(mgp);
		netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
			   reboot, myri10ge_reset_recover ? "" : " not");
		if (myri10ge_reset_recover == 0)
			return;
		rtnl_lock();
		mgp->rebooted = 1;
		rebooted = 1;
		myri10ge_close(mgp->dev);
		myri10ge_reset_recover--;
		mgp->rebooted = 0;
		/*
		 * A rebooted nic will come back with config space as
		 * it was after power was applied to PCIe bus.
		 * Attempt to restore config space which was saved
		 * when the driver was loaded, or the last time the
		 * nic was resumed from power saving mode.
		 */
		pci_restore_state(mgp->pdev);

		/* save state again for accounting reasons */
		pci_save_state(mgp->pdev);

	} else {
		/* if we get back -1's from our slot, perhaps somebody
		 * powered off our card.  Don't try to reset it in
		 * this case */
		if (cmd == 0xffff) {
			pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
			if (vendor == 0xffff) {
				netdev_err(mgp->dev, "device disappeared!\n");
				return;
			}
		}
		/* Perhaps it is a software error. See if stuck slice
		 * has recovered, reset if not */
		rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
		for (i = 0; i < mgp->num_slices; i++) {
			ss = &mgp->ss[i];
			if (ss->stuck) {
				myri10ge_check_slice(ss, &reset_needed,
						     &busy_slice_cnt,
						     rx_pause_cnt);
				ss->stuck = 0;
			}
		}
		if (!reset_needed) {
			netdev_dbg(mgp->dev, "not resetting\n");
			return;
		}

		netdev_err(mgp->dev, "device timeout, resetting\n");
	}

	if (!rebooted) {
		rtnl_lock();
		myri10ge_close(mgp->dev);
	}
	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0)
		netdev_err(mgp->dev, "failed to load firmware\n");
	else
		myri10ge_open(mgp->dev);
	rtnl_unlock();
}

/*
 * We use our own timer routine rather than relying upon
 * netdev->tx_timeout because we have a very large hardware transmit
 * queue.  Due to the large queue, the netdev->tx_timeout function
 * cannot detect a NIC with a parity error in a timely fashion if the
 * NIC is lightly loaded.
 */
static void myri10ge_watchdog_timer(struct timer_list *t)
{
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_state *ss;
	int i, reset_needed, busy_slice_cnt;
	u32 rx_pause_cnt;
	u16 cmd;

	mgp = from_timer(mgp, t, watchdog_timer);

	rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
	busy_slice_cnt = 0;
	for (i = 0, reset_needed = 0;
	     i < mgp->num_slices && reset_needed == 0; ++i) {

		ss = &mgp->ss[i];
		if (ss->rx_small.watchdog_needed) {
			myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
						mgp->small_bytes + MXGEFW_PAD,
						1);
			if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
			    myri10ge_fill_thresh)
				ss->rx_small.watchdog_needed = 0;
		}
		if (ss->rx_big.watchdog_needed) {
			myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
						mgp->big_bytes, 1);
			if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
			    myri10ge_fill_thresh)
				ss->rx_big.watchdog_needed = 0;
		}
		myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
				     rx_pause_cnt);
	}
	/* if we've sent or received no traffic, poll the NIC to
	 * ensure it is still there.  Otherwise, we risk not noticing
	 * an error in a timely fashion */
	if (busy_slice_cnt == 0) {
		pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
		if ((cmd & PCI_COMMAND_MASTER) == 0) {
			reset_needed = 1;
		}
	}
	mgp->watchdog_pause = rx_pause_cnt;

	if (reset_needed) {
		schedule_work(&mgp->watchdog_work);
	} else {
		/* rearm timer */
		mod_timer(&mgp->watchdog_timer,
			  jiffies + myri10ge_watchdog_timeout * HZ);
	}
}
static void myri10ge_free_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_slice_state *ss;
	struct pci_dev *pdev = mgp->pdev;
	size_t bytes;
	int i;

	if (mgp->ss == NULL)
		return;

	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (ss->rx_done.entry != NULL) {
			bytes = mgp->max_intr_slots *
			    sizeof(*ss->rx_done.entry);
			dma_free_coherent(&pdev->dev, bytes,
					  ss->rx_done.entry, ss->rx_done.bus);
			ss->rx_done.entry = NULL;
		}
		if (ss->fw_stats != NULL) {
			bytes = sizeof(*ss->fw_stats);
			dma_free_coherent(&pdev->dev, bytes,
					  ss->fw_stats, ss->fw_stats_bus);
			ss->fw_stats = NULL;
		}
		napi_hash_del(&ss->napi);
		netif_napi_del(&ss->napi);
	}
	/* Wait till napi structs are no longer used, and then free ss. */
	synchronize_rcu();
	kfree(mgp->ss);
	mgp->ss = NULL;
}
static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_slice_state *ss;
	struct pci_dev *pdev = mgp->pdev;
	size_t bytes;
	int i;

	bytes = sizeof(*mgp->ss) * mgp->num_slices;
	mgp->ss = kzalloc(bytes, GFP_KERNEL);
	if (mgp->ss == NULL) {
		return -ENOMEM;
	}

	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
		ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
						       &ss->rx_done.bus,
						       GFP_KERNEL);
		if (ss->rx_done.entry == NULL)
			goto abort;
		bytes = sizeof(*ss->fw_stats);
		ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
						  &ss->fw_stats_bus,
						  GFP_KERNEL);
		if (ss->fw_stats == NULL)
			goto abort;
		ss->mgp = mgp;
		ss->dev = mgp->dev;
		netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
			       myri10ge_napi_weight);
	}
	return 0;
abort:
	myri10ge_free_slices(mgp);
	return -ENOMEM;
}

/*
 * This function determines the number of slices supported.
 * The number of slices is the minimum of the number of CPUS,
 * the number of MSI-X irqs supported, and the number of slices
 * supported by the firmware.
 */
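/* Illustrative example: on a 12-CPU host whose firmware reports 16 RSS
 * queues, num_slices starts at min(12, 16) = 12, and the MSI-X loop
 * below rounds it down to the power of two it can actually allocate
 * vectors for, typically 8. */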
static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct pci_dev *pdev = mgp->pdev;
	char *old_fw;
	bool old_allocated;
	int i, status, ncpus;

	mgp->num_slices = 1;
	ncpus = netif_get_num_default_rss_queues();

	if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
	    (myri10ge_max_slices == -1 && ncpus < 2))
		return;

	/* try to load the slice aware rss firmware */
	old_fw = mgp->fw_name;
	old_allocated = mgp->fw_name_allocated;
	/* don't free old_fw if we override it. */
	mgp->fw_name_allocated = false;

	if (myri10ge_fw_name != NULL) {
		dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
			 myri10ge_fw_name);
		set_fw_name(mgp, myri10ge_fw_name, false);
	} else if (old_fw == myri10ge_fw_aligned)
		set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
	else
		set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
	status = myri10ge_load_firmware(mgp, 0);
	if (status != 0) {
		dev_info(&pdev->dev, "Rss firmware not found\n");
		if (old_allocated)
			kfree(old_fw);
		return;
	}

	/* hit the board with a reset to ensure it is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		goto abort_with_fw;
	}

	mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);

	/* tell it the size of the interrupt queues */
	cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
		goto abort_with_fw;
	}

	/* ask the maximum number of slices it supports */
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
	if (status != 0)
		goto abort_with_fw;
	else
		mgp->num_slices = cmd.data0;

	/* Only allow multiple slices if MSI-X is usable */
	if (!myri10ge_msi) {
		goto abort_with_fw;
	}

	/* if the admin did not specify a limit to how many
	 * slices we should use, cap it automatically to the
	 * number of CPUs currently online */
	if (myri10ge_max_slices == -1)
		myri10ge_max_slices = ncpus;

	if (mgp->num_slices > myri10ge_max_slices)
		mgp->num_slices = myri10ge_max_slices;

	/* Now try to allocate as many MSI-X vectors as we have
	 * slices. We give up on MSI-X if we can only get a single
	 * vector. */

	mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
				    GFP_KERNEL);
	if (mgp->msix_vectors == NULL)
		goto no_msix;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->msix_vectors[i].entry = i;
	}

	while (mgp->num_slices > 1) {
		mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
		if (mgp->num_slices == 1)
			goto no_msix;
		status = pci_enable_msix_range(pdev,
					       mgp->msix_vectors,
					       mgp->num_slices,
					       mgp->num_slices);
		if (status < 0)
			goto no_msix;

		pci_disable_msix(pdev);

		if (status == mgp->num_slices) {
			/* got the full range we asked for */
			break;
		} else {
			/* retry with however many vectors we got */
			mgp->num_slices = status;
		}
	}
	return;

no_msix:
	if (mgp->msix_vectors != NULL) {
		kfree(mgp->msix_vectors);
		mgp->msix_vectors = NULL;
	}

abort_with_fw:
	mgp->num_slices = 1;
	set_fw_name(mgp, old_fw, old_allocated);
	myri10ge_load_firmware(mgp, 0);
}
static const struct net_device_ops myri10ge_netdev_ops = {
	.ndo_open = myri10ge_open,
	.ndo_stop = myri10ge_close,
	.ndo_start_xmit = myri10ge_xmit,
	.ndo_get_stats64 = myri10ge_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = myri10ge_change_mtu,
	.ndo_set_rx_mode = myri10ge_set_multicast_list,
	.ndo_set_mac_address = myri10ge_set_mac_address,
};
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct myri10ge_priv *mgp;
	struct device *dev = &pdev->dev;
	int i;
	int status = -ENXIO;
	int dac_enabled;
	unsigned hdr_offset, ss_offset;
	static int board_number;

	netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
	if (netdev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	mgp = netdev_priv(netdev);
	mgp->dev = netdev;
	mgp->pdev = pdev;
	mgp->pause = myri10ge_flow_control;
	mgp->intr_coal_delay = myri10ge_intr_coal_delay;
	mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
	mgp->board_number = board_number;
	init_waitqueue_head(&mgp->down_wq);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "pci_enable_device call failed\n");
		status = -ENODEV;
		goto abort_with_netdev;
	}

	/* Find the vendor-specific cap so we can check
	 * the reboot register later on */
	mgp->vendor_specific_offset
	    = pci_find_capability(pdev, PCI_CAP_ID_VNDR);

	/* Set our max read request to 4KB */
	status = pcie_set_readrq(pdev, 4096);
	if (status != 0) {
		dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
			status);
		goto abort_with_enabled;
	}

	myri10ge_mask_surprise_down(pdev);
	pci_set_master(pdev);
	dac_enabled = 1;
	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (status != 0) {
		dac_enabled = 0;
		dev_err(&pdev->dev,
			"64-bit pci address mask was refused, "
			"trying 32-bit\n");
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}
	if (status != 0) {
		dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
		goto abort_with_enabled;
	}
	(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
				      &mgp->cmd_bus, GFP_KERNEL);
	if (mgp->cmd == NULL) {
		status = -ENOMEM;
		goto abort_with_enabled;
	}

	mgp->board_span = pci_resource_len(pdev, 0);
	mgp->iomem_base = pci_resource_start(pdev, 0);
	mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
	mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
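	/* Note: mapping the SRAM aperture write-combining is what makes
	 * the 32-byte PIO bursts in myri10ge_submit_req() efficient;
	 * wc_cookie records the MTRR/PAT handle so the mapping can be
	 * released on teardown. */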
	if (mgp->sram == NULL) {
		dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
			mgp->board_span, mgp->iomem_base);
		status = -ENXIO;
		goto abort_with_mtrr;
	}
	hdr_offset =
	    swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
	ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
	mgp->sram_size = swab32(readl(mgp->sram + ss_offset));
	if (mgp->sram_size > mgp->board_span ||
	    mgp->sram_size <= MYRI10GE_FW_OFFSET) {
		dev_err(&pdev->dev,
			"invalid sram_size %dB or board span %ldB\n",
			mgp->sram_size, mgp->board_span);
		status = -EINVAL;
		goto abort_with_ioremap;
	}
	memcpy_fromio(mgp->eeprom_strings,
		      mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
	memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
	status = myri10ge_read_mac_addr(mgp);
	if (status)
		goto abort_with_ioremap;

	for (i = 0; i < ETH_ALEN; i++)
		netdev->dev_addr[i] = mgp->mac_addr[i];

	myri10ge_select_firmware(mgp);

	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0) {
		dev_err(&pdev->dev, "failed to load firmware\n");
		goto abort_with_ioremap;
	}
	myri10ge_probe_slices(mgp);
	status = myri10ge_alloc_slices(mgp);
	if (status != 0) {
		dev_err(&pdev->dev, "failed to alloc slice state\n");
		goto abort_with_firmware;
	}
	netif_set_real_num_tx_queues(netdev, mgp->num_slices);
	netif_set_real_num_rx_queues(netdev, mgp->num_slices);
	status = myri10ge_reset(mgp);
	if (status != 0) {
		dev_err(&pdev->dev, "failed reset\n");
		goto abort_with_slices;
	}
#ifdef CONFIG_MYRI10GE_DCA
	myri10ge_setup_dca(mgp);
#endif
	pci_set_drvdata(pdev, mgp);

	/* MTU range: 68 - 9000 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;

	if (myri10ge_initial_mtu > netdev->max_mtu)
		myri10ge_initial_mtu = netdev->max_mtu;
	if (myri10ge_initial_mtu < netdev->min_mtu)
		myri10ge_initial_mtu = netdev->min_mtu;

	netdev->mtu = myri10ge_initial_mtu;

	netdev->netdev_ops = &myri10ge_netdev_ops;
	netdev->hw_features = mgp->features | NETIF_F_RXCSUM;

	/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

	netdev->features = netdev->hw_features;

	if (dac_enabled)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= mgp->features;
	if (mgp->fw_ver_tiny < 37)
		netdev->vlan_features &= ~NETIF_F_TSO6;
	if (mgp->fw_ver_tiny < 32)
		netdev->vlan_features &= ~NETIF_F_TSO;

	/* make sure we can get an irq, and that MSI can be
	 * setup (if available). */
	status = myri10ge_request_irq(mgp);
	if (status != 0)
		goto abort_with_firmware;
	myri10ge_free_irq(mgp);

	/* Save configuration space to be restored if the
	 * nic resets due to a parity error */
	pci_save_state(pdev);

	/* Setup the watchdog timer */
	timer_setup(&mgp->watchdog_timer, myri10ge_watchdog_timer, 0);

	netdev->ethtool_ops = &myri10ge_ethtool_ops;
	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
	status = register_netdev(netdev);
	if (status != 0) {
		dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
		goto abort_with_state;
	}
	if (mgp->msix_enabled)
		dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
			 mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
			 (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
	else
		dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
			 mgp->msi_enabled ? "MSI" : "xPIC",
			 pdev->irq, mgp->tx_boundary, mgp->fw_name,
			 (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));

	board_number++;
	return 0;

abort_with_state:
	pci_restore_state(pdev);

abort_with_slices:
	myri10ge_free_slices(mgp);

abort_with_firmware:
	myri10ge_dummy_rdma(mgp, 0);

abort_with_ioremap:
	if (mgp->mac_addr_string != NULL)
		dev_err(&pdev->dev,
			"myri10ge_probe() failed: MAC=%s, SN=%ld\n",
			mgp->mac_addr_string, mgp->serial_number);
	iounmap(mgp->sram);

abort_with_mtrr:
	arch_phys_wc_del(mgp->wc_cookie);
	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
			  mgp->cmd, mgp->cmd_bus);

abort_with_enabled:
	pci_disable_device(pdev);

abort_with_netdev:
	set_fw_name(mgp, NULL, false);
	free_netdev(netdev);
	return status;
}

/*
 * Does what is necessary to shutdown one Myrinet device. Called
 * once for each Myrinet card by the kernel when a module is
 * unloaded.
 */
static void myri10ge_remove(struct pci_dev *pdev)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return;

	cancel_work_sync(&mgp->watchdog_work);
	netdev = mgp->dev;
	unregister_netdev(netdev);

#ifdef CONFIG_MYRI10GE_DCA
	myri10ge_teardown_dca(mgp);
#endif
	myri10ge_dummy_rdma(mgp, 0);

	/* avoid a memory leak */
	pci_restore_state(pdev);

	iounmap(mgp->sram);
	arch_phys_wc_del(mgp->wc_cookie);
	myri10ge_free_slices(mgp);
	kfree(mgp->msix_vectors);
	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
			  mgp->cmd, mgp->cmd_bus);

	set_fw_name(mgp, NULL, false);
	free_netdev(netdev);
	pci_disable_device(pdev);
}

#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E	0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9	0x0009

static const struct pci_device_id myri10ge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
	{PCI_DEVICE
	 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
	{0},
};

MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);

static struct pci_driver myri10ge_driver = {
	.name = "myri10ge",
	.probe = myri10ge_probe,
	.remove = myri10ge_remove,
	.id_table = myri10ge_pci_tbl,
#ifdef CONFIG_PM
	.suspend = myri10ge_suspend,
	.resume = myri10ge_resume,
#endif
};

#ifdef CONFIG_MYRI10GE_DCA
static int
myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
{
	int err = driver_for_each_device(&myri10ge_driver.driver,
					 NULL, &event,
					 myri10ge_notify_dca_device);

	if (err)
		return NOTIFY_BAD;
	return NOTIFY_DONE;
}

static struct notifier_block myri10ge_dca_notifier = {
	.notifier_call = myri10ge_notify_dca,
	.next = NULL,
	.priority = 0,
};
#endif /* CONFIG_MYRI10GE_DCA */

static __init int myri10ge_init_module(void)
{
	pr_info("Version %s\n", MYRI10GE_VERSION_STR);

	if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
		pr_err("Illegal rss hash type %d, defaulting to source port\n",
		       myri10ge_rss_hash);
		myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
	}
#ifdef CONFIG_MYRI10GE_DCA
	dca_register_notify(&myri10ge_dca_notifier);
#endif
	if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
		myri10ge_max_slices = MYRI10GE_MAX_SLICES;

	return pci_register_driver(&myri10ge_driver);
}

module_init(myri10ge_init_module);

static __exit void myri10ge_cleanup_module(void)
{
#ifdef CONFIG_MYRI10GE_DCA
	dca_unregister_notify(&myri10ge_dca_notifier);
#endif
	pci_unregister_driver(&myri10ge_driver);
}

module_exit(myri10ge_cleanup_module);