/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static const struct pci_device_id qlge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

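/* Spin on the hardware semaphore, retrying for up to ~3 ms
 * (30 tries, 100 us apart) before timing out.
 */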
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status = ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		eth_zero_addr(zero_mac_addr);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

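/* INTR_EN writes appear to carry a bit mask in the upper 16 bits that
 * selects which bits in the lower half are modified, matching the
 * mask-and-value pattern used for the CFG and STS registers below.
 */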
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

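/* Validate the flash image: check the 4-byte signature, then verify
 * that the 16-bit words of the image sum to zero.
 */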
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i + offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr, mac_addr, qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i + offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

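/* Bring up an 8000-series port: fetch the MPI firmware version and
 * state, then leave the TX/RX frame-size setup to the port-config
 * worker queued below.
 */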
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;

	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status = ql_write_xgmac_reg(qdev, MAC_TX_PARAMS,
			MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status = ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

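/* Get the next large-buffer descriptor, syncing its page chunk for CPU
 * access.  The master page is unmapped once its last chunk is consumed.
 */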
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];

	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

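/* Hand out the next lbq_buf_size chunk of the current master page,
 * allocating and mapping a fresh page of order lbq_buf_order when the
 * previous one is exhausted.  Every chunk except the last takes an
 * extra page reference so the page survives until all chunks are used.
 */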
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;

		rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			rx_ring->pg_chunk.page = NULL;
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
						"Could not get a page chunk, i=%d, clean_idx = %d.\n",
						i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;

	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

1306 /* Map the buffers for this transmit.  This will return
1307  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1308  */
1309 static int ql_map_send(struct ql_adapter *qdev,
1310                        struct ob_mac_iocb_req *mac_iocb_ptr,
1311                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1312 {
1313         int len = skb_headlen(skb);
1314         dma_addr_t map;
1315         int frag_idx, err, map_idx = 0;
1316         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1317         int frag_cnt = skb_shinfo(skb)->nr_frags;
1318
1319         if (frag_cnt) {
1320                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1321                              "frag_cnt = %d.\n", frag_cnt);
1322         }
1323         /*
1324          * Map the skb buffer first.
1325          */
1326         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1327
1328         err = pci_dma_mapping_error(qdev->pdev, map);
1329         if (err) {
1330                 netif_err(qdev, tx_queued, qdev->ndev,
1331                           "PCI mapping failed with error: %d\n", err);
1332
1333                 return NETDEV_TX_BUSY;
1334         }
1335
1336         tbd->len = cpu_to_le32(len);
1337         tbd->addr = cpu_to_le64(map);
1338         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1339         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1340         map_idx++;
1341
1342         /*
1343          * This loop fills the remainder of the 8 address descriptors
1344          * in the IOCB.  If there are more than 7 fragments, then the
1345          * eighth address desc will point to an external list (OAL).
1346          * When this happens, the remainder of the frags will be stored
1347          * in this list.
1348          */
1349         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1350                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1351                 tbd++;
1352                 if (frag_idx == 6 && frag_cnt > 7) {
1353                         /* Let's tack on an sglist.
1354                          * Our control block will now
1355                          * look like this:
1356                          * iocb->seg[0] = skb->data
1357                          * iocb->seg[1] = frag[0]
1358                          * iocb->seg[2] = frag[1]
1359                          * iocb->seg[3] = frag[2]
1360                          * iocb->seg[4] = frag[3]
1361                          * iocb->seg[5] = frag[4]
1362                          * iocb->seg[6] = frag[5]
1363                          * iocb->seg[7] = ptr to OAL (external sglist)
1364                          * oal->seg[0] = frag[6]
1365                          * oal->seg[1] = frag[7]
1366                          * oal->seg[2] = frag[8]
1367                          * oal->seg[3] = frag[9]
1368                          * oal->seg[4] = frag[10]
1369                          *      etc...
1370                          */
1371                         /* Tack on the OAL in the eighth segment of IOCB. */
1372                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1373                                              sizeof(struct oal),
1374                                              PCI_DMA_TODEVICE);
1375                         err = pci_dma_mapping_error(qdev->pdev, map);
1376                         if (err) {
1377                                 netif_err(qdev, tx_queued, qdev->ndev,
1378                                           "PCI mapping of outbound address list failed with error: %d\n",
1379                                           err);
1380                                 goto map_error;
1381                         }
1382
1383                         tbd->addr = cpu_to_le64(map);
1384                         /*
1385                          * The length is the number of fragments
1386                          * that remain to be mapped times the length
1387                          * of our sglist (OAL).
1388                          */
1389                         tbd->len =
1390                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1391                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1392                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1393                                            map);
1394                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1395                                           sizeof(struct oal));
1396                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1397                         map_idx++;
1398                 }
1399
1400                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1401                                        DMA_TO_DEVICE);
1402
1403                 err = dma_mapping_error(&qdev->pdev->dev, map);
1404                 if (err) {
1405                         netif_err(qdev, tx_queued, qdev->ndev,
1406                                   "PCI mapping frags failed with error: %d.\n",
1407                                   err);
1408                         goto map_error;
1409                 }
1410
1411                 tbd->addr = cpu_to_le64(map);
1412                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1413                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1414                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1415                                   skb_frag_size(frag));
1416
1417         }
1418         /* Save the number of segments we've mapped. */
1419         tx_ring_desc->map_cnt = map_idx;
1420         /* Terminate the last segment. */
1421         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1422         return NETDEV_TX_OK;
1423
1424 map_error:
1425         /*
1426          * map_error is only reached after skb->data was mapped, so
1427          * map_idx is at least 1.  We pass in the number of segments
1428          * that mapped successfully (including the skb->data area and
1429          * possibly the OAL) so they can be unmapped.
1430          */
1431         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1432         return NETDEV_TX_BUSY;
1433 }
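
     /* The map[] bookkeeping above: tx_ring_desc->map[] records one
      * {mapaddr, maplen} pair for every DMA mapping made in
      * ql_map_send() -- the skb head, each page frag, and (when more
      * than seven frags exist) the OAL itself -- so ql_unmap_send()
      * can walk map[0..map_cnt-1] and undo every mapping on normal
      * completion or on a mapping error.
      */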
1434
1435 /* Categorize receive firmware frame errors. */
1436 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1437                                  struct rx_ring *rx_ring)
1438 {
1439         struct nic_stats *stats = &qdev->nic_stats;
1440
1441         stats->rx_err_count++;
1442         rx_ring->rx_errors++;
1443
1444         switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1445         case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1446                 stats->rx_code_err++;
1447                 break;
1448         case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1449                 stats->rx_oversize_err++;
1450                 break;
1451         case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1452                 stats->rx_undersize_err++;
1453                 break;
1454         case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1455                 stats->rx_preamble_err++;
1456                 break;
1457         case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1458                 stats->rx_frame_len_err++;
1459                 break;
1460         case IB_MAC_IOCB_RSP_ERR_CRC:
1461                 stats->rx_crc_err++; /* fall through */
1462         default:
1463                 break;
1464         }
1465 }
1466
1467 /**
1468  * ql_update_mac_hdr_len - helper routine to update the mac header length
1469  * based on vlan tags if present
1470  */
1471 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1472                                   struct ib_mac_iocb_rsp *ib_mac_rsp,
1473                                   void *page, size_t *len)
1474 {
1475         u16 *tags;
1476
1477         if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1478                 return;
1479         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1480                 tags = (u16 *)page;
1481                 /* Look for stacked vlan tags in ethertype field */
1482                 if (tags[6] == ETH_P_8021Q &&
1483                     tags[8] == ETH_P_8021Q)
1484                         *len += 2 * VLAN_HLEN;
1485                 else
1486                         *len += VLAN_HLEN;
1487         }
1488 }
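
     /* Reading the check above: tags is the frame viewed as an array of
      * u16s, so tags[6] is the ethertype at byte offset 12 and tags[8]
      * is the inner ethertype at offset 16 -- the stacked-VLAN layout.
      * Note the values are compared against host-order ETH_P_8021Q
      * without a byte-order conversion, which presumably only matches
      * as intended when the buffer contents and host endianness agree.
      */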
1489
1490 /* Process an inbound completion from an rx ring. */
1491 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1492                                         struct rx_ring *rx_ring,
1493                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1494                                         u32 length,
1495                                         u16 vlan_id)
1496 {
1497         struct sk_buff *skb;
1498         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1499         struct napi_struct *napi = &rx_ring->napi;
1500
1501         /* Frame error, so drop the packet. */
1502         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1503                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1504                 put_page(lbq_desc->p.pg_chunk.page);
1505                 return;
1506         }
1507         napi->dev = qdev->ndev;
1508
1509         skb = napi_get_frags(napi);
1510         if (!skb) {
1511                 netif_err(qdev, drv, qdev->ndev,
1512                           "Couldn't get an skb, exiting.\n");
1513                 rx_ring->rx_dropped++;
1514                 put_page(lbq_desc->p.pg_chunk.page);
1515                 return;
1516         }
1517         prefetch(lbq_desc->p.pg_chunk.va);
1518         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1519                              lbq_desc->p.pg_chunk.page,
1520                              lbq_desc->p.pg_chunk.offset,
1521                              length);
1522
1523         skb->len += length;
1524         skb->data_len += length;
1525         skb->truesize += length;
1526         skb_shinfo(skb)->nr_frags++;
1527
1528         rx_ring->rx_packets++;
1529         rx_ring->rx_bytes += length;
1530         skb->ip_summed = CHECKSUM_UNNECESSARY;
1531         skb_record_rx_queue(skb, rx_ring->cq_id);
1532         if (vlan_id != 0xffff)
1533                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1534         napi_gro_frags(napi);
1535 }
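
     /* The GRO page path never copies: napi_get_frags() hands back the
      * per-NAPI skb, the page chunk is attached as one more frag, and
      * napi_gro_frags() either merges it into an existing GRO flow or
      * pushes it up the stack.  The page reference is donated to the
      * skb, which is why the error paths above must put_page()
      * explicitly.
      */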
1536
1537 /* Process an inbound completion from an rx ring. */
1538 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1539                                         struct rx_ring *rx_ring,
1540                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1541                                         u32 length,
1542                                         u16 vlan_id)
1543 {
1544         struct net_device *ndev = qdev->ndev;
1545         struct sk_buff *skb = NULL;
1546         void *addr;
1547         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1548         struct napi_struct *napi = &rx_ring->napi;
1549         size_t hlen = ETH_HLEN;
1550
1551         skb = netdev_alloc_skb(ndev, length);
1552         if (!skb) {
1553                 rx_ring->rx_dropped++;
1554                 put_page(lbq_desc->p.pg_chunk.page);
1555                 return;
1556         }
1557
1558         addr = lbq_desc->p.pg_chunk.va;
1559         prefetch(addr);
1560
1561         /* Frame error, so drop the packet. */
1562         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1563                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1564                 goto err_out;
1565         }
1566
1567         /* Update the MAC header length. */
1568         ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1569
1570         /* The max framesize filter on this chip is set higher than
1571          * MTU since FCoE uses 2k frames.
1572          */
1573         if (length > ndev->mtu + hlen) {
1574                 netif_err(qdev, drv, qdev->ndev,
1575                           "Frame too long, dropping.\n");
1576                 rx_ring->rx_dropped++;
1577                 goto err_out;
1578         }
1579         skb_put_data(skb, addr, hlen);
1580         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1581                      "%d bytes of headers and data in large. Chain page to new skb.\n",
1582                      length);
1583         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1584                                 lbq_desc->p.pg_chunk.offset + hlen,
1585                                 length - hlen);
1586         skb->len += length - hlen;
1587         skb->data_len += length - hlen;
1588         skb->truesize += length - hlen;
1589
1590         rx_ring->rx_packets++;
1591         rx_ring->rx_bytes += skb->len;
1592         skb->protocol = eth_type_trans(skb, ndev);
1593         skb_checksum_none_assert(skb);
1594
1595         if ((ndev->features & NETIF_F_RXCSUM) &&
1596                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1597                 /* TCP frame. */
1598                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1599                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1600                                      "TCP checksum done!\n");
1601                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1602                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1603                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1604                         /* Unfragmented ipv4 UDP frame. */
1605                         struct iphdr *iph =
1606                                 (struct iphdr *)((u8 *)addr + hlen);
1607                         if (!(iph->frag_off &
1608                                 htons(IP_MF|IP_OFFSET))) {
1609                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1610                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1611                                              qdev->ndev,
1612                                              "UDP checksum done!\n");
1613                         }
1614                 }
1615         }
1616
1617         skb_record_rx_queue(skb, rx_ring->cq_id);
1618         if (vlan_id != 0xffff)
1619                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1620         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1621                 napi_gro_receive(napi, skb);
1622         else
1623                 netif_receive_skb(skb);
1624         return;
1625 err_out:
1626         dev_kfree_skb_any(skb);
1627         put_page(lbq_desc->p.pg_chunk.page);
1628 }
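
     /* Unlike the GRO path, this path copies only the first hlen bytes
      * into the linear area of a fresh skb and chains the remainder of
      * the page chunk as a frag, so eth_type_trans() and the IP/UDP
      * inspection above can read headers from skb->data directly.
      */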
1629
1630 /* Process an inbound completion from an rx ring. */
1631 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1632                                         struct rx_ring *rx_ring,
1633                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1634                                         u32 length,
1635                                         u16 vlan_id)
1636 {
1637         struct net_device *ndev = qdev->ndev;
1638         struct sk_buff *skb = NULL;
1639         struct sk_buff *new_skb = NULL;
1640         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1641
1642         skb = sbq_desc->p.skb;
1643         /* Allocate new_skb and copy */
1644         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1645         if (new_skb == NULL) {
1646                 rx_ring->rx_dropped++;
1647                 return;
1648         }
1649         skb_reserve(new_skb, NET_IP_ALIGN);
1650
1651         pci_dma_sync_single_for_cpu(qdev->pdev,
1652                                     dma_unmap_addr(sbq_desc, mapaddr),
1653                                     dma_unmap_len(sbq_desc, maplen),
1654                                     PCI_DMA_FROMDEVICE);
1655
1656         skb_put_data(new_skb, skb->data, length);
1657
1658         pci_dma_sync_single_for_device(qdev->pdev,
1659                                        dma_unmap_addr(sbq_desc, mapaddr),
1660                                        dma_unmap_len(sbq_desc, maplen),
1661                                        PCI_DMA_FROMDEVICE);
1662         skb = new_skb;
1663
1664         /* Frame error, so drop the packet. */
1665         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1666                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1667                 dev_kfree_skb_any(skb);
1668                 return;
1669         }
1670
1671         /* loopback self test for ethtool */
1672         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1673                 ql_check_lb_frame(qdev, skb);
1674                 dev_kfree_skb_any(skb);
1675                 return;
1676         }
1677
1678         /* The max framesize filter on this chip is set higher than
1679          * MTU since FCoE uses 2k frames.
1680          */
1681         if (skb->len > ndev->mtu + ETH_HLEN) {
1682                 dev_kfree_skb_any(skb);
1683                 rx_ring->rx_dropped++;
1684                 return;
1685         }
1686
1687         prefetch(skb->data);
1688         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1689                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690                              "%s Multicast.\n",
1691                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1692                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1693                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1694                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1695                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1696                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1697         }
1698         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1699                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1700                              "Promiscuous Packet.\n");
1701
1702         rx_ring->rx_packets++;
1703         rx_ring->rx_bytes += skb->len;
1704         skb->protocol = eth_type_trans(skb, ndev);
1705         skb_checksum_none_assert(skb);
1706
1707         /* If rx checksum is on, and there are no
1708          * csum or frame errors.
1709          */
1710         if ((ndev->features & NETIF_F_RXCSUM) &&
1711                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1712                 /* TCP frame. */
1713                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1714                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1715                                      "TCP checksum done!\n");
1716                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1717                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1718                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1719                         /* Unfragmented ipv4 UDP frame. */
1720                         struct iphdr *iph = (struct iphdr *) skb->data;
1721                         if (!(iph->frag_off &
1722                                 htons(IP_MF|IP_OFFSET))) {
1723                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1724                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1725                                              qdev->ndev,
1726                                              "UDP checksum done!\n");
1727                         }
1728                 }
1729         }
1730
1731         skb_record_rx_queue(skb, rx_ring->cq_id);
1732         if (vlan_id != 0xffff)
1733                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1734         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1735                 napi_gro_receive(&rx_ring->napi, skb);
1736         else
1737                 netif_receive_skb(skb);
1738 }
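
     /* Small-buffer receives are copied in full: the payload is copied
      * into new_skb between the dma_sync_for_cpu()/for_device() calls,
      * so the original small buffer stays mapped and can be handed
      * straight back to the hardware without a remap.
      */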
1739
1740 static void ql_realign_skb(struct sk_buff *skb, int len)
1741 {
1742         void *temp_addr = skb->data;
1743
1744         /* Undo the skb_reserve(skb,32) we did before
1745          * giving to hardware, and realign data on
1746          * a 2-byte boundary.
1747          */
1748         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1749         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1750         memmove(skb->data, temp_addr, len);
1751 }
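
     /* A worked example, assuming QLGE_SB_PAD is the 32 bytes reserved
      * above and NET_IP_ALIGN has its usual value of 2: skb->data and
      * skb->tail move back by 32 - 2 = 30 bytes, leaving the 14-byte
      * Ethernet header 2 bytes in so the IP header lands on a 4-byte
      * boundary after the memmove().
      */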
1752
1753 /*
1754  * This function builds an skb for the given inbound
1755  * completion.  It will be rewritten for readability in the near
1756  * future, but for now it works well.
1757  */
1758 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1759                                        struct rx_ring *rx_ring,
1760                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1761 {
1762         struct bq_desc *lbq_desc;
1763         struct bq_desc *sbq_desc;
1764         struct sk_buff *skb = NULL;
1765         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1766         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1767         size_t hlen = ETH_HLEN;
1768
1769         /*
1770          * Handle the header buffer if present.
1771          */
1772         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1773             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1774                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775                              "Header of %d bytes in small buffer.\n", hdr_len);
1776                 /*
1777                  * Headers fit nicely into a small buffer.
1778                  */
1779                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1780                 pci_unmap_single(qdev->pdev,
1781                                 dma_unmap_addr(sbq_desc, mapaddr),
1782                                 dma_unmap_len(sbq_desc, maplen),
1783                                 PCI_DMA_FROMDEVICE);
1784                 skb = sbq_desc->p.skb;
1785                 ql_realign_skb(skb, hdr_len);
1786                 skb_put(skb, hdr_len);
1787                 sbq_desc->p.skb = NULL;
1788         }
1789
1790         /*
1791          * Handle the data buffer(s).
1792          */
1793         if (unlikely(!length)) {        /* Is there data too? */
1794                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1795                              "No Data buffer in this packet.\n");
1796                 return skb;
1797         }
1798
1799         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1800                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1801                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1802                                      "Headers in small, data of %d bytes in small, combine them.\n",
1803                                      length);
1804                         /*
1805                          * Data is less than small buffer size so it's
1806                          * stuffed in a small buffer.
1807                          * For this case we append the data
1808                          * from the "data" small buffer to the "header" small
1809                          * buffer.
1810                          */
1811                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1812                         pci_dma_sync_single_for_cpu(qdev->pdev,
1813                                                     dma_unmap_addr
1814                                                     (sbq_desc, mapaddr),
1815                                                     dma_unmap_len
1816                                                     (sbq_desc, maplen),
1817                                                     PCI_DMA_FROMDEVICE);
1818                         skb_put_data(skb, sbq_desc->p.skb->data, length);
1819                         pci_dma_sync_single_for_device(qdev->pdev,
1820                                                        dma_unmap_addr
1821                                                        (sbq_desc,
1822                                                         mapaddr),
1823                                                        dma_unmap_len
1824                                                        (sbq_desc,
1825                                                         maplen),
1826                                                        PCI_DMA_FROMDEVICE);
1827                 } else {
1828                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1829                                      "%d bytes in a single small buffer.\n",
1830                                      length);
1831                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1832                         skb = sbq_desc->p.skb;
1833                         ql_realign_skb(skb, length);
1834                         skb_put(skb, length);
1835                         pci_unmap_single(qdev->pdev,
1836                                          dma_unmap_addr(sbq_desc,
1837                                                         mapaddr),
1838                                          dma_unmap_len(sbq_desc,
1839                                                        maplen),
1840                                          PCI_DMA_FROMDEVICE);
1841                         sbq_desc->p.skb = NULL;
1842                 }
1843         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1844                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1845                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1846                                      "Header in small, %d bytes in large. Chain large to small!\n",
1847                                      length);
1848                         /*
1849                          * The data is in a single large buffer.  We
1850                          * chain it to the header buffer's skb and let
1851                          * it rip.
1852                          */
1853                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1854                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1855                                      "Chaining page at offset = %d, for %d bytes to skb.\n",
1856                                      lbq_desc->p.pg_chunk.offset, length);
1857                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1858                                                 lbq_desc->p.pg_chunk.offset,
1859                                                 length);
1860                         skb->len += length;
1861                         skb->data_len += length;
1862                         skb->truesize += length;
1863                 } else {
1864                         /*
1865                          * The headers and data are in a single large buffer. We
1866                          * copy it to a new skb and let it go. This can happen with
1867                          * jumbo mtu on a non-TCP/UDP frame.
1868                          */
1869                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1870                         skb = netdev_alloc_skb(qdev->ndev, length);
1871                         if (skb == NULL) {
1872                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1873                                              "No skb available, drop the packet.\n");
1874                                 return NULL;
1875                         }
1876                         pci_unmap_page(qdev->pdev,
1877                                        dma_unmap_addr(lbq_desc,
1878                                                       mapaddr),
1879                                        dma_unmap_len(lbq_desc, maplen),
1880                                        PCI_DMA_FROMDEVICE);
1881                         skb_reserve(skb, NET_IP_ALIGN);
1882                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1883                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1884                                      length);
1885                         skb_fill_page_desc(skb, 0,
1886                                                 lbq_desc->p.pg_chunk.page,
1887                                                 lbq_desc->p.pg_chunk.offset,
1888                                                 length);
1889                         skb->len += length;
1890                         skb->data_len += length;
1891                         skb->truesize += length;
1892                         ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1893                                               lbq_desc->p.pg_chunk.va,
1894                                               &hlen);
1895                         __pskb_pull_tail(skb, hlen);
1896                 }
1897         } else {
1898         /*
1899          * The data is in a chain of large buffers
1900          * pointed to by a small buffer.  We loop
1901          * through and chain them to our small header
1902          * buffer's skb.
1903          * frags:  There are 18 frags max and our small
1904          *         buffer will hold 32 of them. In practice
1905          *         we'll use 3 at most for our 9000 byte jumbo
1906          *         frames.  If the MTU goes up we could
1907          *         eventually be in trouble.
1908          */
1909                 int size, i = 0;
1910                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1911                 pci_unmap_single(qdev->pdev,
1912                                  dma_unmap_addr(sbq_desc, mapaddr),
1913                                  dma_unmap_len(sbq_desc, maplen),
1914                                  PCI_DMA_FROMDEVICE);
1915                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1916                         /*
1917                          * This is a non-TCP/UDP IP frame, so
1918                          * the headers aren't split into a small
1919                          * buffer.  We have to use the small buffer
1920                          * that contains our sg list as our skb to
1921                          * send upstairs. Copy the sg list here to
1922                          * a local buffer and use it to find the
1923                          * pages to chain.
1924                          */
1925                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1926                                      "%d bytes of headers & data in chain of large.\n",
1927                                      length);
1928                         skb = sbq_desc->p.skb;
1929                         sbq_desc->p.skb = NULL;
1930                         skb_reserve(skb, NET_IP_ALIGN);
1931                 }
1932                 do {
1933                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1934                         size = (length < rx_ring->lbq_buf_size) ? length :
1935                                 rx_ring->lbq_buf_size;
1936
1937                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938                                      "Adding page %d to skb for %d bytes.\n",
1939                                      i, size);
1940                         skb_fill_page_desc(skb, i,
1941                                                 lbq_desc->p.pg_chunk.page,
1942                                                 lbq_desc->p.pg_chunk.offset,
1943                                                 size);
1944                         skb->len += size;
1945                         skb->data_len += size;
1946                         skb->truesize += size;
1947                         length -= size;
1948                         i++;
1949                 } while (length > 0);
1950                 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1951                                       &hlen);
1952                 __pskb_pull_tail(skb, hlen);
1953         }
1954         return skb;
1955 }
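
     /* Summary of the four receive layouts handled above:
      *   1. header split plus data in a small buffer: append the data
      *      to the header skb;
      *   2. data alone in a small buffer: reuse that buffer's skb;
      *   3. data in one large buffer: chain the page to the header skb,
      *      or copy the headers out of the page when there was no split;
      *   4. data spanning several large buffers: chain each page chunk
      *      as a frag until length is consumed, then pull the headers
      *      into the linear area.
      */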
1956
1957 /* Process an inbound completion from an rx ring. */
1958 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1959                                    struct rx_ring *rx_ring,
1960                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1961                                    u16 vlan_id)
1962 {
1963         struct net_device *ndev = qdev->ndev;
1964         struct sk_buff *skb = NULL;
1965
1966         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1967
1968         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1969         if (unlikely(!skb)) {
1970                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1971                              "No skb available, drop packet.\n");
1972                 rx_ring->rx_dropped++;
1973                 return;
1974         }
1975
1976         /* Frame error, so drop the packet. */
1977         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1978                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1979                 dev_kfree_skb_any(skb);
1980                 return;
1981         }
1982
1983         /* The max framesize filter on this chip is set higher than
1984          * MTU since FCoE uses 2k frames.
1985          */
1986         if (skb->len > ndev->mtu + ETH_HLEN) {
1987                 dev_kfree_skb_any(skb);
1988                 rx_ring->rx_dropped++;
1989                 return;
1990         }
1991
1992         /* loopback self test for ethtool */
1993         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1994                 ql_check_lb_frame(qdev, skb);
1995                 dev_kfree_skb_any(skb);
1996                 return;
1997         }
1998
1999         prefetch(skb->data);
2000         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
2001                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
2002                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2003                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
2004                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2005                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
2006                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2007                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2008                 rx_ring->rx_multicast++;
2009         }
2010         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2011                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012                              "Promiscuous Packet.\n");
2013         }
2014
2015         skb->protocol = eth_type_trans(skb, ndev);
2016         skb_checksum_none_assert(skb);
2017
2018         /* If rx checksum is on, and there are no
2019          * csum or frame errors.
2020          */
2021         if ((ndev->features & NETIF_F_RXCSUM) &&
2022                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2023                 /* TCP frame. */
2024                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2025                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2026                                      "TCP checksum done!\n");
2027                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2028                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2029                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2030                         /* Unfragmented ipv4 UDP frame. */
2031                         struct iphdr *iph = (struct iphdr *) skb->data;
2032                         if (!(iph->frag_off &
2033                                 htons(IP_MF|IP_OFFSET))) {
2034                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2035                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2036                                              "UDP checksum done!\n");
2037                         }
2038                 }
2039         }
2040
2041         rx_ring->rx_packets++;
2042         rx_ring->rx_bytes += skb->len;
2043         skb_record_rx_queue(skb, rx_ring->cq_id);
2044         if (vlan_id != 0xffff)
2045                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2046         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2047                 napi_gro_receive(&rx_ring->napi, skb);
2048         else
2049                 netif_receive_skb(skb);
2050 }
2051
2052 /* Process an inbound completion from an rx ring. */
2053 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2054                                         struct rx_ring *rx_ring,
2055                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2056 {
2057         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2058         u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2059                         (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2060                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2061                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2062
2063         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2064
2065         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2066                 /* The data and headers are split into
2067                  * separate buffers.
2068                  */
2069                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2070                                                 vlan_id);
2071         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2072                 /* The data fit in a single small buffer.
2073                  * Allocate a new skb, copy the data and
2074                  * return the buffer to the free pool.
2075                  */
2076                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2077                                                 length, vlan_id);
2078         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2079                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2080                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2081                 /* TCP packet in a page chunk that's been checksummed.
2082                  * Tack it on to our GRO skb and let it go.
2083                  */
2084                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2085                                                 length, vlan_id);
2086         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2087                 /* Non-TCP packet in a page chunk. Allocate an
2088                  * skb, tack it on frags, and send it up.
2089                  */
2090                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2091                                                 length, vlan_id);
2092         } else {
2093                 /* Non-TCP/UDP large frames that span multiple buffers
2094                  * can be processed correctly by the split frame logic.
2095                  */
2096                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2097                                                 vlan_id);
2098         }
2099
2100         return (unsigned long)length;
2101 }
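
     /* The return value is the payload length cast to unsigned long;
      * ql_clean_inbound_rx_ring() currently ignores it and counts
      * completions instead.
      */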
2102
2103 /* Process an outbound completion from an rx ring. */
2104 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2105                                    struct ob_mac_iocb_rsp *mac_rsp)
2106 {
2107         struct tx_ring *tx_ring;
2108         struct tx_ring_desc *tx_ring_desc;
2109
2110         QL_DUMP_OB_MAC_RSP(mac_rsp);
2111         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2112         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2113         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2114         tx_ring->tx_bytes += tx_ring_desc->skb->len;
2115         tx_ring->tx_packets++;
2116         dev_kfree_skb(tx_ring_desc->skb);
2117         tx_ring_desc->skb = NULL;
2118
2119         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2120                                         OB_MAC_IOCB_RSP_S |
2121                                         OB_MAC_IOCB_RSP_L |
2122                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2123                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2124                         netif_warn(qdev, tx_done, qdev->ndev,
2125                                    "Total descriptor length did not match transfer length.\n");
2126                 }
2127                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2128                         netif_warn(qdev, tx_done, qdev->ndev,
2129                                    "Frame too short to be valid, not sent.\n");
2130                 }
2131                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2132                         netif_warn(qdev, tx_done, qdev->ndev,
2133                                    "Frame too long, but sent anyway.\n");
2134                 }
2135                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2136                         netif_warn(qdev, tx_done, qdev->ndev,
2137                                    "PCI backplane error. Frame not sent.\n");
2138                 }
2139         }
2140         atomic_inc(&tx_ring->tx_count);
2141 }
2142
2143 /* Fire up a handler to reset the MPI processor. */
2144 void ql_queue_fw_error(struct ql_adapter *qdev)
2145 {
2146         ql_link_off(qdev);
2147         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2148 }
2149
2150 void ql_queue_asic_error(struct ql_adapter *qdev)
2151 {
2152         ql_link_off(qdev);
2153         ql_disable_interrupts(qdev);
2154         /* Clear adapter up bit to signal the recovery
2155          * process that it shouldn't kill the reset worker
2156          * thread
2157          */
2158         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2159         /* Set asic recovery bit to indicate reset process that we are
2160          * in fatal error recovery process rather than normal close
2161          */
2162         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2163         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2164 }
2165
2166 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2167                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2168 {
2169         switch (ib_ae_rsp->event) {
2170         case MGMT_ERR_EVENT:
2171                 netif_err(qdev, rx_err, qdev->ndev,
2172                           "Management Processor Fatal Error.\n");
2173                 ql_queue_fw_error(qdev);
2174                 return;
2175
2176         case CAM_LOOKUP_ERR_EVENT:
2177                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2178                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2179                 ql_queue_asic_error(qdev);
2180                 return;
2181
2182         case SOFT_ECC_ERROR_EVENT:
2183                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2184                 ql_queue_asic_error(qdev);
2185                 break;
2186
2187         case PCI_ERR_ANON_BUF_RD:
2188                 netdev_err(qdev->ndev,
2189                            "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2190                            ib_ae_rsp->q_id);
2191                 ql_queue_asic_error(qdev);
2192                 break;
2193
2194         default:
2195                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2196                           ib_ae_rsp->event);
2197                 ql_queue_asic_error(qdev);
2198                 break;
2199         }
2200 }
2201
2202 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2203 {
2204         struct ql_adapter *qdev = rx_ring->qdev;
2205         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2206         struct ob_mac_iocb_rsp *net_rsp = NULL;
2207         int count = 0;
2208
2209         struct tx_ring *tx_ring;
2210         /* While there are entries in the completion queue. */
2211         while (prod != rx_ring->cnsmr_idx) {
2212
2213                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2214                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2215                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2216
2217                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2218                 rmb();
2219                 switch (net_rsp->opcode) {
2220
2221                 case OPCODE_OB_MAC_TSO_IOCB:
2222                 case OPCODE_OB_MAC_IOCB:
2223                         ql_process_mac_tx_intr(qdev, net_rsp);
2224                         break;
2225                 default:
2226                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2227                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2228                                      net_rsp->opcode);
2229                 }
2230                 count++;
2231                 ql_update_cq(rx_ring);
2232                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2233         }
2234         if (!net_rsp)
2235                 return 0;
2236         ql_write_cq_idx(rx_ring);
2237         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2238         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2239                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2240                         /*
2241                          * The queue got stopped because the tx_ring was full.
2242                          * Wake it up, because it's now at least 25% empty.
2243                          */
2244                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2245         }
2246
2247         return count;
2248 }
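
     /* The wake threshold is wq_len / 4: a queue stopped for lack of
      * descriptors is only restarted once at least a quarter of its
      * entries are free again, which keeps it from bouncing between
      * stopped and awake on every reclaimed descriptor.
      */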
2249
2250 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2251 {
2252         struct ql_adapter *qdev = rx_ring->qdev;
2253         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2254         struct ql_net_rsp_iocb *net_rsp;
2255         int count = 0;
2256
2257         /* While there are entries in the completion queue. */
2258         while (prod != rx_ring->cnsmr_idx) {
2259
2260                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2261                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2262                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2263
2264                 net_rsp = rx_ring->curr_entry;
2265                 rmb();
2266                 switch (net_rsp->opcode) {
2267                 case OPCODE_IB_MAC_IOCB:
2268                         ql_process_mac_rx_intr(qdev, rx_ring,
2269                                                (struct ib_mac_iocb_rsp *)
2270                                                net_rsp);
2271                         break;
2272
2273                 case OPCODE_IB_AE_IOCB:
2274                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2275                                                 net_rsp);
2276                         break;
2277                 default:
2278                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2279                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2280                                      net_rsp->opcode);
2281                         break;
2282                 }
2283                 count++;
2284                 ql_update_cq(rx_ring);
2285                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2286                 if (count == budget)
2287                         break;
2288         }
2289         ql_update_buffer_queues(qdev, rx_ring);
2290         ql_write_cq_idx(rx_ring);
2291         return count;
2292 }
2293
2294 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2295 {
2296         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2297         struct ql_adapter *qdev = rx_ring->qdev;
2298         struct rx_ring *trx_ring;
2299         int i, work_done = 0;
2300         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2301
2302         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2303                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2304
2305         /* Service the TX rings first.  They start
2306          * right after the RSS rings. */
2307         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2308                 trx_ring = &qdev->rx_ring[i];
2309                 /* If this TX completion ring belongs to this vector and
2310                  * it's not empty then service it.
2311                  */
2312                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2313                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2314                                         trx_ring->cnsmr_idx)) {
2315                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2316                                      "%s: Servicing TX completion ring %d.\n",
2317                                      __func__, trx_ring->cq_id);
2318                         ql_clean_outbound_rx_ring(trx_ring);
2319                 }
2320         }
2321
2322         /*
2323          * Now service the RSS ring if it's active.
2324          */
2325         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2326                                         rx_ring->cnsmr_idx) {
2327                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2328                              "%s: Servicing RX completion ring %d.\n",
2329                              __func__, rx_ring->cq_id);
2330                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2331         }
2332
2333         if (work_done < budget) {
2334                 napi_complete_done(napi, work_done);
2335                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2336         }
2337         return work_done;
2338 }
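
     /* Standard NAPI contract: only when work_done < budget was the
      * ring drained completely, so only then do we napi_complete_done()
      * and re-arm the completion interrupt; otherwise the core polls
      * us again.
      */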
2339
2340 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2341 {
2342         struct ql_adapter *qdev = netdev_priv(ndev);
2343
2344         if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2345                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2346                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2347         } else {
2348                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2349         }
2350 }
2351
2352 /**
2353  * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2354  * based on the features to enable/disable hardware vlan accel
2355  */
2356 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2357                                         netdev_features_t features)
2358 {
2359         struct ql_adapter *qdev = netdev_priv(ndev);
2360         int status = 0;
2361         bool need_restart = netif_running(ndev);
2362
2363         if (need_restart) {
2364                 status = ql_adapter_down(qdev);
2365                 if (status) {
2366                         netif_err(qdev, link, qdev->ndev,
2367                                   "Failed to bring down the adapter\n");
2368                         return status;
2369                 }
2370         }
2371
2372         /* Update the features with the requested changes. */
2373         ndev->features = features;
2374
2375         if (need_restart) {
2376                 status = ql_adapter_up(qdev);
2377                 if (status) {
2378                         netif_err(qdev, link, qdev->ndev,
2379                                   "Failed to bring up the adapter\n");
2380                         return status;
2381                 }
2382         }
2383
2384         return status;
2385 }
2386
2387 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2388         netdev_features_t features)
2389 {
2390         int err;
2391
2392         /* Update the behavior of vlan accel in the adapter */
2393         err = qlge_update_hw_vlan_features(ndev, features);
2394         if (err)
2395                 return err;
2396
2397         return features;
2398 }
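
     /* Caution: an ndo_fix_features callback is expected to return a
      * (possibly adjusted) feature mask, so a negative errno propagated
      * from qlge_update_hw_vlan_features() here will be reinterpreted
      * as a feature mask by the networking core.
      */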
2399
2400 static int qlge_set_features(struct net_device *ndev,
2401         netdev_features_t features)
2402 {
2403         netdev_features_t changed = ndev->features ^ features;
2404
2405         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2406                 qlge_vlan_mode(ndev, features);
2407
2408         return 0;
2409 }
2410
2411 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2412 {
2413         u32 enable_bit = MAC_ADDR_E;
2414         int err;
2415
2416         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2417                                   MAC_ADDR_TYPE_VLAN, vid);
2418         if (err)
2419                 netif_err(qdev, ifup, qdev->ndev,
2420                           "Failed to init vlan address.\n");
2421         return err;
2422 }
2423
2424 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2425 {
2426         struct ql_adapter *qdev = netdev_priv(ndev);
2427         int status;
2428         int err;
2429
2430         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2431         if (status)
2432                 return status;
2433
2434         err = __qlge_vlan_rx_add_vid(qdev, vid);
2435         set_bit(vid, qdev->active_vlans);
2436
2437         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2438
2439         return err;
2440 }
2441
2442 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2443 {
2444         u32 enable_bit = 0;
2445         int err;
2446
2447         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2448                                   MAC_ADDR_TYPE_VLAN, vid);
2449         if (err)
2450                 netif_err(qdev, ifup, qdev->ndev,
2451                           "Failed to clear vlan address.\n");
2452         return err;
2453 }
2454
2455 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2456 {
2457         struct ql_adapter *qdev = netdev_priv(ndev);
2458         int status;
2459         int err;
2460
2461         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2462         if (status)
2463                 return status;
2464
2465         err = __qlge_vlan_rx_kill_vid(qdev, vid);
2466         clear_bit(vid, qdev->active_vlans);
2467
2468         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2469
2470         return err;
2471 }
2472
2473 static void qlge_restore_vlan(struct ql_adapter *qdev)
2474 {
2475         int status;
2476         u16 vid;
2477
2478         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2479         if (status)
2480                 return;
2481
2482         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2483                 __qlge_vlan_rx_add_vid(qdev, vid);
2484
2485         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2486 }
2487
2488 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2489 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2490 {
2491         struct rx_ring *rx_ring = dev_id;
2492         napi_schedule(&rx_ring->napi);
2493         return IRQ_HANDLED;
2494 }
2495
2496 /* This handles a fatal error, MPI activity, and the default
2497  * rx_ring in an MSI-X multiple vector environment.
2498  * In an MSI/Legacy environment it also processes the rest of
2499  * the rx_rings.
2500  */
2501 static irqreturn_t qlge_isr(int irq, void *dev_id)
2502 {
2503         struct rx_ring *rx_ring = dev_id;
2504         struct ql_adapter *qdev = rx_ring->qdev;
2505         struct intr_context *intr_context = &qdev->intr_context[0];
2506         u32 var;
2507         int work_done = 0;
2508
2509         spin_lock(&qdev->hw_lock);
2510         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2511                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2512                              "Shared Interrupt, Not ours!\n");
2513                 spin_unlock(&qdev->hw_lock);
2514                 return IRQ_NONE;
2515         }
2516         spin_unlock(&qdev->hw_lock);
2517
2518         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2519
2520         /*
2521          * Check for fatal error.
2522          */
2523         if (var & STS_FE) {
2524                 ql_queue_asic_error(qdev);
2525                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2526                 var = ql_read32(qdev, ERR_STS);
2527                 netdev_err(qdev->ndev, "Resetting chip. "
2528                                         "Error Status Register = 0x%x\n", var);
2529                 return IRQ_HANDLED;
2530         }
2531
2532         /*
2533          * Check MPI processor activity.
2534          */
2535         if ((var & STS_PI) &&
2536                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2537                 /*
2538                  * We've got an async event or mailbox completion.
2539                  * Handle it and clear the source of the interrupt.
2540                  */
2541                 netif_err(qdev, intr, qdev->ndev,
2542                           "Got MPI processor interrupt.\n");
2543                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2544                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2545                 queue_delayed_work_on(smp_processor_id(),
2546                                 qdev->workqueue, &qdev->mpi_work, 0);
2547                 work_done++;
2548         }
2549
2550         /*
2551          * Get the bit-mask that shows the active queues for this
2552          * pass.  Compare it to the queues that this irq services
2553          * and call napi if there's a match.
2554          */
2555         var = ql_read32(qdev, ISR1);
2556         if (var & intr_context->irq_mask) {
2557                 netif_info(qdev, intr, qdev->ndev,
2558                            "Waking handler for rx_ring[0].\n");
2559                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2560                 napi_schedule(&rx_ring->napi);
2561                 work_done++;
2562         }
2563         ql_enable_completion_interrupt(qdev, intr_context->intr);
2564         return work_done ? IRQ_HANDLED : IRQ_NONE;
2565 }
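
     /* The irq_cnt test above is the shared-line filter: while irq_cnt
      * is nonzero this vector's completion interrupt is masked at the
      * chip, so an interrupt arriving then must belong to another
      * device sharing the line and is answered with IRQ_NONE.
      */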
2566
2567 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2568 {
2569
2570         if (skb_is_gso(skb)) {
2571                 int err;
2572                 __be16 l3_proto = vlan_get_protocol(skb);
2573
2574                 err = skb_cow_head(skb, 0);
2575                 if (err < 0)
2576                         return err;
2577
2578                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2579                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2580                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2581                 mac_iocb_ptr->total_hdrs_len =
2582                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2583                 mac_iocb_ptr->net_trans_offset =
2584                     cpu_to_le16(skb_network_offset(skb) |
2585                                 skb_transport_offset(skb)
2586                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2587                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2588                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2589                 if (likely(l3_proto == htons(ETH_P_IP))) {
2590                         struct iphdr *iph = ip_hdr(skb);
2591                         iph->check = 0;
2592                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2593                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2594                                                                  iph->daddr, 0,
2595                                                                  IPPROTO_TCP,
2596                                                                  0);
2597                 } else if (l3_proto == htons(ETH_P_IPV6)) {
2598                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2599                         tcp_hdr(skb)->check =
2600                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2601                                              &ipv6_hdr(skb)->daddr,
2602                                              0, IPPROTO_TCP, 0);
2603                 }
2604                 return 1;
2605         }
2606         return 0;
2607 }
2608
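/* Build the checksum-offload fields of the outbound MAC IOCB for a
 * non-GSO IPv4 TCP or UDP frame. The transport checksum is seeded with
 * the pseudo-header sum so the hardware can finish the calculation.
 */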
2609 static void ql_hw_csum_setup(struct sk_buff *skb,
2610                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2611 {
2612         int len;
2613         struct iphdr *iph = ip_hdr(skb);
2614         __sum16 *check;
2615         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2616         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2617         mac_iocb_ptr->net_trans_offset =
2618                 cpu_to_le16(skb_network_offset(skb) |
2619                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2620
2621         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2622         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2623         if (likely(iph->protocol == IPPROTO_TCP)) {
2624                 check = &(tcp_hdr(skb)->check);
2625                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2626                 mac_iocb_ptr->total_hdrs_len =
2627                     cpu_to_le16(skb_transport_offset(skb) +
2628                                 (tcp_hdr(skb)->doff << 2));
2629         } else {
2630                 check = &(udp_hdr(skb)->check);
2631                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2632                 mac_iocb_ptr->total_hdrs_len =
2633                     cpu_to_le16(skb_transport_offset(skb) +
2634                                 sizeof(struct udphdr));
2635         }
2636         *check = ~csum_tcpudp_magic(iph->saddr,
2637                                     iph->daddr, len, iph->protocol, 0);
2638 }
2639
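/* Main transmit entry point: build a MAC IOCB for the skb (applying TSO
 * or checksum offload as needed), map its fragments for DMA, then ring
 * the producer-index doorbell to hand the frame to the hardware.
 */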
2640 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2641 {
2642         struct tx_ring_desc *tx_ring_desc;
2643         struct ob_mac_iocb_req *mac_iocb_ptr;
2644         struct ql_adapter *qdev = netdev_priv(ndev);
2645         int tso;
2646         struct tx_ring *tx_ring;
2647         u32 tx_ring_idx = (u32) skb->queue_mapping;
2648
2649         tx_ring = &qdev->tx_ring[tx_ring_idx];
2650
2651         if (skb_padto(skb, ETH_ZLEN))
2652                 return NETDEV_TX_OK;
2653
2654         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2655                 netif_info(qdev, tx_queued, qdev->ndev,
2656                            "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2657                            __func__, tx_ring_idx);
2658                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2659                 tx_ring->tx_errors++;
2660                 return NETDEV_TX_BUSY;
2661         }
2662         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2663         mac_iocb_ptr = tx_ring_desc->queue_entry;
2664         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2665
2666         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2667         mac_iocb_ptr->tid = tx_ring_desc->index;
2668         /* We use the upper 32-bits to store the tx queue for this IO.
2669          * When we get the completion we can use it to establish the context.
2670          */
2671         mac_iocb_ptr->txq_idx = tx_ring_idx;
2672         tx_ring_desc->skb = skb;
2673
2674         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2675
2676         if (skb_vlan_tag_present(skb)) {
2677                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2678                              "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2679                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2680                 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2681         }
2682         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2683         if (tso < 0) {
2684                 dev_kfree_skb_any(skb);
2685                 return NETDEV_TX_OK;
2686         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2687                 ql_hw_csum_setup(skb,
2688                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2689         }
2690         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2691                         NETDEV_TX_OK) {
2692                 netif_err(qdev, tx_queued, qdev->ndev,
2693                           "Could not map the segments.\n");
2694                 tx_ring->tx_errors++;
2695                 return NETDEV_TX_BUSY;
2696         }
2697         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2698         tx_ring->prod_idx++;
2699         if (tx_ring->prod_idx == tx_ring->wq_len)
2700                 tx_ring->prod_idx = 0;
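        /* Make sure the IOCB writes above are visible to the device
         * before the doorbell write below advances the producer index.
         */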
2701         wmb();
2702
2703         ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2704         mmiowb();
2705         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2706                      "tx queued, slot %d, len %d\n",
2707                      tx_ring->prod_idx, skb->len);
2708
2709         atomic_dec(&tx_ring->tx_count);
2710
2711         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2712                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2713                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2714                         /*
2715                          * The queue got stopped because the tx_ring was full.
2716                          * Wake it up, because it's now at least 25% empty.
2717                          */
2718                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2719         }
2720         return NETDEV_TX_OK;
2721 }
2722
2723
2724 static void ql_free_shadow_space(struct ql_adapter *qdev)
2725 {
2726         if (qdev->rx_ring_shadow_reg_area) {
2727                 pci_free_consistent(qdev->pdev,
2728                                     PAGE_SIZE,
2729                                     qdev->rx_ring_shadow_reg_area,
2730                                     qdev->rx_ring_shadow_reg_dma);
2731                 qdev->rx_ring_shadow_reg_area = NULL;
2732         }
2733         if (qdev->tx_ring_shadow_reg_area) {
2734                 pci_free_consistent(qdev->pdev,
2735                                     PAGE_SIZE,
2736                                     qdev->tx_ring_shadow_reg_area,
2737                                     qdev->tx_ring_shadow_reg_dma);
2738                 qdev->tx_ring_shadow_reg_area = NULL;
2739         }
2740 }
2741
2742 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2743 {
2744         qdev->rx_ring_shadow_reg_area =
2745                 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2746                                       &qdev->rx_ring_shadow_reg_dma);
2747         if (qdev->rx_ring_shadow_reg_area == NULL) {
2748                 netif_err(qdev, ifup, qdev->ndev,
2749                           "Allocation of RX shadow space failed.\n");
2750                 return -ENOMEM;
2751         }
2752
2753         qdev->tx_ring_shadow_reg_area =
2754                 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2755                                       &qdev->tx_ring_shadow_reg_dma);
2756         if (qdev->tx_ring_shadow_reg_area == NULL) {
2757                 netif_err(qdev, ifup, qdev->ndev,
2758                           "Allocation of TX shadow space failed.\n");
2759                 goto err_wqp_sh_area;
2760         }
2761         return 0;
2762
2763 err_wqp_sh_area:
2764         pci_free_consistent(qdev->pdev,
2765                             PAGE_SIZE,
2766                             qdev->rx_ring_shadow_reg_area,
2767                             qdev->rx_ring_shadow_reg_dma);
2768         return -ENOMEM;
2769 }
2770
2771 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2772 {
2773         struct tx_ring_desc *tx_ring_desc;
2774         int i;
2775         struct ob_mac_iocb_req *mac_iocb_ptr;
2776
2777         mac_iocb_ptr = tx_ring->wq_base;
2778         tx_ring_desc = tx_ring->q;
2779         for (i = 0; i < tx_ring->wq_len; i++) {
2780                 tx_ring_desc->index = i;
2781                 tx_ring_desc->skb = NULL;
2782                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2783                 mac_iocb_ptr++;
2784                 tx_ring_desc++;
2785         }
2786         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2787 }
2788
2789 static void ql_free_tx_resources(struct ql_adapter *qdev,
2790                                  struct tx_ring *tx_ring)
2791 {
2792         if (tx_ring->wq_base) {
2793                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2794                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2795                 tx_ring->wq_base = NULL;
2796         }
2797         kfree(tx_ring->q);
2798         tx_ring->q = NULL;
2799 }
2800
2801 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2802                                  struct tx_ring *tx_ring)
2803 {
2804         tx_ring->wq_base =
2805             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2806                                  &tx_ring->wq_base_dma);
2807
2808         if ((tx_ring->wq_base == NULL) ||
2809             (tx_ring->wq_base_dma & WQ_ADDR_ALIGN))
2810                 goto pci_alloc_err;
2811
2812         tx_ring->q =
2813             kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc), GFP_KERNEL);
2814         if (tx_ring->q == NULL)
2815                 goto err;
2816
2817         return 0;
2818 err:
2819         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2820                             tx_ring->wq_base, tx_ring->wq_base_dma);
2821         tx_ring->wq_base = NULL;
2822 pci_alloc_err:
2823         netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2824         return -ENOMEM;
2825 }
2826
2827 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2828 {
2829         struct bq_desc *lbq_desc;
2830         u32 curr_idx, clean_idx;
2832
2833         curr_idx = rx_ring->lbq_curr_idx;
2834         clean_idx = rx_ring->lbq_clean_idx;
2835         while (curr_idx != clean_idx) {
2836                 lbq_desc = &rx_ring->lbq[curr_idx];
2837
2838                 if (lbq_desc->p.pg_chunk.last_flag) {
2839                         pci_unmap_page(qdev->pdev,
2840                                 lbq_desc->p.pg_chunk.map,
2841                                 ql_lbq_block_size(qdev),
2842                                        PCI_DMA_FROMDEVICE);
2843                         lbq_desc->p.pg_chunk.last_flag = 0;
2844                 }
2845
2846                 put_page(lbq_desc->p.pg_chunk.page);
2847                 lbq_desc->p.pg_chunk.page = NULL;
2848
2849                 if (++curr_idx == rx_ring->lbq_len)
2850                         curr_idx = 0;
2852         }
2853         if (rx_ring->pg_chunk.page) {
2854                 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2855                         ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2856                 put_page(rx_ring->pg_chunk.page);
2857                 rx_ring->pg_chunk.page = NULL;
2858         }
2859 }
2860
2861 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2862 {
2863         int i;
2864         struct bq_desc *sbq_desc;
2865
2866         for (i = 0; i < rx_ring->sbq_len; i++) {
2867                 sbq_desc = &rx_ring->sbq[i];
2868                 if (sbq_desc == NULL) {
2869                         netif_err(qdev, ifup, qdev->ndev,
2870                                   "sbq_desc %d is NULL.\n", i);
2871                         return;
2872                 }
2873                 if (sbq_desc->p.skb) {
2874                         pci_unmap_single(qdev->pdev,
2875                                          dma_unmap_addr(sbq_desc, mapaddr),
2876                                          dma_unmap_len(sbq_desc, maplen),
2877                                          PCI_DMA_FROMDEVICE);
2878                         dev_kfree_skb(sbq_desc->p.skb);
2879                         sbq_desc->p.skb = NULL;
2880                 }
2881         }
2882 }
2883
2884 /* Free all large and small rx buffers associated
2885  * with the completion queues for this device.
2886  */
2887 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2888 {
2889         int i;
2890         struct rx_ring *rx_ring;
2891
2892         for (i = 0; i < qdev->rx_ring_count; i++) {
2893                 rx_ring = &qdev->rx_ring[i];
2894                 if (rx_ring->lbq)
2895                         ql_free_lbq_buffers(qdev, rx_ring);
2896                 if (rx_ring->sbq)
2897                         ql_free_sbq_buffers(qdev, rx_ring);
2898         }
2899 }
2900
2901 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2902 {
2903         struct rx_ring *rx_ring;
2904         int i;
2905
2906         for (i = 0; i < qdev->rx_ring_count; i++) {
2907                 rx_ring = &qdev->rx_ring[i];
2908                 if (rx_ring->type != TX_Q)
2909                         ql_update_buffer_queues(qdev, rx_ring);
2910         }
2911 }
2912
2913 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2914                                 struct rx_ring *rx_ring)
2915 {
2916         int i;
2917         struct bq_desc *lbq_desc;
2918         __le64 *bq = rx_ring->lbq_base;
2919
2920         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2921         for (i = 0; i < rx_ring->lbq_len; i++) {
2922                 lbq_desc = &rx_ring->lbq[i];
2923                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2924                 lbq_desc->index = i;
2925                 lbq_desc->addr = bq;
2926                 bq++;
2927         }
2928 }
2929
2930 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2931                                 struct rx_ring *rx_ring)
2932 {
2933         int i;
2934         struct bq_desc *sbq_desc;
2935         __le64 *bq = rx_ring->sbq_base;
2936
2937         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2938         for (i = 0; i < rx_ring->sbq_len; i++) {
2939                 sbq_desc = &rx_ring->sbq[i];
2940                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2941                 sbq_desc->index = i;
2942                 sbq_desc->addr = bq;
2943                 bq++;
2944         }
2945 }
2946
2947 static void ql_free_rx_resources(struct ql_adapter *qdev,
2948                                  struct rx_ring *rx_ring)
2949 {
2950         /* Free the small buffer queue. */
2951         if (rx_ring->sbq_base) {
2952                 pci_free_consistent(qdev->pdev,
2953                                     rx_ring->sbq_size,
2954                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2955                 rx_ring->sbq_base = NULL;
2956         }
2957
2958         /* Free the small buffer queue control blocks. */
2959         kfree(rx_ring->sbq);
2960         rx_ring->sbq = NULL;
2961
2962         /* Free the large buffer queue. */
2963         if (rx_ring->lbq_base) {
2964                 pci_free_consistent(qdev->pdev,
2965                                     rx_ring->lbq_size,
2966                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2967                 rx_ring->lbq_base = NULL;
2968         }
2969
2970         /* Free the large buffer queue control blocks. */
2971         kfree(rx_ring->lbq);
2972         rx_ring->lbq = NULL;
2973
2974         /* Free the rx queue. */
2975         if (rx_ring->cq_base) {
2976                 pci_free_consistent(qdev->pdev,
2977                                     rx_ring->cq_size,
2978                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2979                 rx_ring->cq_base = NULL;
2980         }
2981 }
2982
2983 /* Allocate queues and buffers for this completion queue based
2984  * on the values in the parameter structure. */
2985 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2986                                  struct rx_ring *rx_ring)
2987 {
2989         /*
2990          * Allocate the completion queue for this rx_ring.
2991          */
2992         rx_ring->cq_base =
2993             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2994                                  &rx_ring->cq_base_dma);
2995
2996         if (rx_ring->cq_base == NULL) {
2997                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2998                 return -ENOMEM;
2999         }
3000
3001         if (rx_ring->sbq_len) {
3002                 /*
3003                  * Allocate small buffer queue.
3004                  */
3005                 rx_ring->sbq_base =
3006                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
3007                                          &rx_ring->sbq_base_dma);
3008
3009                 if (rx_ring->sbq_base == NULL) {
3010                         netif_err(qdev, ifup, qdev->ndev,
3011                                   "Small buffer queue allocation failed.\n");
3012                         goto err_mem;
3013                 }
3014
3015                 /*
3016                  * Allocate small buffer queue control blocks.
3017                  */
3018                 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3019                                              sizeof(struct bq_desc),
3020                                              GFP_KERNEL);
3021                 if (rx_ring->sbq == NULL)
3022                         goto err_mem;
3023
3024                 ql_init_sbq_ring(qdev, rx_ring);
3025         }
3026
3027         if (rx_ring->lbq_len) {
3028                 /*
3029                  * Allocate large buffer queue.
3030                  */
3031                 rx_ring->lbq_base =
3032                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3033                                          &rx_ring->lbq_base_dma);
3034
3035                 if (rx_ring->lbq_base == NULL) {
3036                         netif_err(qdev, ifup, qdev->ndev,
3037                                   "Large buffer queue allocation failed.\n");
3038                         goto err_mem;
3039                 }
3040                 /*
3041                  * Allocate large buffer queue control blocks.
3042                  */
3043                 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3044                                              sizeof(struct bq_desc),
3045                                              GFP_KERNEL);
3046                 if (rx_ring->lbq == NULL)
3047                         goto err_mem;
3048
3049                 ql_init_lbq_ring(qdev, rx_ring);
3050         }
3051
3052         return 0;
3053
3054 err_mem:
3055         ql_free_rx_resources(qdev, rx_ring);
3056         return -ENOMEM;
3057 }
3058
3059 static void ql_tx_ring_clean(struct ql_adapter *qdev)
3060 {
3061         struct tx_ring *tx_ring;
3062         struct tx_ring_desc *tx_ring_desc;
3063         int i, j;
3064
3065         /*
3066          * Loop through all queues and free
3067          * any resources.
3068          */
3069         for (j = 0; j < qdev->tx_ring_count; j++) {
3070                 tx_ring = &qdev->tx_ring[j];
3071                 for (i = 0; i < tx_ring->wq_len; i++) {
3072                         tx_ring_desc = &tx_ring->q[i];
3073                         if (tx_ring_desc && tx_ring_desc->skb) {
3074                                 netif_err(qdev, ifdown, qdev->ndev,
3075                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
3076                                           tx_ring_desc->skb, j,
3077                                           tx_ring_desc->index);
3078                                 ql_unmap_send(qdev, tx_ring_desc,
3079                                               tx_ring_desc->map_cnt);
3080                                 dev_kfree_skb(tx_ring_desc->skb);
3081                                 tx_ring_desc->skb = NULL;
3082                         }
3083                 }
3084         }
3085 }
3086
3087 static void ql_free_mem_resources(struct ql_adapter *qdev)
3088 {
3089         int i;
3090
3091         for (i = 0; i < qdev->tx_ring_count; i++)
3092                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3093         for (i = 0; i < qdev->rx_ring_count; i++)
3094                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3095         ql_free_shadow_space(qdev);
3096 }
3097
3098 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3099 {
3100         int i;
3101
3102         /* Allocate space for our shadow registers and such. */
3103         if (ql_alloc_shadow_space(qdev))
3104                 return -ENOMEM;
3105
3106         for (i = 0; i < qdev->rx_ring_count; i++) {
3107                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3108                         netif_err(qdev, ifup, qdev->ndev,
3109                                   "RX resource allocation failed.\n");
3110                         goto err_mem;
3111                 }
3112         }
3113         /* Allocate tx queue resources */
3114         for (i = 0; i < qdev->tx_ring_count; i++) {
3115                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3116                         netif_err(qdev, ifup, qdev->ndev,
3117                                   "TX resource allocation failed.\n");
3118                         goto err_mem;
3119                 }
3120         }
3121         return 0;
3122
3123 err_mem:
3124         ql_free_mem_resources(qdev);
3125         return -ENOMEM;
3126 }
3127
3128 /* Set up the rx ring control block and pass it to the chip.
3129  * The control block is defined as
3130  * "Completion Queue Initialization Control Block", or cqicb.
3131  */
3132 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3133 {
3134         struct cqicb *cqicb = &rx_ring->cqicb;
3135         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3136                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3137         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3138                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3139         void __iomem *doorbell_area =
3140             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3141         int err = 0;
3142         u16 bq_len;
3143         u64 tmp;
3144         __le64 *base_indirect_ptr;
3145         int page_entries;
3146
3147         /* Set up the shadow registers for this ring. */
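        /* The per-ring shadow area is laid out as an 8-byte slot for the
         * completion producer index, followed by the lbq indirection
         * (DB page address) list, then the sbq indirection list.
         */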
3148         rx_ring->prod_idx_sh_reg = shadow_reg;
3149         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3150         *rx_ring->prod_idx_sh_reg = 0;
3151         shadow_reg += sizeof(u64);
3152         shadow_reg_dma += sizeof(u64);
3153         rx_ring->lbq_base_indirect = shadow_reg;
3154         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3155         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3156         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3157         rx_ring->sbq_base_indirect = shadow_reg;
3158         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3159
3160         /* PCI doorbell mem area + 0x00 for consumer index register */
3161         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3162         rx_ring->cnsmr_idx = 0;
3163         rx_ring->curr_entry = rx_ring->cq_base;
3164
3165         /* PCI doorbell mem area + 0x04 for valid register */
3166         rx_ring->valid_db_reg = doorbell_area + 0x04;
3167
3168         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3169         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3170
3171         /* PCI doorbell mem area + 0x1c for small buffer consumer */
3172         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3173
3174         memset((void *)cqicb, 0, sizeof(struct cqicb));
3175         cqicb->msix_vect = rx_ring->irq;
3176
3177         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3178         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3179
3180         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3181
3182         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3183
3184         /*
3185          * Set up the control block load flags.
3186          */
3187         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3188             FLAGS_LV |          /* Load MSI-X vector */
3189             FLAGS_LI;           /* Load irq delay values */
3190         if (rx_ring->lbq_len) {
3191                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3192                 tmp = (u64)rx_ring->lbq_base_dma;
3193                 base_indirect_ptr = rx_ring->lbq_base_indirect;
3194                 page_entries = 0;
3195                 do {
3196                         *base_indirect_ptr = cpu_to_le64(tmp);
3197                         tmp += DB_PAGE_SIZE;
3198                         base_indirect_ptr++;
3199                         page_entries++;
3200                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3201                 cqicb->lbq_addr =
3202                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3203                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3204                         (u16) rx_ring->lbq_buf_size;
3205                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3206                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3207                         (u16) rx_ring->lbq_len;
3208                 cqicb->lbq_len = cpu_to_le16(bq_len);
3209                 rx_ring->lbq_prod_idx = 0;
3210                 rx_ring->lbq_curr_idx = 0;
3211                 rx_ring->lbq_clean_idx = 0;
3212                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3213         }
3214         if (rx_ring->sbq_len) {
3215                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3216                 tmp = (u64)rx_ring->sbq_base_dma;
3217                 base_indirect_ptr = rx_ring->sbq_base_indirect;
3218                 page_entries = 0;
3219                 do {
3220                         *base_indirect_ptr = cpu_to_le64(tmp);
3221                         tmp += DB_PAGE_SIZE;
3222                         base_indirect_ptr++;
3223                         page_entries++;
3224                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3225                 cqicb->sbq_addr =
3226                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3227                 cqicb->sbq_buf_size =
3228                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3229                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3230                         (u16) rx_ring->sbq_len;
3231                 cqicb->sbq_len = cpu_to_le16(bq_len);
3232                 rx_ring->sbq_prod_idx = 0;
3233                 rx_ring->sbq_curr_idx = 0;
3234                 rx_ring->sbq_clean_idx = 0;
3235                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3236         }
3237         switch (rx_ring->type) {
3238         case TX_Q:
3239                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3240                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3241                 break;
3242         case RX_Q:
3243                 /* Inbound completion handling rx_rings run in
3244                  * separate NAPI contexts.
3245                  */
3246                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3247                                64);
3248                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3249                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3250                 break;
3251         default:
3252                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3253                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3254         }
3255         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3256                            CFG_LCQ, rx_ring->cq_id);
3257         if (err) {
3258                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3259                 return err;
3260         }
3261         return err;
3262 }
3263
3264 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3265 {
3266         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3267         void __iomem *doorbell_area =
3268             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3269         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3270             (tx_ring->wq_id * sizeof(u64));
3271         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3272             (tx_ring->wq_id * sizeof(u64));
3273         int err = 0;
3274
3275         /*
3276          * Assign doorbell registers for this tx_ring.
3277          */
3278         /* TX PCI doorbell mem area for tx producer index */
3279         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3280         tx_ring->prod_idx = 0;
3281         /* TX PCI doorbell mem area + 0x04 */
3282         tx_ring->valid_db_reg = doorbell_area + 0x04;
3283
3284         /*
3285          * Assign shadow registers for this tx_ring.
3286          */
3287         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3288         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3289
3290         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3291         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3292                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3293         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3294         wqicb->rid = 0;
3295         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3296
3297         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3298
3299         ql_init_tx_ring(qdev, tx_ring);
3300
3301         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3302                            (u16) tx_ring->wq_id);
3303         if (err) {
3304                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3305                 return err;
3306         }
3307         return err;
3308 }
3309
3310 static void ql_disable_msix(struct ql_adapter *qdev)
3311 {
3312         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3313                 pci_disable_msix(qdev->pdev);
3314                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3315                 kfree(qdev->msi_x_entry);
3316                 qdev->msi_x_entry = NULL;
3317         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3318                 pci_disable_msi(qdev->pdev);
3319                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3320         }
3321 }
3322
3323 /* We start by trying to get the number of vectors
3324  * stored in qdev->intr_count. If we don't get that
3325  * many, the allocation falls back to fewer vectors, down to one.
3326  */
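/* For example (illustrative): asking for qdev->intr_count = 5 when only
 * 3 vectors are available makes pci_enable_msix_range() return 3, and
 * the driver runs with intr_count = 3; a negative return falls back to
 * MSI, then to legacy interrupts.
 */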
3327 static void ql_enable_msix(struct ql_adapter *qdev)
3328 {
3329         int i, err;
3330
3331         /* Get the MSIX vectors. */
3332         if (qlge_irq_type == MSIX_IRQ) {
3333                 /* Try to alloc space for the msix struct,
3334                  * if it fails then go to MSI/legacy.
3335                  */
3336                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3337                                             sizeof(struct msix_entry),
3338                                             GFP_KERNEL);
3339                 if (!qdev->msi_x_entry) {
3340                         qlge_irq_type = MSI_IRQ;
3341                         goto msi;
3342                 }
3343
3344                 for (i = 0; i < qdev->intr_count; i++)
3345                         qdev->msi_x_entry[i].entry = i;
3346
3347                 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3348                                             1, qdev->intr_count);
3349                 if (err < 0) {
3350                         kfree(qdev->msi_x_entry);
3351                         qdev->msi_x_entry = NULL;
3352                         netif_warn(qdev, ifup, qdev->ndev,
3353                                    "MSI-X Enable failed, trying MSI.\n");
3354                         qlge_irq_type = MSI_IRQ;
3355                 } else {
3356                         qdev->intr_count = err;
3357                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3358                         netif_info(qdev, ifup, qdev->ndev,
3359                                    "MSI-X Enabled, got %d vectors.\n",
3360                                    qdev->intr_count);
3361                         return;
3362                 }
3363         }
3364 msi:
3365         qdev->intr_count = 1;
3366         if (qlge_irq_type == MSI_IRQ) {
3367                 if (!pci_enable_msi(qdev->pdev)) {
3368                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3369                         netif_info(qdev, ifup, qdev->ndev,
3370                                    "Running with MSI interrupts.\n");
3371                         return;
3372                 }
3373         }
3374         qlge_irq_type = LEG_IRQ;
3375         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3376                      "Running with legacy interrupts.\n");
3377 }
3378
3379 /* Each vector services 1 RSS ring and 1 or more
3380  * TX completion rings.  This function loops through
3381  * the TX completion rings and assigns the vector that
3382  * will service it.  An example would be if there are
3383  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3384  * This would mean that vector 0 would service RSS ring 0
3385  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3386  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3387  */
3388 static void ql_set_tx_vect(struct ql_adapter *qdev)
3389 {
3390         int i, j, vect;
3391         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3392
3393         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3394                 /* Assign irq vectors to TX rx_rings.*/
3395                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3396                                          i < qdev->rx_ring_count; i++) {
3397                         if (j == tx_rings_per_vector) {
3398                                 vect++;
3399                                 j = 0;
3400                         }
3401                         qdev->rx_ring[i].irq = vect;
3402                         j++;
3403                 }
3404         } else {
3405                 /* For single vector all rings have an irq
3406                  * of zero.
3407                  */
3408                 for (i = 0; i < qdev->rx_ring_count; i++)
3409                         qdev->rx_ring[i].irq = 0;
3410         }
3411 }
3412
3413 /* Set the interrupt mask for this vector.  Each vector
3414  * will service 1 RSS ring and 1 or more TX completion
3415  * rings.  This function sets up a bit mask per vector
3416  * that indicates which rings it services.
3417  */
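/* For example (illustrative, assuming cq_id equals ring index): with 2
 * vectors and 8 TX completion rings, vector 0's irq_mask covers cq_ids
 * {0, 2, 3, 4, 5} (RSS ring 0 plus TX rings 0-3) and vector 1 covers
 * {1, 6, 7, 8, 9}.
 */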
3418 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3419 {
3420         int j, vect = ctx->intr;
3421         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3422
3423         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3424                 /* Add the RSS ring serviced by this vector
3425                  * to the mask.
3426                  */
3427                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3428                 /* Add the TX ring(s) serviced by this vector
3429                  * to the mask. */
3430                 for (j = 0; j < tx_rings_per_vector; j++) {
3431                         ctx->irq_mask |=
3432                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3433                         (vect * tx_rings_per_vector) + j].cq_id);
3434                 }
3435         } else {
3436                 /* For single vector we just shift each queue's
3437                  * ID into the mask.
3438                  */
3439                 for (j = 0; j < qdev->rx_ring_count; j++)
3440                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3441         }
3442 }
3443
3444 /*
3445  * Here we build the intr_context structures based on
3446  * our rx_ring count and intr vector count.
3447  * The intr_context structure is used to hook each vector
3448  * to possibly different handlers.
3449  */
3450 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3451 {
3452         int i = 0;
3453         struct intr_context *intr_context = &qdev->intr_context[0];
3454
3455         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3456                 /* Each rx_ring has its
3457                  * own intr_context since we have separate
3458                  * vectors for each queue.
3459                  */
3460                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3461                         qdev->rx_ring[i].irq = i;
3462                         intr_context->intr = i;
3463                         intr_context->qdev = qdev;
3464                         /* Set up this vector's bit-mask that indicates
3465                          * which queues it services.
3466                          */
3467                         ql_set_irq_mask(qdev, intr_context);
3468                         /*
3469                          * We set up each vector's enable/disable/read bits so
3470                          * there are no bit/mask calculations in the critical path.
3471                          */
3472                         intr_context->intr_en_mask =
3473                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3474                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3475                             | i;
3476                         intr_context->intr_dis_mask =
3477                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3478                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3479                             INTR_EN_IHD | i;
3480                         intr_context->intr_read_mask =
3481                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3482                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3483                             i;
3484                         if (i == 0) {
3485                                 /* The first vector/queue handles
3486                                  * broadcast/multicast, fatal errors,
3487                                  * and firmware events.  This in addition
3488                                  * to normal inbound NAPI processing.
3489                                  */
3490                                 intr_context->handler = qlge_isr;
3491                                 sprintf(intr_context->name, "%s-rx-%d",
3492                                         qdev->ndev->name, i);
3493                         } else {
3494                                 /*
3495                                  * Inbound queues handle unicast frames only.
3496                                  */
3497                                 intr_context->handler = qlge_msix_rx_isr;
3498                                 sprintf(intr_context->name, "%s-rx-%d",
3499                                         qdev->ndev->name, i);
3500                         }
3501                 }
3502         } else {
3503                 /*
3504                  * All rx_rings use the same intr_context since
3505                  * there is only one vector.
3506                  */
3507                 intr_context->intr = 0;
3508                 intr_context->qdev = qdev;
3509                 /*
3510                  * We set up each vector's enable/disable/read bits so
3511                  * there are no bit/mask calculations in the critical path.
3512                  */
3513                 intr_context->intr_en_mask =
3514                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3515                 intr_context->intr_dis_mask =
3516                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3517                     INTR_EN_TYPE_DISABLE;
3518                 intr_context->intr_read_mask =
3519                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3520                 /*
3521                  * Single interrupt means one handler for all rings.
3522                  */
3523                 intr_context->handler = qlge_isr;
3524                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3525                 /* Set up this vector's bit-mask that indicates
3526                  * which queues it services. In this case there is
3527                  * a single vector so it will service all RSS and
3528                  * TX completion rings.
3529                  */
3530                 ql_set_irq_mask(qdev, intr_context);
3531         }
3532         /* Tell the TX completion rings which MSIx vector
3533          * they will be using.
3534          */
3535         ql_set_tx_vect(qdev);
3536 }
3537
3538 static void ql_free_irq(struct ql_adapter *qdev)
3539 {
3540         int i;
3541         struct intr_context *intr_context = &qdev->intr_context[0];
3542
3543         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3544                 if (intr_context->hooked) {
3545                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3546                                 free_irq(qdev->msi_x_entry[i].vector,
3547                                          &qdev->rx_ring[i]);
3548                         } else {
3549                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3550                         }
3551                 }
3552         }
3553         ql_disable_msix(qdev);
3554 }
3555
3556 static int ql_request_irq(struct ql_adapter *qdev)
3557 {
3558         int i;
3559         int status = 0;
3560         struct pci_dev *pdev = qdev->pdev;
3561         struct intr_context *intr_context = &qdev->intr_context[0];
3562
3563         ql_resolve_queues_to_irqs(qdev);
3564
3565         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3566                 atomic_set(&intr_context->irq_cnt, 0);
3567                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3568                         status = request_irq(qdev->msi_x_entry[i].vector,
3569                                              intr_context->handler,
3570                                              0,
3571                                              intr_context->name,
3572                                              &qdev->rx_ring[i]);
3573                         if (status) {
3574                                 netif_err(qdev, ifup, qdev->ndev,
3575                                           "Failed request for MSIX interrupt %d.\n",
3576                                           i);
3577                                 goto err_irq;
3578                         }
3579                 } else {
3580                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3581                                      "trying msi or legacy interrupts.\n");
3582                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3583                                      "%s: irq = %d.\n", __func__, pdev->irq);
3584                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3585                                      "%s: context->name = %s.\n", __func__,
3586                                      intr_context->name);
3587                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3588                                      "%s: dev_id = 0x%p.\n", __func__,
3589                                      &qdev->rx_ring[0]);
3590                         status =
3591                             request_irq(pdev->irq, qlge_isr,
3592                                         test_bit(QL_MSI_ENABLED,
3593                                                  &qdev->flags) ?
3594                                                  0 : IRQF_SHARED,
3595                                         intr_context->name, &qdev->rx_ring[0]);
3596                         if (status)
3597                                 goto err_irq;
3598
3599                         netif_err(qdev, ifup, qdev->ndev,
3600                                   "Hooked intr %d, queue type %s, with name %s.\n",
3601                                   i,
3602                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3603                                   "DEFAULT_Q" :
3604                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3605                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3606                                   intr_context->name);
3607                 }
3608                 intr_context->hooked = 1;
3609         }
3610         return status;
3611 err_irq:
3612         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3613         ql_free_irq(qdev);
3614         return status;
3615 }
3616
3617 static int ql_start_rss(struct ql_adapter *qdev)
3618 {
3619         static const u8 init_hash_seed[] = {
3620                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3621                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3622                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3623                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3624                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3625         };
3626         struct ricb *ricb = &qdev->ricb;
3627         int status = 0;
3628         int i;
3629         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3630
3631         memset((void *)ricb, 0, sizeof(*ricb));
3632
3633         ricb->base_cq = RSS_L4K;
3634         ricb->flags =
3635                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3636         ricb->mask = cpu_to_le16((u16)(0x3ff));
3637
3638         /*
3639          * Fill out the Indirection Table.
3640          */
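        /* Each of the 1024 entries maps a hash result to an inbound
         * completion queue. For example (illustrative), rss_ring_count = 4
         * yields the repeating pattern 0, 1, 2, 3, ...; the mask below
         * assumes rss_ring_count is a power of two.
         */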
3641         for (i = 0; i < 1024; i++)
3642                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3643
3644         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3645         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3646
3647         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3648         if (status) {
3649                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3650                 return status;
3651         }
3652         return status;
3653 }
3654
3655 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3656 {
3657         int i, status = 0;
3658
3659         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3660         if (status)
3661                 return status;
3662         /* Clear all the entries in the routing table. */
3663         for (i = 0; i < 16; i++) {
3664                 status = ql_set_routing_reg(qdev, i, 0, 0);
3665                 if (status) {
3666                         netif_err(qdev, ifup, qdev->ndev,
3667                                   "Failed to init routing register for CAM packets.\n");
3668                         break;
3669                 }
3670         }
3671         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3672         return status;
3673 }
3674
3675 /* Initialize the frame-to-queue routing. */
3676 static int ql_route_initialize(struct ql_adapter *qdev)
3677 {
3678         int status = 0;
3679
3680         /* Clear all the entries in the routing table. */
3681         status = ql_clear_routing_entries(qdev);
3682         if (status)
3683                 return status;
3684
3685         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3686         if (status)
3687                 return status;
3688
3689         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3690                                                 RT_IDX_IP_CSUM_ERR, 1);
3691         if (status) {
3692                 netif_err(qdev, ifup, qdev->ndev,
3693                         "Failed to init routing register "
3694                         "for IP CSUM error packets.\n");
3695                 goto exit;
3696         }
3697         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3698                                                 RT_IDX_TU_CSUM_ERR, 1);
3699         if (status) {
3700                 netif_err(qdev, ifup, qdev->ndev,
3701                         "Failed to init routing register "
3702                         "for TCP/UDP CSUM error packets.\n");
3703                 goto exit;
3704         }
3705         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3706         if (status) {
3707                 netif_err(qdev, ifup, qdev->ndev,
3708                           "Failed to init routing register for broadcast packets.\n");
3709                 goto exit;
3710         }
3711         /* If we have more than one inbound queue, then turn on RSS in the
3712          * routing block.
3713          */
3714         if (qdev->rss_ring_count > 1) {
3715                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3716                                         RT_IDX_RSS_MATCH, 1);
3717                 if (status) {
3718                         netif_err(qdev, ifup, qdev->ndev,
3719                                   "Failed to init routing register for MATCH RSS packets.\n");
3720                         goto exit;
3721                 }
3722         }
3723
3724         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3725                                     RT_IDX_CAM_HIT, 1);
3726         if (status)
3727                 netif_err(qdev, ifup, qdev->ndev,
3728                           "Failed to init routing register for CAM packets.\n");
3729 exit:
3730         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3731         return status;
3732 }
3733
3734 int ql_cam_route_initialize(struct ql_adapter *qdev)
3735 {
3736         int status, set;
3737
3738         /* Check if the link is up and use that to
3739          * determine whether we are setting or clearing
3740          * the MAC address in the CAM.
3741          */
3742         set = ql_read32(qdev, STS);
3743         set &= qdev->port_link_up;
3744         status = ql_set_mac_addr(qdev, set);
3745         if (status) {
3746                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3747                 return status;
3748         }
3749
3750         status = ql_route_initialize(qdev);
3751         if (status)
3752                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3753
3754         return status;
3755 }
3756
3757 static int ql_adapter_initialize(struct ql_adapter *qdev)
3758 {
3759         u32 value, mask;
3760         int i;
3761         int status = 0;
3762
3763         /*
3764          * Set up the System register to halt on errors.
3765          */
3766         value = SYS_EFE | SYS_FAE;
3767         mask = value << 16;
3768         ql_write32(qdev, SYS, mask | value);
3769
3770         /* Set the default queue, and VLAN behavior. */
3771         value = NIC_RCV_CFG_DFQ;
3772         mask = NIC_RCV_CFG_DFQ_MASK;
3773         if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3774                 value |= NIC_RCV_CFG_RV;
3775                 mask |= (NIC_RCV_CFG_RV << 16);
3776         }
3777         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3778
3779         /* Set the MPI interrupt to enabled. */
3780         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3781
3782         /* Enable the function, set pagesize, enable error checking. */
3783         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3784             FSC_EC | FSC_VM_PAGE_4K;
3785         value |= SPLT_SETTING;
3786
3787         /* Set/clear header splitting. */
3788         mask = FSC_VM_PAGESIZE_MASK |
3789             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3790         ql_write32(qdev, FSC, mask | value);
3791
3792         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3793
3794         /* Set RX packet routing to use port/pci function on which the
3795          * packet arrived, in addition to the usual frame routing.
3796          * This is helpful on bonding where both interfaces can have
3797          * the same MAC address.
3798          */
3799         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3800         /* Reroute all packets to our Interface.
3801          * They may have been routed to MPI firmware
3802          * due to WOL.
3803          */
3804         value = ql_read32(qdev, MGMT_RCV_CFG);
3805         value &= ~MGMT_RCV_CFG_RM;
3806         mask = 0xffff0000;
3807
3808         /* Sticky reg needs clearing due to WOL. */
3809         ql_write32(qdev, MGMT_RCV_CFG, mask);
3810         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3811
3812         /* WOL is enabled by default on Mezz cards */
3813         if (qdev->pdev->subsystem_device == 0x0068 ||
3814                         qdev->pdev->subsystem_device == 0x0180)
3815                 qdev->wol = WAKE_MAGIC;
3816
3817         /* Start up the rx queues. */
3818         for (i = 0; i < qdev->rx_ring_count; i++) {
3819                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3820                 if (status) {
3821                         netif_err(qdev, ifup, qdev->ndev,
3822                                   "Failed to start rx ring[%d].\n", i);
3823                         return status;
3824                 }
3825         }
3826
3827         /* If there is more than one inbound completion queue
3828          * then download a RICB to configure RSS.
3829          */
3830         if (qdev->rss_ring_count > 1) {
3831                 status = ql_start_rss(qdev);
3832                 if (status) {
3833                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3834                         return status;
3835                 }
3836         }
3837
3838         /* Start up the tx queues. */
3839         for (i = 0; i < qdev->tx_ring_count; i++) {
3840                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3841                 if (status) {
3842                         netif_err(qdev, ifup, qdev->ndev,
3843                                   "Failed to start tx ring[%d].\n", i);
3844                         return status;
3845                 }
3846         }
3847
3848         /* Initialize the port and set the max framesize. */
3849         status = qdev->nic_ops->port_initialize(qdev);
3850         if (status)
3851                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3852
3853         /* Set up the MAC address and frame routing filter. */
3854         status = ql_cam_route_initialize(qdev);
3855         if (status) {
3856                 netif_err(qdev, ifup, qdev->ndev,
3857                           "Failed to init CAM/Routing tables.\n");
3858                 return status;
3859         }
3860
3861         /* Start NAPI for the RSS queues. */
3862         for (i = 0; i < qdev->rss_ring_count; i++)
3863                 napi_enable(&qdev->rx_ring[i].napi);
3864
3865         return status;
3866 }
3867
3868 /* Issue soft reset to chip. */
3869 static int ql_adapter_reset(struct ql_adapter *qdev)
3870 {
3871         u32 value;
3872         int status = 0;
3873         unsigned long end_jiffies;
3874
3875         /* Clear all the entries in the routing table. */
3876         status = ql_clear_routing_entries(qdev);
3877         if (status) {
3878                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3879                 return status;
3880         }
3881
3882         /* If the recovery bit is set, skip the mailbox command and
3883          * just clear the bit; otherwise this is a normal reset.
3884          */
3885         if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3886                 /* Stop management traffic. */
3887                 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3888
3889                 /* Wait for the NIC and MGMNT FIFOs to empty. */
3890                 ql_wait_fifo_empty(qdev);
3891         } else
3892                 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3893
3894         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3895
3896         end_jiffies = jiffies + usecs_to_jiffies(30);
3897         do {
3898                 value = ql_read32(qdev, RST_FO);
3899                 if ((value & RST_FO_FR) == 0)
3900                         break;
3901                 cpu_relax();
3902         } while (time_before(jiffies, end_jiffies));
3903
3904         if (value & RST_FO_FR) {
3905                 netif_err(qdev, ifdown, qdev->ndev,
3906                           "Timed out waiting for the chip reset to complete.\n");
3907                 status = -ETIMEDOUT;
3908         }
3909
3910         /* Resume management traffic. */
3911         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3912         return status;
3913 }
3914
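/* Log the function/port assignment and the chip revision fields
 * decoded from REV_ID, plus the MAC address.
 */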
3915 static void ql_display_dev_info(struct net_device *ndev)
3916 {
3917         struct ql_adapter *qdev = netdev_priv(ndev);
3918
3919         netif_info(qdev, probe, qdev->ndev,
3920                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3921                    "XG Roll = %d, XG Rev = %d.\n",
3922                    qdev->func,
3923                    qdev->port,
3924                    qdev->chip_rev_id & 0x0000000f,
3925                    qdev->chip_rev_id >> 4 & 0x0000000f,
3926                    qdev->chip_rev_id >> 8 & 0x0000000f,
3927                    qdev->chip_rev_id >> 12 & 0x0000000f);
3928         netif_info(qdev, probe, qdev->ndev,
3929                    "MAC address %pM\n", ndev->dev_addr);
3930 }
3931
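/* Arm wake-on-LAN via MPI mailbox commands.  Only WAKE_MAGIC is
 * supported; any other requested wake flag is rejected.
 */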
3932 static int ql_wol(struct ql_adapter *qdev)
3933 {
3934         int status = 0;
3935         u32 wol = MB_WOL_DISABLE;
3936
3937         /* The CAM is still intact after a reset, but if we
3938          * are doing WOL, then we may need to program the
3939          * routing regs. We would also need to issue the mailbox
3940          * commands to instruct the MPI what to do per the ethtool
3941          * settings.
3942          */
3943
3944         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3945                         WAKE_MCAST | WAKE_BCAST)) {
3946                 netif_err(qdev, ifdown, qdev->ndev,
3947                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3948                           qdev->wol);
3949                 return -EINVAL;
3950         }
3951
3952         if (qdev->wol & WAKE_MAGIC) {
3953                 status = ql_mb_wol_set_magic(qdev, 1);
3954                 if (status) {
3955                         netif_err(qdev, ifdown, qdev->ndev,
3956                                   "Failed to set magic packet on %s.\n",
3957                                   qdev->ndev->name);
3958                         return status;
3959                 }
3960                 netif_info(qdev, drv, qdev->ndev,
3961                            "Enabled magic packet successfully on %s.\n",
3962                            qdev->ndev->name);
3963
3964                 wol |= MB_WOL_MAGIC_PKT;
3965         }
3966
3967         if (qdev->wol) {
3968                 wol |= MB_WOL_MODE_ON;
3969                 status = ql_mb_wol_mode(qdev, wol);
3970                 netif_err(qdev, drv, qdev->ndev,
3971                           "WOL %s (wol code 0x%x) on %s\n",
3972                           (status == 0) ? "Successfully set" : "Failed",
3973                           wol, qdev->ndev->name);
3974         }
3975
3976         return status;
3977 }
3978
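/* Synchronously cancel the driver's delayed work.  The ASIC reset
 * worker is left alone while a recovery is already in progress.
 */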
3979 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3980 {
3982         /* Don't kill the reset worker thread if we
3983          * are in the process of recovery.
3984          */
3985         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3986                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3987         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3988         cancel_delayed_work_sync(&qdev->mpi_work);
3989         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3990         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3991         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3992 }
3993
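/* Bring the adapter down: stop NAPI, quiesce interrupts and the tx
 * rings, soft-reset the chip, then free the rx buffers.
 */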
3994 static int ql_adapter_down(struct ql_adapter *qdev)
3995 {
3996         int i, status = 0;
3997
3998         ql_link_off(qdev);
3999
4000         ql_cancel_all_work_sync(qdev);
4001
4002         for (i = 0; i < qdev->rss_ring_count; i++)
4003                 napi_disable(&qdev->rx_ring[i].napi);
4004
4005         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4006
4007         ql_disable_interrupts(qdev);
4008
4009         ql_tx_ring_clean(qdev);
4010
4011         /* Call netif_napi_del() from a common point. */
4013         for (i = 0; i < qdev->rss_ring_count; i++)
4014                 netif_napi_del(&qdev->rx_ring[i].napi);
4015
4016         status = ql_adapter_reset(qdev);
4017         if (status)
4018                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4019                           qdev->func);
4020         ql_free_rx_buffers(qdev);
4021
4022         return status;
4023 }
4024
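/* Bring the adapter up: initialize the hardware, post rx buffers,
 * restore the rx mode and VLAN settings, then enable interrupts
 * and the tx queues.
 */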
4025 static int ql_adapter_up(struct ql_adapter *qdev)
4026 {
4027         int err = 0;
4028
4029         err = ql_adapter_initialize(qdev);
4030         if (err) {
4031                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4032                 goto err_init;
4033         }
4034         set_bit(QL_ADAPTER_UP, &qdev->flags);
4035         ql_alloc_rx_buffers(qdev);
4036         /* If the port is initialized and the
4037          * link is up then turn on the carrier.
4038          */
4039         if ((ql_read32(qdev, STS) & qdev->port_init) &&
4040                         (ql_read32(qdev, STS) & qdev->port_link_up))
4041                 ql_link_on(qdev);
4042         /* Restore rx mode. */
4043         clear_bit(QL_ALLMULTI, &qdev->flags);
4044         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4045         qlge_set_multicast_list(qdev->ndev);
4046
4047         /* Restore vlan setting. */
4048         qlge_restore_vlan(qdev);
4049
4050         ql_enable_interrupts(qdev);
4051         ql_enable_all_completion_interrupts(qdev);
4052         netif_tx_start_all_queues(qdev->ndev);
4053
4054         return 0;
4055 err_init:
4056         ql_adapter_reset(qdev);
4057         return err;
4058 }
4059
4060 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4061 {
4062         ql_free_mem_resources(qdev);
4063         ql_free_irq(qdev);
4064 }
4065
4066 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4067 {
4068         int status = 0;
4069
4070         if (ql_alloc_mem_resources(qdev)) {
4071                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4072                 return -ENOMEM;
4073         }
4074         status = ql_request_irq(qdev);
4075         return status;
4076 }
4077
4078 static int qlge_close(struct net_device *ndev)
4079 {
4080         struct ql_adapter *qdev = netdev_priv(ndev);
4081
4082         /* If we hit the pci_channel_io_perm_failure
4083          * condition, then we already brought
4084          * the adapter down.
4085          */
4086         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4087                 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4088                 clear_bit(QL_EEH_FATAL, &qdev->flags);
4089                 return 0;
4090         }
4091
4092         /*
4093          * Wait for device to recover from a reset.
4094          * (Rarely happens, but possible.)
4095          */
4096         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4097                 msleep(1);
4098         ql_adapter_down(qdev);
4099         ql_release_adapter_resources(qdev);
4100         return 0;
4101 }
4102
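/* Size the rings: one RSS (inbound) ring per MSI-X vector, one tx
 * ring per CPU, and one outbound completion ring per tx ring.  For
 * example, with 8 online CPUs but only 4 MSI-X vectors granted this
 * yields rss_ring_count = 4, tx_ring_count = 8, rx_ring_count = 12.
 */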
4103 static int ql_configure_rings(struct ql_adapter *qdev)
4104 {
4105         int i;
4106         struct rx_ring *rx_ring;
4107         struct tx_ring *tx_ring;
4108         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4109         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4110                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4111
4112         qdev->lbq_buf_order = get_order(lbq_buf_len);
4113
4114         /* In a perfect world we have one RSS ring for each CPU
4115          * and each has its own vector.  To do that we ask for
4116          * cpu_cnt vectors.  ql_enable_msix() will adjust the
4117          * vector count to what we actually get.  We then
4118          * allocate an RSS ring for each.
4119          * Essentially, we are doing min(cpu_count, msix_vector_count).
4120          */
4121         qdev->intr_count = cpu_cnt;
4122         ql_enable_msix(qdev);
4123         /* Adjust the RSS ring count to the actual vector count. */
4124         qdev->rss_ring_count = qdev->intr_count;
4125         qdev->tx_ring_count = cpu_cnt;
4126         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4127
4128         for (i = 0; i < qdev->tx_ring_count; i++) {
4129                 tx_ring = &qdev->tx_ring[i];
4130                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4131                 tx_ring->qdev = qdev;
4132                 tx_ring->wq_id = i;
4133                 tx_ring->wq_len = qdev->tx_ring_size;
4134                 tx_ring->wq_size =
4135                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4136
4137                 /*
4138                  * The completion queue IDs for the tx rings start
4139                  * immediately after those of the rss rings.
4140                  */
4141                 tx_ring->cq_id = qdev->rss_ring_count + i;
4142         }
4143
4144         for (i = 0; i < qdev->rx_ring_count; i++) {
4145                 rx_ring = &qdev->rx_ring[i];
4146                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4147                 rx_ring->qdev = qdev;
4148                 rx_ring->cq_id = i;
4149                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4150                 if (i < qdev->rss_ring_count) {
4151                         /*
4152                          * Inbound (RSS) queues.
4153                          */
4154                         rx_ring->cq_len = qdev->rx_ring_size;
4155                         rx_ring->cq_size =
4156                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4157                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4158                         rx_ring->lbq_size =
4159                             rx_ring->lbq_len * sizeof(__le64);
4160                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4161                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4162                         rx_ring->sbq_size =
4163                             rx_ring->sbq_len * sizeof(__le64);
4164                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4165                         rx_ring->type = RX_Q;
4166                 } else {
4167                         /*
4168                          * Outbound queue handles outbound completions only.
4169                          */
4170                         /* outbound cq is same size as tx_ring it services. */
4171                         rx_ring->cq_len = qdev->tx_ring_size;
4172                         rx_ring->cq_size =
4173                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4174                         rx_ring->lbq_len = 0;
4175                         rx_ring->lbq_size = 0;
4176                         rx_ring->lbq_buf_size = 0;
4177                         rx_ring->sbq_len = 0;
4178                         rx_ring->sbq_size = 0;
4179                         rx_ring->sbq_buf_size = 0;
4180                         rx_ring->type = TX_Q;
4181                 }
4182         }
4183         return 0;
4184 }
4185
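/* net_device open entry point: reset the chip, size the rings,
 * acquire memory and IRQ resources, and bring the adapter up.
 */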
4186 static int qlge_open(struct net_device *ndev)
4187 {
4188         int err = 0;
4189         struct ql_adapter *qdev = netdev_priv(ndev);
4190
4191         err = ql_adapter_reset(qdev);
4192         if (err)
4193                 return err;
4194
4195         err = ql_configure_rings(qdev);
4196         if (err)
4197                 return err;
4198
4199         err = ql_get_adapter_resources(qdev);
4200         if (err)
4201                 goto error_up;
4202
4203         err = ql_adapter_up(qdev);
4204         if (err)
4205                 goto error_up;
4206
4207         return err;
4208
4209 error_up:
4210         ql_release_adapter_resources(qdev);
4211         return err;
4212 }
4213
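/* Resize the large rx buffers for a new MTU by cycling the
 * adapter down and back up.
 */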
4214 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4215 {
4216         struct rx_ring *rx_ring;
4217         int i, status;
4218         u32 lbq_buf_len;
4219
4220         /* Wait for an outstanding reset to complete. */
4221         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4222                 int tries = 4;
4223
4224                 while (--tries && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4225                         netif_err(qdev, ifup, qdev->ndev,
4226                                   "Waiting for adapter UP...\n");
4227                         ssleep(1);
4228                 }
4229
4230                 if (!tries) {
4231                         netif_err(qdev, ifup, qdev->ndev,
4232                                   "Timed out waiting for adapter UP\n");
4233                         return -ETIMEDOUT;
4234                 }
4235         }
4236
4237         status = ql_adapter_down(qdev);
4238         if (status)
4239                 goto error;
4240
4241         /* Get the new rx buffer size. */
4242         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4243                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4244         qdev->lbq_buf_order = get_order(lbq_buf_len);
4245
4246         for (i = 0; i < qdev->rss_ring_count; i++) {
4247                 rx_ring = &qdev->rx_ring[i];
4248                 /* Set the new size. */
4249                 rx_ring->lbq_buf_size = lbq_buf_len;
4250         }
4251
4252         status = ql_adapter_up(qdev);
4253         if (status)
4254                 goto error;
4255
4256         return status;
4257 error:
4258         netif_alert(qdev, ifup, qdev->ndev,
4259                     "Driver up/down cycle failed, closing device.\n");
4260         set_bit(QL_ADAPTER_UP, &qdev->flags);
4261         dev_close(qdev->ndev);
4262         return status;
4263 }
4264
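/* Only 1500 <-> 9000 MTU transitions are supported; the min_mtu/
 * max_mtu range set up in qlge_probe() filters everything else.
 */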
4265 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4266 {
4267         struct ql_adapter *qdev = netdev_priv(ndev);
4268         int status;
4269
4270         if (ndev->mtu == 1500 && new_mtu == 9000) {
4271                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4272         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4273                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4274         } else
4275                 return -EINVAL;
4276
4277         queue_delayed_work(qdev->workqueue,
4278                         &qdev->mpi_port_cfg_work, 3*HZ);
4279
4280         ndev->mtu = new_mtu;
4281
4282         if (!netif_running(qdev->ndev))
4283                 return 0;
4285
4286         status = ql_change_rx_buffers(qdev);
4287         if (status) {
4288                 netif_err(qdev, ifup, qdev->ndev,
4289                           "Changing MTU failed.\n");
4290         }
4291
4292         return status;
4293 }
4294
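/* Aggregate the per-ring rx/tx counters into the netdev stats. */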
4295 static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
4297 {
4298         struct ql_adapter *qdev = netdev_priv(ndev);
4299         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4300         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4301         unsigned long pkts, mcast, dropped, errors, bytes;
4302         int i;
4303
4304         /* Get RX stats. */
4305         pkts = mcast = dropped = errors = bytes = 0;
4306         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4307                 pkts += rx_ring->rx_packets;
4308                 bytes += rx_ring->rx_bytes;
4309                 dropped += rx_ring->rx_dropped;
4310                 errors += rx_ring->rx_errors;
4311                 mcast += rx_ring->rx_multicast;
4312         }
4313         ndev->stats.rx_packets = pkts;
4314         ndev->stats.rx_bytes = bytes;
4315         ndev->stats.rx_dropped = dropped;
4316         ndev->stats.rx_errors = errors;
4317         ndev->stats.multicast = mcast;
4318
4319         /* Get TX stats. */
4320         pkts = errors = bytes = 0;
4321         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4322                 pkts += tx_ring->tx_packets;
4323                 bytes += tx_ring->tx_bytes;
4324                 errors += tx_ring->tx_errors;
4325         }
4326         ndev->stats.tx_packets = pkts;
4327         ndev->stats.tx_bytes = bytes;
4328         ndev->stats.tx_errors = errors;
4329         return &ndev->stats;
4330 }
4331
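/* Sync the promiscuous and all-multi routing slots and the
 * multicast CAM entries with the netdev rx mode flags.
 */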
4332 static void qlge_set_multicast_list(struct net_device *ndev)
4333 {
4334         struct ql_adapter *qdev = netdev_priv(ndev);
4335         struct netdev_hw_addr *ha;
4336         int i, status;
4337
4338         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4339         if (status)
4340                 return;
4341         /*
4342          * Set or clear promiscuous mode if a
4343          * transition is taking place.
4344          */
4345         if (ndev->flags & IFF_PROMISC) {
4346                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4347                         if (ql_set_routing_reg
4348                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4349                                 netif_err(qdev, hw, qdev->ndev,
4350                                           "Failed to set promiscuous mode.\n");
4351                         } else {
4352                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4353                         }
4354                 }
4355         } else {
4356                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4357                         if (ql_set_routing_reg
4358                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4359                                 netif_err(qdev, hw, qdev->ndev,
4360                                           "Failed to clear promiscuous mode.\n");
4361                         } else {
4362                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4363                         }
4364                 }
4365         }
4366
4367         /*
4368          * Set or clear all multicast mode if a
4369          * transition is taking place.
4370          */
4371         if ((ndev->flags & IFF_ALLMULTI) ||
4372             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4373                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4374                         if (ql_set_routing_reg
4375                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4376                                 netif_err(qdev, hw, qdev->ndev,
4377                                           "Failed to set all-multi mode.\n");
4378                         } else {
4379                                 set_bit(QL_ALLMULTI, &qdev->flags);
4380                         }
4381                 }
4382         } else {
4383                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4384                         if (ql_set_routing_reg
4385                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4386                                 netif_err(qdev, hw, qdev->ndev,
4387                                           "Failed to clear all-multi mode.\n");
4388                         } else {
4389                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4390                         }
4391                 }
4392         }
4393
4394         if (!netdev_mc_empty(ndev)) {
4395                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4396                 if (status)
4397                         goto exit;
4398                 i = 0;
4399                 netdev_for_each_mc_addr(ha, ndev) {
4400                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4401                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4402                                 netif_err(qdev, hw, qdev->ndev,
4403                                           "Failed to load multicast address.\n");
4404                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4405                                 goto exit;
4406                         }
4407                         i++;
4408                 }
4409                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4410                 if (ql_set_routing_reg
4411                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4412                         netif_err(qdev, hw, qdev->ndev,
4413                                   "Failed to set multicast match mode.\n");
4414                 } else {
4415                         set_bit(QL_ALLMULTI, &qdev->flags);
4416                 }
4417         }
4418 exit:
4419         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4420 }
4421
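/* Set a new station address and program it into the CAM. */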
4422 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4423 {
4424         struct ql_adapter *qdev = netdev_priv(ndev);
4425         struct sockaddr *addr = p;
4426         int status;
4427
4428         if (!is_valid_ether_addr(addr->sa_data))
4429                 return -EADDRNOTAVAIL;
4430         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4431         /* Update local copy of current mac address. */
4432         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4433
4434         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4435         if (status)
4436                 return status;
4437         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4438                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4439         if (status)
4440                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4441         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4442         return status;
4443 }
4444
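/* The tx watchdog fired; queue ASIC error recovery. */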
4445 static void qlge_tx_timeout(struct net_device *ndev)
4446 {
4447         struct ql_adapter *qdev = netdev_priv(ndev);

4448         ql_queue_asic_error(qdev);
4449 }
4450
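/* Recovery worker: cycle the adapter down and back up under RTNL,
 * then restore the rx mode.  On failure the device is closed.
 */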
4451 static void ql_asic_reset_work(struct work_struct *work)
4452 {
4453         struct ql_adapter *qdev =
4454             container_of(work, struct ql_adapter, asic_reset_work.work);
4455         int status;

4456         rtnl_lock();
4457         status = ql_adapter_down(qdev);
4458         if (status)
4459                 goto error;
4460
4461         status = ql_adapter_up(qdev);
4462         if (status)
4463                 goto error;
4464
4465         /* Restore rx mode. */
4466         clear_bit(QL_ALLMULTI, &qdev->flags);
4467         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4468         qlge_set_multicast_list(qdev->ndev);
4469
4470         rtnl_unlock();
4471         return;
4472 error:
4473         netif_alert(qdev, ifup, qdev->ndev,
4474                     "Driver up/down cycle failed, closing device\n");
4475
4476         set_bit(QL_ADAPTER_UP, &qdev->flags);
4477         dev_close(qdev->ndev);
4478         rtnl_unlock();
4479 }
4480
4481 static const struct nic_operations qla8012_nic_ops = {
4482         .get_flash              = ql_get_8012_flash_params,
4483         .port_initialize        = ql_8012_port_initialize,
4484 };
4485
4486 static const struct nic_operations qla8000_nic_ops = {
4487         .get_flash              = ql_get_8000_flash_params,
4488         .port_initialize        = ql_8000_port_initialize,
4489 };
4490
4491 /* Find the pcie function number for the other NIC
4492  * on this chip.  Since both NIC functions share a
4493  * common firmware we have the lowest enabled function
4494  * do any common work.  Examples would be resetting
4495  * after a fatal firmware error, or doing a firmware
4496  * coredump.
4497  */
4498 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4499 {
4500         int status = 0;
4501         u32 temp;
4502         u32 nic_func1, nic_func2;
4503
4504         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4505                         &temp);
4506         if (status)
4507                 return status;
4508
4509         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4510                         MPI_TEST_NIC_FUNC_MASK);
4511         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4512                         MPI_TEST_NIC_FUNC_MASK);
4513
4514         if (qdev->func == nic_func1)
4515                 qdev->alt_func = nic_func2;
4516         else if (qdev->func == nic_func2)
4517                 qdev->alt_func = nic_func1;
4518         else
4519                 status = -EIO;
4520
4521         return status;
4522 }
4523
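/* Determine which PCI function and port this instance is, then set
 * the port-specific semaphore masks, status bits, and mailbox
 * addresses, and bind the chip-specific nic_ops.
 */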
4524 static int ql_get_board_info(struct ql_adapter *qdev)
4525 {
4526         int status;

4527         qdev->func =
4528             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4529         if (qdev->func > 3)
4530                 return -EIO;
4531
4532         status = ql_get_alt_pcie_func(qdev);
4533         if (status)
4534                 return status;
4535
4536         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4537         if (qdev->port) {
4538                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4539                 qdev->port_link_up = STS_PL1;
4540                 qdev->port_init = STS_PI1;
4541                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4542                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4543         } else {
4544                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4545                 qdev->port_link_up = STS_PL0;
4546                 qdev->port_init = STS_PI0;
4547                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4548                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4549         }
4550         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4551         qdev->device_id = qdev->pdev->device;
4552         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4553                 qdev->nic_ops = &qla8012_nic_ops;
4554         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4555                 qdev->nic_ops = &qla8000_nic_ops;
4556         return status;
4557 }
4558
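/* Undo ql_init_device(): destroy the workqueue, unmap the register
 * and doorbell BARs, free any coredump buffer, and release the PCI
 * regions.
 */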
4559 static void ql_release_all(struct pci_dev *pdev)
4560 {
4561         struct net_device *ndev = pci_get_drvdata(pdev);
4562         struct ql_adapter *qdev = netdev_priv(ndev);
4563
4564         if (qdev->workqueue) {
4565                 destroy_workqueue(qdev->workqueue);
4566                 qdev->workqueue = NULL;
4567         }
4568
4569         if (qdev->reg_base)
4570                 iounmap(qdev->reg_base);
4571         if (qdev->doorbell_area)
4572                 iounmap(qdev->doorbell_area);
4573         vfree(qdev->mpi_coredump);
4574         pci_release_regions(pdev);
4575 }
4576
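/* One-time PCI and software setup: enable the device, map the BARs,
 * set the DMA masks, read the flash, and initialize the default
 * ring sizes, coalescing parameters, and delayed work items.
 */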
4577 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4578                           int cards_found)
4579 {
4580         struct ql_adapter *qdev = netdev_priv(ndev);
4581         int err = 0;
4582
4583         memset((void *)qdev, 0, sizeof(*qdev));
4584         err = pci_enable_device(pdev);
4585         if (err) {
4586                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4587                 return err;
4588         }
4589
4590         qdev->ndev = ndev;
4591         qdev->pdev = pdev;
4592         pci_set_drvdata(pdev, ndev);
4593
4594         /* Set PCIe read request size */
4595         err = pcie_set_readrq(pdev, 4096);
4596         if (err) {
4597                 dev_err(&pdev->dev, "Set readrq failed.\n");
4598                 goto err_out1;
4599         }
4600
4601         err = pci_request_regions(pdev, DRV_NAME);
4602         if (err) {
4603                 dev_err(&pdev->dev, "PCI region request failed.\n");
4604                 goto err_out1;
4605         }
4606
4607         pci_set_master(pdev);
4608         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4609                 set_bit(QL_DMA64, &qdev->flags);
4610                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4611         } else {
4612                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4613                 if (!err)
4614                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4615         }
4616
4617         if (err) {
4618                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4619                 goto err_out2;
4620         }
4621
4622         /* Set PCIe reset type for EEH to fundamental. */
4623         pdev->needs_freset = 1;
4624         pci_save_state(pdev);
4625         qdev->reg_base =
4626             ioremap_nocache(pci_resource_start(pdev, 1),
4627                             pci_resource_len(pdev, 1));
4628         if (!qdev->reg_base) {
4629                 dev_err(&pdev->dev, "Register mapping failed.\n");
4630                 err = -ENOMEM;
4631                 goto err_out2;
4632         }
4633
4634         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4635         qdev->doorbell_area =
4636             ioremap_nocache(pci_resource_start(pdev, 3),
4637                             pci_resource_len(pdev, 3));
4638         if (!qdev->doorbell_area) {
4639                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4640                 err = -ENOMEM;
4641                 goto err_out2;
4642         }
4643
4644         err = ql_get_board_info(qdev);
4645         if (err) {
4646                 dev_err(&pdev->dev, "Register access failed.\n");
4647                 err = -EIO;
4648                 goto err_out2;
4649         }
4650         qdev->msg_enable = netif_msg_init(debug, default_msg);
4651         spin_lock_init(&qdev->hw_lock);
4652         spin_lock_init(&qdev->stats_lock);
4653
4654         if (qlge_mpi_coredump) {
4655                 qdev->mpi_coredump =
4656                         vmalloc(sizeof(struct ql_mpi_coredump));
4657                 if (qdev->mpi_coredump == NULL) {
4658                         err = -ENOMEM;
4659                         goto err_out2;
4660                 }
4661                 if (qlge_force_coredump)
4662                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4663         }
4664         /* make sure the EEPROM is good */
4665         err = qdev->nic_ops->get_flash(qdev);
4666         if (err) {
4667                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4668                 goto err_out2;
4669         }
4670
4671         /* Keep local copy of current mac address. */
4672         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4673
4674         /* Set up the default ring sizes. */
4675         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4676         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4677
4678         /* Set up the coalescing parameters. */
4679         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4680         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4681         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4682         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4683
4684         /*
4685          * Set up the operating parameters.
4686          */
4687         qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4688                                                   ndev->name);
        if (!qdev->workqueue) {
                /* Bail out if the ordered workqueue could not be allocated. */
                err = -ENOMEM;
                goto err_out2;
        }
4689         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4690         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4691         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4692         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4693         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4694         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4695         init_completion(&qdev->ide_completion);
4696         mutex_init(&qdev->mpi_mutex);
4697
4698         if (!cards_found) {
4699                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4700                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4701                          DRV_NAME, DRV_VERSION);
4702         }
4703         return 0;
4704 err_out2:
4705         ql_release_all(pdev);
4706 err_out1:
4707         pci_disable_device(pdev);
4708         return err;
4709 }
4710
4711 static const struct net_device_ops qlge_netdev_ops = {
4712         .ndo_open               = qlge_open,
4713         .ndo_stop               = qlge_close,
4714         .ndo_start_xmit         = qlge_send,
4715         .ndo_change_mtu         = qlge_change_mtu,
4716         .ndo_get_stats          = qlge_get_stats,
4717         .ndo_set_rx_mode        = qlge_set_multicast_list,
4718         .ndo_set_mac_address    = qlge_set_mac_address,
4719         .ndo_validate_addr      = eth_validate_addr,
4720         .ndo_tx_timeout         = qlge_tx_timeout,
4721         .ndo_fix_features       = qlge_fix_features,
4722         .ndo_set_features       = qlge_set_features,
4723         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4724         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4725 };
4726
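/* Deferrable timer that periodically reads STS so a dead PCI bus is
 * noticed (and EEH recovery kicks in) even when the link is idle.
 */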
4727 static void ql_timer(struct timer_list *t)
4728 {
4729         struct ql_adapter *qdev = from_timer(qdev, t, timer);
4730         u32 var = 0;
4731
4732         var = ql_read32(qdev, STS);
4733         if (pci_channel_offline(qdev->pdev)) {
4734                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4735                 return;
4736         }
4737
4738         mod_timer(&qdev->timer, jiffies + (5*HZ));
4739 }
4740
4741 static int qlge_probe(struct pci_dev *pdev,
4742                       const struct pci_device_id *pci_entry)
4743 {
4744         struct net_device *ndev = NULL;
4745         struct ql_adapter *qdev = NULL;
4746         static int cards_found;
4747         int err = 0;
4748
4749         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4750                         min(MAX_CPUS, netif_get_num_default_rss_queues()));
4751         if (!ndev)
4752                 return -ENOMEM;
4753
4754         err = ql_init_device(pdev, ndev, cards_found);
4755         if (err < 0) {
4756                 free_netdev(ndev);
4757                 return err;
4758         }
4759
4760         qdev = netdev_priv(ndev);
4761         SET_NETDEV_DEV(ndev, &pdev->dev);
4762         ndev->hw_features = NETIF_F_SG |
4763                             NETIF_F_IP_CSUM |
4764                             NETIF_F_TSO |
4765                             NETIF_F_TSO_ECN |
4766                             NETIF_F_HW_VLAN_CTAG_TX |
4767                             NETIF_F_HW_VLAN_CTAG_RX |
4768                             NETIF_F_HW_VLAN_CTAG_FILTER |
4769                             NETIF_F_RXCSUM;
4770         ndev->features = ndev->hw_features;
4771         ndev->vlan_features = ndev->hw_features;
4772         /* vlan gets same features (except vlan filter) */
4773         ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4774                                  NETIF_F_HW_VLAN_CTAG_TX |
4775                                  NETIF_F_HW_VLAN_CTAG_RX);
4776
4777         if (test_bit(QL_DMA64, &qdev->flags))
4778                 ndev->features |= NETIF_F_HIGHDMA;
4779
4780         /*
4781          * Set up net_device structure.
4782          */
4783         ndev->tx_queue_len = qdev->tx_ring_size;
4784         ndev->irq = pdev->irq;
4785
4786         ndev->netdev_ops = &qlge_netdev_ops;
4787         ndev->ethtool_ops = &qlge_ethtool_ops;
4788         ndev->watchdog_timeo = 10 * HZ;
4789
4790         /* MTU range: this driver only supports 1500 or 9000, so this only
4791          * filters out values above or below, and we'll rely on
4792          * qlge_change_mtu to make sure only 1500 or 9000 are allowed.
4793          */
4794         ndev->min_mtu = ETH_DATA_LEN;
4795         ndev->max_mtu = 9000;
4796
4797         err = register_netdev(ndev);
4798         if (err) {
4799                 dev_err(&pdev->dev, "net device registration failed.\n");
4800                 ql_release_all(pdev);
4801                 pci_disable_device(pdev);
4802                 free_netdev(ndev);
4803                 return err;
4804         }
4805         /* Start up the timer to trigger EEH if
4806          * the bus goes dead
4807          */
4808         timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
4809         mod_timer(&qdev->timer, jiffies + (5*HZ));
4810         ql_link_off(qdev);
4811         ql_display_dev_info(ndev);
4812         atomic_set(&qdev->lb_count, 0);
4813         cards_found++;
4814         return 0;
4815 }
4816
4817 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4818 {
4819         return qlge_send(skb, ndev);
4820 }
4821
4822 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4823 {
4824         return ql_clean_inbound_rx_ring(rx_ring, budget);
4825 }
4826
4827 static void qlge_remove(struct pci_dev *pdev)
4828 {
4829         struct net_device *ndev = pci_get_drvdata(pdev);
4830         struct ql_adapter *qdev = netdev_priv(ndev);
4831         del_timer_sync(&qdev->timer);
4832         ql_cancel_all_work_sync(qdev);
4833         unregister_netdev(ndev);
4834         ql_release_all(pdev);
4835         pci_disable_device(pdev);
4836         free_netdev(ndev);
4837 }
4838
4839 /* Clean up resources without touching hardware. */
4840 static void ql_eeh_close(struct net_device *ndev)
4841 {
4842         int i;
4843         struct ql_adapter *qdev = netdev_priv(ndev);
4844
4845         if (netif_carrier_ok(ndev)) {
4846                 netif_carrier_off(ndev);
4847                 netif_stop_queue(ndev);
4848         }
4849
4850         /* Cancel all outstanding delayed work. */
4851         ql_cancel_all_work_sync(qdev);
4852
4853         for (i = 0; i < qdev->rss_ring_count; i++)
4854                 netif_napi_del(&qdev->rx_ring[i].napi);
4855
4856         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4857         ql_tx_ring_clean(qdev);
4858         ql_free_rx_buffers(qdev);
4859         ql_release_adapter_resources(qdev);
4860 }
4861
4862 /*
4863  * This callback is called by the PCI subsystem whenever
4864  * a PCI bus error is detected.
4865  */
4866 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4867                                                enum pci_channel_state state)
4868 {
4869         struct net_device *ndev = pci_get_drvdata(pdev);
4870         struct ql_adapter *qdev = netdev_priv(ndev);
4871
4872         switch (state) {
4873         case pci_channel_io_normal:
4874                 return PCI_ERS_RESULT_CAN_RECOVER;
4875         case pci_channel_io_frozen:
4876                 netif_device_detach(ndev);
4877                 del_timer_sync(&qdev->timer);
4878                 if (netif_running(ndev))
4879                         ql_eeh_close(ndev);
4880                 pci_disable_device(pdev);
4881                 return PCI_ERS_RESULT_NEED_RESET;
4882         case pci_channel_io_perm_failure:
4883                 dev_err(&pdev->dev,
4884                         "%s: pci_channel_io_perm_failure.\n", __func__);
4885                 del_timer_sync(&qdev->timer);
4886                 ql_eeh_close(ndev);
4887                 set_bit(QL_EEH_FATAL, &qdev->flags);
4888                 return PCI_ERS_RESULT_DISCONNECT;
4889         }
4890
4891         /* Request a slot reset. */
4892         return PCI_ERS_RESULT_NEED_RESET;
4893 }
4894
4895 /*
4896  * This callback is called after the PCI bus has been reset.
4897  * Basically, this tries to restart the card from scratch.
4898  * This is a shortened version of the device probe/discovery code;
4899  * it resembles the first half of the qlge_probe() routine.
4900  */
4901 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4902 {
4903         struct net_device *ndev = pci_get_drvdata(pdev);
4904         struct ql_adapter *qdev = netdev_priv(ndev);
4905
4906         pdev->error_state = pci_channel_io_normal;
4907
4908         pci_restore_state(pdev);
4909         if (pci_enable_device(pdev)) {
4910                 netif_err(qdev, ifup, qdev->ndev,
4911                           "Cannot re-enable PCI device after reset.\n");
4912                 return PCI_ERS_RESULT_DISCONNECT;
4913         }
4914         pci_set_master(pdev);
4915
4916         if (ql_adapter_reset(qdev)) {
4917                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4918                 set_bit(QL_EEH_FATAL, &qdev->flags);
4919                 return PCI_ERS_RESULT_DISCONNECT;
4920         }
4921
4922         return PCI_ERS_RESULT_RECOVERED;
4923 }
4924
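/* Final EEH recovery step: reopen the device if it was running and
 * restart the bus-watch timer.
 */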
4925 static void qlge_io_resume(struct pci_dev *pdev)
4926 {
4927         struct net_device *ndev = pci_get_drvdata(pdev);
4928         struct ql_adapter *qdev = netdev_priv(ndev);
4929         int err = 0;
4930
4931         if (netif_running(ndev)) {
4932                 err = qlge_open(ndev);
4933                 if (err) {
4934                         netif_err(qdev, ifup, qdev->ndev,
4935                                   "Device initialization failed after reset.\n");
4936                         return;
4937                 }
4938         } else {
4939                 netif_err(qdev, ifup, qdev->ndev,
4940                           "Device was not running prior to EEH.\n");
4941         }
4942         mod_timer(&qdev->timer, jiffies + (5*HZ));
4943         netif_device_attach(ndev);
4944 }
4945
4946 static const struct pci_error_handlers qlge_err_handler = {
4947         .error_detected = qlge_io_error_detected,
4948         .slot_reset = qlge_io_slot_reset,
4949         .resume = qlge_io_resume,
4950 };
4951
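/* Legacy PCI PM suspend: detach, bring the adapter down, arm WOL,
 * then save state and power the device down.  Also used by
 * qlge_shutdown().
 */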
4952 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4953 {
4954         struct net_device *ndev = pci_get_drvdata(pdev);
4955         struct ql_adapter *qdev = netdev_priv(ndev);
4956         int err;
4957
4958         netif_device_detach(ndev);
4959         del_timer_sync(&qdev->timer);
4960
4961         if (netif_running(ndev)) {
4962                 err = ql_adapter_down(qdev);
4963                 if (err)
4964                         return err;
4965         }
4966
4967         ql_wol(qdev);
4968         err = pci_save_state(pdev);
4969         if (err)
4970                 return err;
4971
4972         pci_disable_device(pdev);
4973
4974         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4975
4976         return 0;
4977 }
4978
4979 #ifdef CONFIG_PM
4980 static int qlge_resume(struct pci_dev *pdev)
4981 {
4982         struct net_device *ndev = pci_get_drvdata(pdev);
4983         struct ql_adapter *qdev = netdev_priv(ndev);
4984         int err;
4985
4986         pci_set_power_state(pdev, PCI_D0);
4987         pci_restore_state(pdev);
4988         err = pci_enable_device(pdev);
4989         if (err) {
4990                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4991                 return err;
4992         }
4993         pci_set_master(pdev);
4994
4995         pci_enable_wake(pdev, PCI_D3hot, 0);
4996         pci_enable_wake(pdev, PCI_D3cold, 0);
4997
4998         if (netif_running(ndev)) {
4999                 err = ql_adapter_up(qdev);
5000                 if (err)
5001                         return err;
5002         }
5003
5004         mod_timer(&qdev->timer, jiffies + (5*HZ));
5005         netif_device_attach(ndev);
5006
5007         return 0;
5008 }
5009 #endif /* CONFIG_PM */
5010
5011 static void qlge_shutdown(struct pci_dev *pdev)
5012 {
5013         qlge_suspend(pdev, PMSG_SUSPEND);
5014 }
5015
5016 static struct pci_driver qlge_driver = {
5017         .name = DRV_NAME,
5018         .id_table = qlge_pci_tbl,
5019         .probe = qlge_probe,
5020         .remove = qlge_remove,
5021 #ifdef CONFIG_PM
5022         .suspend = qlge_suspend,
5023         .resume = qlge_resume,
5024 #endif
5025         .shutdown = qlge_shutdown,
5026         .err_handler = &qlge_err_handler
5027 };
5028
5029 module_pci_driver(qlge_driver);