/* bnx2x_main.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] =
        "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
                   "BCM57710/57711/57711E/"
                   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
                   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
MODULE_PARM_DESC(num_queues,
                 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0444);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0444);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0444);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0444);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

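/* addresses and saved values of the MAC-block registers (XMAC/EMAC/UMAC/BMAC) */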
struct bnx2x_mac_vals {
        u32 xmac_addr;
        u32 xmac_val;
        u32 emac_addr;
        u32 emac_val;
        u32 umac_addr[2];
        u32 umac_val[2];
        u32 bmac_addr;
        u32 bmac_val[2];
};

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711,
        BCM57711E,
        BCM57712,
        BCM57712_MF,
        BCM57712_VF,
        BCM57800,
        BCM57800_MF,
        BCM57800_VF,
        BCM57810,
        BCM57810_MF,
        BCM57810_VF,
        BCM57840_4_10,
        BCM57840_2_20,
        BCM57840_MF,
        BCM57840_VF,
        BCM57811,
        BCM57811_MF,
        BCM57840_O,
        BCM57840_MFO,
        BCM57811_VF
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] = {
        [BCM57710]      = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
        [BCM57711]      = { "QLogic BCM57711 10 Gigabit PCIe" },
        [BCM57711E]     = { "QLogic BCM57711E 10 Gigabit PCIe" },
        [BCM57712]      = { "QLogic BCM57712 10 Gigabit Ethernet" },
        [BCM57712_MF]   = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
        [BCM57712_VF]   = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
        [BCM57800]      = { "QLogic BCM57800 10 Gigabit Ethernet" },
        [BCM57800_MF]   = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
        [BCM57800_VF]   = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
        [BCM57810]      = { "QLogic BCM57810 10 Gigabit Ethernet" },
        [BCM57810_MF]   = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
        [BCM57810_VF]   = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
        [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
        [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
        [BCM57840_MF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
        [BCM57840_VF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
        [BCM57811]      = { "QLogic BCM57811 10 Gigabit Ethernet" },
        [BCM57811_MF]   = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
        [BCM57840_O]    = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
        [BCM57840_MFO]  = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
        [BCM57811_VF]   = { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710         CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711         CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E        CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712         CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF      CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF      CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800         CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF      CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF      CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810         CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF      CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O       CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF      CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10    CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20    CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO     CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF      CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF      CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811         CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF      CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF      CHIP_NUM_57811_VF
#endif

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
        { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
        { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
        { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/* Forward declaration */
static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

static void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       u32 addr, dma_addr_t mapping)
{
        REG_WR(bp, addr, U64_LO(mapping));
        REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
                                  dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = XSEM_REG_FAST_MEMORY +
                        XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                  u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                 u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
                                 struct event_ring_data *eq_data,
                                 u16 pfid)
{
        size_t size = sizeof(struct event_ring_data);
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

        __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
                                 u16 pfid)
{
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
        REG_WR16(bp, addr, eq_prod);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

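/* companion to bnx2x_reg_wr_ind(): indirect register read through the PCI
 * config window
 */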
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

#define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE        "dst_addr [none]"

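/* debug-print a DMAE command; the printed layout depends on the source and
 * destination types encoded in the opcode
 */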
static void bnx2x_dp_dmae(struct bnx2x *bp,
                          struct dmae_command *dmae, int msglvl)
{
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
        int i;

        switch (dmae->opcode & DMAE_COMMAND_DST) {
        case DMAE_CMD_DST_PCI:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        case DMAE_CMD_DST_GRC:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        default:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
                           "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
                           "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        }

        for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
                DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
                   i, *(((u32 *)dmae) + i));
}

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
        return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                           DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
        return opcode & ~DMAE_CMD_SRC_RESET;
}

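/* build a DMAE opcode: source/destination types, port, VN, error policy and
 * endianness, with an optional completion
 */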
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
                      bool with_comp, u8 comp_type)
{
        u32 opcode = 0;

        opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
                   (dst_type << DMAE_COMMAND_DST_SHIFT));

        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
        opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
                   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
        opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
        opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
        if (with_comp)
                opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
        return opcode;
}

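/* prepare a DMAE command whose completion is written back over PCI into the
 * wb_comp scratch area
 */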
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
                               struct dmae_command *dmae,
                               u8 src_type, u8 dst_type)
{
        memset(dmae, 0, sizeof(struct dmae_command));

        /* set the opcode */
        dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
                                         true, DMAE_COMP_PCI);

        /* fill in the completion parameters */
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
                               u32 *comp)
{
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;

        bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

        /* Lock the dmae channel. Disable BHs to prevent a deadlock,
         * since this code is called from both syscall context and
         * from the ndo_set_rx_mode() flow, which may run in BH context.
         */
        spin_lock_bh(&bp->dmae_lock);

        /* reset completion */
        *comp = 0;

        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        /* wait for completion */
        udelay(5);
        while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
                if (!cnt ||
                    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
                     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
                }
                cnt--;
                udelay(50);
        }
        if (*comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }

unlock:
        spin_unlock_bh(&bp->dmae_lock);

        return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        int rc;
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                if (CHIP_IS_E1(bp))
                        bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                else
                        bnx2x_init_str_wr(bp, dst_addr, data, len32);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

        /* fill in addresses and len */
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;

        /* issue the command and wait for completion */
        rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        int rc;
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                if (CHIP_IS_E1(bp))
                        for (i = 0; i < len32; i++)
                                data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                else
                        for (i = 0; i < len32; i++)
                                data[i] = REG_RD(bp, src_addr + i*4);

                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

        /* fill in addresses and len */
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;

        /* issue the command and wait for completion */
        rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}

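/* DMAE-write a buffer to GRC in chunks of at most DMAE_LEN32_WR_MAX dwords */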
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                                      u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
        XSTORM,
        TSTORM,
        CSTORM,
        USTORM,
        MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
                                              enum storms storm,
                                              int entry)
{
        switch (storm) {
        case XSTORM:
                return XSTORM_ASSERT_LIST_OFFSET(entry);
        case TSTORM:
                return TSTORM_ASSERT_LIST_OFFSET(entry);
        case CSTORM:
                return CSTORM_ASSERT_LIST_OFFSET(entry);
        case USTORM:
                return USTORM_ASSERT_LIST_OFFSET(entry);
        case MAX_STORMS:
        default:
                BNX2X_ERR("unknown storm\n");
        }
        return -EINVAL;
}

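/* walk each storm's assert list, log every valid entry, and return the
 * number of asserts found
 */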
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, j, rc = 0;
        enum storms storm;
        u32 regs[REGS_IN_ENTRY];
        u32 bar_storm_intmem[STORMS_NUM] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };
        u32 storm_assert_list_index[STORMS_NUM] = {
                XSTORM_ASSERT_LIST_INDEX_OFFSET,
                TSTORM_ASSERT_LIST_INDEX_OFFSET,
                CSTORM_ASSERT_LIST_INDEX_OFFSET,
                USTORM_ASSERT_LIST_INDEX_OFFSET
        };
        char *storms_string[STORMS_NUM] = {
                "XSTORM",
                "TSTORM",
                "CSTORM",
                "USTORM"
        };

        for (storm = XSTORM; storm < MAX_STORMS; storm++) {
                last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
                                   storm_assert_list_index[storm]);
                if (last_idx)
                        BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
                                  storms_string[storm], last_idx);

                /* print the asserts */
                for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
                        /* read a single assert entry */
                        for (j = 0; j < REGS_IN_ENTRY; j++)
                                regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
                                          bnx2x_get_assert_list_entry(bp,
                                                                      storm,
                                                                      i) +
                                          sizeof(u32) * j);

                        /* log entry if it contains a valid assert */
                        if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storms_string[storm], i, regs[3],
                                          regs[2], regs[1], regs[0]);
                                rc++;
                        } else {
                                break;
                        }
                }
        }

        BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
                  CHIP_IS_E1(bp) ? "everest1" :
                  CHIP_IS_E1H(bp) ? "everest1h" :
                  CHIP_IS_E2(bp) ? "everest2" : "everest3",
                  BCM_5710_FW_MAJOR_VERSION,
                  BCM_5710_FW_MINOR_VERSION,
                  BCM_5710_FW_REVISION_VERSION);

        return rc;
}

#define MCPR_TRACE_BUFFER_SIZE  (0x800)
#define SCRATCH_BUFFER_SIZE(bp) \
        (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
        u32 addr, val;
        u32 mark, offset;
        __be32 data[9];
        int word;
        u32 trace_shmem_base;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }
        netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
                (bp->common.bc_ver & 0xff0000) >> 16,
                (bp->common.bc_ver & 0xff00) >> 8,
                (bp->common.bc_ver & 0xff));

        if (pci_channel_offline(bp->pdev)) {
                BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
                return;
        }

        val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
        if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
                BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

        if (BP_PATH(bp) == 0)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

        /* sanity */
        if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
            trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
                                SCRATCH_BUFFER_SIZE(bp)) {
                BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
                          trace_shmem_base);
                return;
        }

        addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

        /* validate TRCB signature */
        mark = REG_RD(bp, addr);
        if (mark != MFW_TRACE_SIGNATURE) {
                BNX2X_ERR("Trace buffer signature is missing.\n");
                return;
        }

        /* read cyclic buffer pointer */
        addr += 4;
        mark = REG_RD(bp, addr);
        mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
        if (mark >= trace_shmem_base || mark < addr + 4) {
                BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
                return;
        }
        printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

        printk("%s", lvl);

        /* dump buffer after the mark */
        for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }

        /* dump buffer before the mark */
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        /* in E1 we must use only PCI configuration space to disable
         * MSI/MSIX capability
         * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
         */
        if (CHIP_IS_E1(bp)) {
                /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
                 * use the mask register to prevent the HC from sending
                 * interrupts after we exit this function.
                 */
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_IFDOWN,
           "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! Proper val not read from HC!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
                 IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_ATTN_BIT_EN);

        DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
        if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
                BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
        else
                bnx2x_igu_int_disable(bp);
}

void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
        int i;
        u16 j;
        struct hc_sp_status_block_data sp_sb_data;
        int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
        u16 start = 0, end = 0;
        u8 cos;
#endif
        if (IS_PF(bp) && disable_int)
                bnx2x_int_disable(bp);

        bp->stats_state = STATS_STATE_DISABLED;
        bp->eth_stats.unrecoverable_error++;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        if (IS_PF(bp)) {
                struct host_sp_status_block *def_sb = bp->def_status_blk;
                int data_size, cstorm_offset;

                BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
                          bp->def_idx, bp->def_att_idx, bp->attn_state,
                          bp->spq_prod_idx, bp->stats_counter);
                BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
                          def_sb->atten_status_block.attn_bits,
                          def_sb->atten_status_block.attn_bits_ack,
                          def_sb->atten_status_block.status_block_id,
                          def_sb->atten_status_block.attn_bits_index);
                BNX2X_ERR("     def (");
                for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
                        pr_cont("0x%x%s",
                                def_sb->sp_sb.index_values[i],
                                (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

                data_size = sizeof(struct hc_sp_status_block_data) /
                            sizeof(u32);
                cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
                for (i = 0; i < data_size; i++)
                        *((u32 *)&sp_sb_data + i) =
                                REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
                                           i * sizeof(u32));

                pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
                        sp_sb_data.igu_sb_id,
                        sp_sb_data.igu_seg_id,
                        sp_sb_data.p_func.pf_id,
                        sp_sb_data.p_func.vnic_id,
                        sp_sb_data.p_func.vf_id,
                        sp_sb_data.p_func.vf_valid,
                        sp_sb_data.state);
        }

        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
                struct hc_status_block_data_e2 sb_data_e2;
                struct hc_status_block_data_e1x sb_data_e1x;
                struct hc_status_block_sm  *hc_sm_p =
                        CHIP_IS_E1x(bp) ?
                        sb_data_e1x.common.state_machine :
                        sb_data_e2.common.state_machine;
                struct hc_index_data *hc_index_p =
                        CHIP_IS_E1x(bp) ?
                        sb_data_e1x.index_data :
                        sb_data_e2.index_data;
                u8 data_size, cos;
                u32 *sb_data_p;
                struct bnx2x_fp_txdata txdata;

                if (!bp->fp)
                        break;

                if (!fp->rx_cons_sb)
                        continue;

                /* Rx */
                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_hc_idx));

                /* Tx */
                for_each_cos_in_tx_queue(fp, cos) {
                        if (!fp->txdata_ptr[cos])
                                break;

                        txdata = *fp->txdata_ptr[cos];

                        if (!txdata.tx_cons_sb)
                                continue;

                        BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
                                  i, txdata.tx_pkt_prod,
                                  txdata.tx_pkt_cons, txdata.tx_bd_prod,
                                  txdata.tx_bd_cons,
                                  le16_to_cpu(*txdata.tx_cons_sb));
                }

                loop = CHIP_IS_E1x(bp) ?
                        HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

                /* host sb data */

                if (IS_FCOE_FP(fp))
                        continue;

                BNX2X_ERR("     run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
                               fp->sb_running_index[j],
                               (j == HC_SB_MAX_SM - 1) ? ")" : " ");

                BNX2X_ERR("     indexes (");
                for (j = 0; j < loop; j++)
                        pr_cont("0x%x%s",
                               fp->sb_index_values[j],
                               (j == loop - 1) ? ")" : " ");

                /* VF cannot access the FW reflection of the status block */
                if (IS_VF(bp))
                        continue;

                /* fw sb data */
                data_size = CHIP_IS_E1x(bp) ?
                        sizeof(struct hc_status_block_data_e1x) :
                        sizeof(struct hc_status_block_data_e2);
                data_size /= sizeof(u32);
                sb_data_p = CHIP_IS_E1x(bp) ?
                        (u32 *)&sb_data_e1x :
                        (u32 *)&sb_data_e2;
                /* copy sb data in here */
                for (j = 0; j < data_size; j++)
                        *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                                CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
                                j * sizeof(u32));

                if (!CHIP_IS_E1x(bp)) {
                        pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e2.common.p_func.pf_id,
                                sb_data_e2.common.p_func.vf_id,
                                sb_data_e2.common.p_func.vf_valid,
                                sb_data_e2.common.p_func.vnic_id,
                                sb_data_e2.common.same_igu_sb_1b,
                                sb_data_e2.common.state);
                } else {
                        pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e1x.common.p_func.pf_id,
                                sb_data_e1x.common.p_func.vf_id,
                                sb_data_e1x.common.p_func.vf_valid,
                                sb_data_e1x.common.p_func.vnic_id,
                                sb_data_e1x.common.same_igu_sb_1b,
                                sb_data_e1x.common.state);
                }

                /* SB_SMs data */
                for (j = 0; j < HC_SB_MAX_SM; j++) {
                        pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
                                j, hc_sm_p[j].__flags,
                                hc_sm_p[j].igu_sb_id,
                                hc_sm_p[j].igu_seg_id,
                                hc_sm_p[j].time_to_expire,
                                hc_sm_p[j].timer_value);
                }

                /* Indices data */
                for (j = 0; j < loop; j++) {
                        pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
                               hc_index_p[j].flags,
                               hc_index_p[j].timeout);
                }
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (IS_PF(bp)) {
                /* event queue */
                BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
                for (i = 0; i < NUM_EQ_DESC; i++) {
                        u32 *data = (u32 *)&bp->eq_ring[i].message.data;

                        BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
                                  i, bp->eq_ring[i].message.opcode,
                                  bp->eq_ring[i].message.error);
                        BNX2X_ERR("data: %x %x %x\n",
                                  data[0], data[1], data[2]);
                }
        }

        /* Rings */
        /* Rx */
        for_each_valid_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                if (!bp->fp)
                        break;

                if (!fp->rx_cons_sb)
                        continue;

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_valid_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                if (!bp->fp)
                        break;

                for_each_cos_in_tx_queue(fp, cos) {
                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

                        if (!fp->txdata_ptr[cos])
                                break;

                        if (!txdata->tx_cons_sb)
                                continue;

                        start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
                        end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                struct sw_tx_bd *sw_bd =
                                        &txdata->tx_buf_ring[j];

                                BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
                                          i, cos, j, sw_bd->skb,
                                          sw_bd->first_bd);
                        }

                        start = TX_BD(txdata->tx_bd_cons - 10);
                        end = TX_BD(txdata->tx_bd_cons + 254);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

                                BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
                                          i, cos, j, tx_bd[0], tx_bd[1],
                                          tx_bd[2], tx_bd[3]);
                        }
                }
        }
#endif
        if (IS_PF(bp)) {
                bnx2x_fw_dump(bp);
                bnx2x_mc_assert(bp);
        }
        BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC           10000   /* 10 milliseconds */
#define FLR_WAIT_INTERVAL       50      /* usec */
#define FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

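/* register sets used to poll the PBF buffers and command queues until they
 * drain
 */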
struct pbf_pN_buf_regs {
        int pN;
        u32 init_crd;
        u32 crd;
        u32 crd_freed;
};

struct pbf_pN_cmd_regs {
        int pN;
        u32 lines_occup;
        u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
                                     struct pbf_pN_buf_regs *regs,
                                     u32 poll_count)
{
        u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
        u32 cur_cnt = poll_count;

        crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
        crd = crd_start = REG_RD(bp, regs->crd);
        init_crd = REG_RD(bp, regs->init_crd);

        DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
        DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

        while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
               (init_crd - crd_start))) {
                if (cur_cnt--) {
                        udelay(FLR_WAIT_INTERVAL);
                        crd = REG_RD(bp, regs->crd);
                        crd_freed = REG_RD(bp, regs->crd_freed);
                } else {
                        DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
                           regs->pN);
                        DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
                           regs->pN, crd);
                        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
                           regs->pN, crd_freed);
                        break;
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
                                     struct pbf_pN_cmd_regs *regs,
                                     u32 poll_count)
{
        u32 occup, to_free, freed, freed_start;
        u32 cur_cnt = poll_count;

        occup = to_free = REG_RD(bp, regs->lines_occup);
        freed = freed_start = REG_RD(bp, regs->lines_freed);

        DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

        while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
                if (cur_cnt--) {
                        udelay(FLR_WAIT_INTERVAL);
                        occup = REG_RD(bp, regs->lines_occup);
                        freed = REG_RD(bp, regs->lines_freed);
                } else {
                        DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
                           regs->pN);
                        DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
                           regs->pN, occup);
                        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
                           regs->pN, freed);
                        break;
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

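/* poll a register until it reads 'expected' or the poll count expires;
 * returns the last value read
 */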
static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
                                    u32 expected, u32 poll_count)
{
        u32 cur_cnt = poll_count;
        u32 val;

        while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
                udelay(FLR_WAIT_INTERVAL);

        return val;
}

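/* poll an HW usage counter down to zero; log and return 1 if it never clears */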
1291 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1292                                     char *msg, u32 poll_cnt)
1293 {
1294         u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1295         if (val != 0) {
1296                 BNX2X_ERR("%s usage count=%d\n", msg, val);
1297                 return 1;
1298         }
1299         return 0;
1300 }
1301
1302 /* Common routines with VF FLR cleanup */
1303 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1304 {
1305         /* adjust polling timeout */
1306         if (CHIP_REV_IS_EMUL(bp))
1307                 return FLR_POLL_CNT * 2000;
1308
1309         if (CHIP_REV_IS_FPGA(bp))
1310                 return FLR_POLL_CNT * 120;
1311
1312         return FLR_POLL_CNT;
1313 }
1314
1315 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1316 {
1317         struct pbf_pN_cmd_regs cmd_regs[] = {
1318                 {0, (CHIP_IS_E3B0(bp)) ?
1319                         PBF_REG_TQ_OCCUPANCY_Q0 :
1320                         PBF_REG_P0_TQ_OCCUPANCY,
1321                     (CHIP_IS_E3B0(bp)) ?
1322                         PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1323                         PBF_REG_P0_TQ_LINES_FREED_CNT},
1324                 {1, (CHIP_IS_E3B0(bp)) ?
1325                         PBF_REG_TQ_OCCUPANCY_Q1 :
1326                         PBF_REG_P1_TQ_OCCUPANCY,
1327                     (CHIP_IS_E3B0(bp)) ?
1328                         PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1329                         PBF_REG_P1_TQ_LINES_FREED_CNT},
1330                 {4, (CHIP_IS_E3B0(bp)) ?
1331                         PBF_REG_TQ_OCCUPANCY_LB_Q :
1332                         PBF_REG_P4_TQ_OCCUPANCY,
1333                     (CHIP_IS_E3B0(bp)) ?
1334                         PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1335                         PBF_REG_P4_TQ_LINES_FREED_CNT}
1336         };
1337
1338         struct pbf_pN_buf_regs buf_regs[] = {
1339                 {0, (CHIP_IS_E3B0(bp)) ?
1340                         PBF_REG_INIT_CRD_Q0 :
1341                         PBF_REG_P0_INIT_CRD ,
1342                     (CHIP_IS_E3B0(bp)) ?
1343                         PBF_REG_CREDIT_Q0 :
1344                         PBF_REG_P0_CREDIT,
1345                     (CHIP_IS_E3B0(bp)) ?
1346                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1347                         PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1348                 {1, (CHIP_IS_E3B0(bp)) ?
1349                         PBF_REG_INIT_CRD_Q1 :
1350                         PBF_REG_P1_INIT_CRD,
1351                     (CHIP_IS_E3B0(bp)) ?
1352                         PBF_REG_CREDIT_Q1 :
1353                         PBF_REG_P1_CREDIT,
1354                     (CHIP_IS_E3B0(bp)) ?
1355                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1356                         PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1357                 {4, (CHIP_IS_E3B0(bp)) ?
1358                         PBF_REG_INIT_CRD_LB_Q :
1359                         PBF_REG_P4_INIT_CRD,
1360                     (CHIP_IS_E3B0(bp)) ?
1361                         PBF_REG_CREDIT_LB_Q :
1362                         PBF_REG_P4_CREDIT,
1363                     (CHIP_IS_E3B0(bp)) ?
1364                         PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1365                         PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1366         };
1367
1368         int i;
1369
1370         /* Verify the command queues are flushed P0, P1, P4 */
1371         for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1372                 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1373
1374         /* Verify the transmission buffers are flushed P0, P1, P4 */
1375         for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1376                 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1377 }
1378
1379 #define OP_GEN_PARAM(param) \
1380         (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1381
1382 #define OP_GEN_TYPE(type) \
1383         (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1384
1385 #define OP_GEN_AGG_VECT(index) \
1386         (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1387
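     /*
      * Usage sketch - this is how bnx2x_send_final_clnup() below assembles
      * the 32-bit command written to XSDM_REG_OPERATION_GEN:
      *
      *   cmd  = OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
      *   cmd |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
      *   cmd |= OP_GEN_AGG_VECT(clnup_func);
      *   cmd |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
      *
      * Each macro shifts its value into place and masks it to the field
      * width, so an out-of-range value cannot corrupt neighbouring fields.
      */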
1388 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1389 {
1390         u32 op_gen_command = 0;
1391         u32 comp_addr = BAR_CSTRORM_INTMEM +
1392                         CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1393         int ret = 0;
1394
1395         if (REG_RD(bp, comp_addr)) {
1396                 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1397                 return 1;
1398         }
1399
1400         op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1401         op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1402         op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1403         op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1404
1405         DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1406         REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1407
1408         if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1409                 BNX2X_ERR("FW final cleanup did not succeed\n");
1410                 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1411                    (REG_RD(bp, comp_addr)));
1412                 bnx2x_panic();
1413                 return 1;
1414         }
1415         /* Zero completion for next FLR */
1416         REG_WR(bp, comp_addr, 0);
1417
1418         return ret;
1419 }
1420
1421 u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1422 {
1423         u16 status;
1424
1425         pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1426         return status & PCI_EXP_DEVSTA_TRPND;
1427 }
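     /*
      * PCI_EXP_DEVSTA_TRPND is the "Transactions Pending" bit of the PCIe
      * Device Status register; it reads non-zero while the function still
      * has non-posted requests outstanding on the link.
      */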
1428
1429 /* PF FLR specific routines */
1430
1431 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1432 {
1433         /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1434         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1435                         CFC_REG_NUM_LCIDS_INSIDE_PF,
1436                         "CFC PF usage counter timed out",
1437                         poll_cnt))
1438                 return 1;
1439
1440         /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1441         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1442                         DORQ_REG_PF_USAGE_CNT,
1443                         "DQ PF usage counter timed out",
1444                         poll_cnt))
1445                 return 1;
1446
1447         /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1448         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1449                         QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1450                         "QM PF usage counter timed out",
1451                         poll_cnt))
1452                 return 1;
1453
1454         /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1455         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1456                         TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1457                         "Timers VNIC usage counter timed out",
1458                         poll_cnt))
1459                 return 1;
1460         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1461                         TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1462                         "Timers NUM_SCANS usage counter timed out",
1463                         poll_cnt))
1464                 return 1;
1465
1466         /* Wait for the DMAE PF usage counter to zero */
1467         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1468                         dmae_reg_go_c[INIT_DMAE_C(bp)],
1469                         "DMAE command register timed out",
1470                         poll_cnt))
1471                 return 1;
1472
1473         return 0;
1474 }
1475
1476 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1477 {
1478         u32 val;
1479
1480         val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1481         DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1482
1483         val = REG_RD(bp, PBF_REG_DISABLE_PF);
1484         DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1485
1486         val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1487         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1488
1489         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1490         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1491
1492         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1493         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1494
1495         val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1496         DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1497
1498         val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1499         DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1500
1501         val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1502         DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1503            val);
1504 }
1505
1506 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1507 {
1508         u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1509
1510         DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1511
1512         /* Re-enable PF target read access */
1513         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1514
1515         /* Poll HW usage counters */
1516         DP(BNX2X_MSG_SP, "Polling usage counters\n");
1517         if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1518                 return -EBUSY;
1519
1520         /* Zero the igu 'trailing edge' and 'leading edge' */
1521
1522         /* Send the FW cleanup command */
1523         if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1524                 return -EBUSY;
1525
1526         /* ATC cleanup */
1527
1528         /* Verify TX hw is flushed */
1529         bnx2x_tx_hw_flushed(bp, poll_cnt);
1530
1531         /* Wait 100ms (not adjusted according to platform) */
1532         msleep(100);
1533
1534         /* Verify no pending pci transactions */
1535         if (bnx2x_is_pcie_pending(bp->pdev))
1536                 BNX2X_ERR("PCIE Transactions still pending\n");
1537
1538         /* Debug */
1539         bnx2x_hw_enable_status(bp);
1540
1541         /*
1542          * Master enable - needed due to WB DMAE writes performed before
1543          * this register is re-initialized by the regular function init
1544          */
1545         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1546
1547         return 0;
1548 }
1549
1550 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1551 {
1552         int port = BP_PORT(bp);
1553         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1554         u32 val = REG_RD(bp, addr);
1555         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1556         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1557         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1558
1559         if (msix) {
1560                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1561                          HC_CONFIG_0_REG_INT_LINE_EN_0);
1562                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1563                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1564                 if (single_msix)
1565                         val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1566         } else if (msi) {
1567                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1568                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1569                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1570                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1571         } else {
1572                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1573                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1574                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
1575                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1576
1577                 if (!CHIP_IS_E1(bp)) {
1578                         DP(NETIF_MSG_IFUP,
1579                            "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1580
1581                         REG_WR(bp, addr, val);
1582
1583                         val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1584                 }
1585         }
1586
1587         if (CHIP_IS_E1(bp))
1588                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1589
1590         DP(NETIF_MSG_IFUP,
1591            "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1592            (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1593
1594         REG_WR(bp, addr, val);
1595         /*
1596          * Ensure that HC_CONFIG is written before leading/trailing edge config
1597          */
1598         mmiowb();
1599         barrier();
1600
1601         if (!CHIP_IS_E1(bp)) {
1602                 /* init leading/trailing edge */
1603                 if (IS_MF(bp)) {
1604                         val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1605                         if (bp->port.pmf)
1606                                 /* enable nig and gpio3 attention */
1607                                 val |= 0x1100;
1608                 } else
1609                         val = 0xffff;
1610
1611                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1612                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1613         }
1614
1615         /* Make sure that interrupts are indeed enabled from here on */
1616         mmiowb();
1617 }
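     /*
      * Worked example for the IS_MF() edge value above: vn 1 on a PMF gives
      * val = 0xee0f | (1 << 5) | 0x1100 = 0xff2f - the base pattern, this
      * vn's bit, plus the nig/gpio3 attention bits noted in the comment.
      */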
1618
1619 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1620 {
1621         u32 val;
1622         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1623         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1624         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1625
1626         val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1627
1628         if (msix) {
1629                 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1630                          IGU_PF_CONF_SINGLE_ISR_EN);
1631                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1632                         IGU_PF_CONF_ATTN_BIT_EN);
1633
1634                 if (single_msix)
1635                         val |= IGU_PF_CONF_SINGLE_ISR_EN;
1636         } else if (msi) {
1637                 val &= ~IGU_PF_CONF_INT_LINE_EN;
1638                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1639                         IGU_PF_CONF_ATTN_BIT_EN |
1640                         IGU_PF_CONF_SINGLE_ISR_EN);
1641         } else {
1642                 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1643                 val |= (IGU_PF_CONF_INT_LINE_EN |
1644                         IGU_PF_CONF_ATTN_BIT_EN |
1645                         IGU_PF_CONF_SINGLE_ISR_EN);
1646         }
1647
1648         /* Clean previous status - need to configure igu prior to ack */
1649         if ((!msix) || single_msix) {
1650                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1651                 bnx2x_ack_int(bp);
1652         }
1653
1654         val |= IGU_PF_CONF_FUNC_EN;
1655
1656         DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
1657            val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1658
1659         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1660
1661         if (val & IGU_PF_CONF_INT_LINE_EN)
1662                 pci_intx(bp->pdev, true);
1663
1664         barrier();
1665
1666         /* init leading/trailing edge */
1667         if (IS_MF(bp)) {
1668                 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1669                 if (bp->port.pmf)
1670                         /* enable nig and gpio3 attention */
1671                         val |= 0x1100;
1672         } else
1673                 val = 0xffff;
1674
1675         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1676         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1677
1678         /* Make sure that interrupts are indeed enabled from here on */
1679         mmiowb();
1680 }
1681
1682 void bnx2x_int_enable(struct bnx2x *bp)
1683 {
1684         if (bp->common.int_block == INT_BLOCK_HC)
1685                 bnx2x_hc_int_enable(bp);
1686         else
1687                 bnx2x_igu_int_enable(bp);
1688 }
1689
1690 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1691 {
1692         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1693         int i, offset;
1694
1695         if (disable_hw)
1696                 /* prevent the HW from sending interrupts */
1697                 bnx2x_int_disable(bp);
1698
1699         /* make sure all ISRs are done */
1700         if (msix) {
1701                 synchronize_irq(bp->msix_table[0].vector);
1702                 offset = 1;
1703                 if (CNIC_SUPPORT(bp))
1704                         offset++;
1705                 for_each_eth_queue(bp, i)
1706                         synchronize_irq(bp->msix_table[offset++].vector);
1707         } else
1708                 synchronize_irq(bp->pdev->irq);
1709
1710         /* make sure sp_task is not running */
1711         cancel_delayed_work(&bp->sp_task);
1712         cancel_delayed_work(&bp->period_task);
1713         flush_workqueue(bnx2x_wq);
1714 }
1715
1716 /* fast path */
1717
1718 /*
1719  * General service functions
1720  */
1721
1722 /* Return true if succeeded to acquire the lock */
1723 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1724 {
1725         u32 lock_status;
1726         u32 resource_bit = (1 << resource);
1727         int func = BP_FUNC(bp);
1728         u32 hw_lock_control_reg;
1729
1730         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1731            "Trying to take a lock on resource %d\n", resource);
1732
1733         /* Validating that the resource is within range */
1734         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1735                 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1736                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1737                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1738                 return false;
1739         }
1740
1741         if (func <= 5)
1742                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1743         else
1744                 hw_lock_control_reg =
1745                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1746
1747         /* Try to acquire the lock */
1748         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1749         lock_status = REG_RD(bp, hw_lock_control_reg);
1750         if (lock_status & resource_bit)
1751                 return true;
1752
1753         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1754            "Failed to get a lock on resource %d\n", resource);
1755         return false;
1756 }
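     /*
      * Lock register convention used here and in bnx2x_release_hw_lock()
      * below: writing resource_bit to hw_lock_control_reg + 4 requests the
      * lock, reading hw_lock_control_reg back shows whether we now own it,
      * and writing resource_bit to hw_lock_control_reg itself releases it.
      */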
1757
1758 /**
1759  * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1760  *
1761  * @bp: driver handle
1762  *
1763  * Returns the recovery leader resource id according to the engine this function
1764  * belongs to. Currently only 2 engines are supported.
1765  */
1766 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1767 {
1768         if (BP_PATH(bp))
1769                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1770         else
1771                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1772 }
1773
1774 /**
1775  * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1776  *
1777  * @bp: driver handle
1778  *
1779  * Tries to acquire a leader lock for current engine.
1780  */
1781 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1782 {
1783         return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1784 }
1785
1786 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1787
1788 /* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1789 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1790 {
1791         /* Set the interrupt occurred bit for the sp-task to recognize it
1792          * must ack the interrupt and transition according to the IGU
1793          * state machine.
1794          */
1795         atomic_set(&bp->interrupt_occurred, 1);
1796
1797         /* The sp_task must execute only after this bit
1798          * is set, otherwise we will get out of sync and miss all
1799          * further interrupts. Hence, the barrier.
1800          */
1801         smp_wmb();
1802
1803         /* schedule sp_task to workqueue */
1804         return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1805 }
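     /*
      * The smp_wmb() above publishes interrupt_occurred before the work is
      * queued; the sp_task side presumably pairs this with a read barrier
      * before acting on the flag, per the comment in this function.
      */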
1806
1807 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1808 {
1809         struct bnx2x *bp = fp->bp;
1810         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1811         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1812         enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1813         struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1814
1815         DP(BNX2X_MSG_SP,
1816            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1817            fp->index, cid, command, bp->state,
1818            rr_cqe->ramrod_cqe.ramrod_type);
1819
1820         /* If cid is within VF range, replace the slowpath object with the
1821          * one corresponding to this VF
1822          */
1823         if (cid >= BNX2X_FIRST_VF_CID  &&
1824             cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1825                 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1826
1827         switch (command) {
1828         case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1829                 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1830                 drv_cmd = BNX2X_Q_CMD_UPDATE;
1831                 break;
1832
1833         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1834                 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1835                 drv_cmd = BNX2X_Q_CMD_SETUP;
1836                 break;
1837
1838         case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1839                 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1840                 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1841                 break;
1842
1843         case (RAMROD_CMD_ID_ETH_HALT):
1844                 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1845                 drv_cmd = BNX2X_Q_CMD_HALT;
1846                 break;
1847
1848         case (RAMROD_CMD_ID_ETH_TERMINATE):
1849                 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1850                 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1851                 break;
1852
1853         case (RAMROD_CMD_ID_ETH_EMPTY):
1854                 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1855                 drv_cmd = BNX2X_Q_CMD_EMPTY;
1856                 break;
1857
1858         case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1859                 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1860                 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1861                 break;
1862
1863         default:
1864                 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1865                           command, fp->index);
1866                 return;
1867         }
1868
1869         if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1870             q_obj->complete_cmd(bp, q_obj, drv_cmd))
1871                 /* q_obj->complete_cmd() failure means that this was
1872                  * an unexpected completion.
1873                  *
1874                  * In this case we don't want to increase bp->cq_spq_left
1875                  * because apparently we haven't sent this command in the
1876                  * first place.
1877                  */
1878 #ifdef BNX2X_STOP_ON_ERROR
1879                 bnx2x_panic();
1880 #else
1881                 return;
1882 #endif
1883
1884         smp_mb__before_atomic();
1885         atomic_inc(&bp->cq_spq_left);
1886         /* push the change in bp->cq_spq_left out to memory */
1887         smp_mb__after_atomic();
1888
1889         DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1890
1891         if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1892             (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1893                 /* if Q update ramrod is completed for last Q in AFEX vif set
1894                  * flow, then ACK MCP at the end
1895                  *
1896                  * mark pending ACK to MCP bit.
1897                  * prevent case that both bits are cleared.
1898                  * At the end of load/unload driver checks that
1899                  * sp_state is cleared, and this order prevents
1900                  * races
1901                  */
1902                 smp_mb__before_atomic();
1903                 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1904                 wmb();
1905                 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1906                 smp_mb__after_atomic();
1907
1908                 /* schedule the sp task as mcp ack is required */
1909                 bnx2x_schedule_sp_task(bp);
1910         }
1911
1912         return;
1913 }
1914
1915 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1916 {
1917         struct bnx2x *bp = netdev_priv(dev_instance);
1918         u16 status = bnx2x_ack_int(bp);
1919         u16 mask;
1920         int i;
1921         u8 cos;
1922
1923         /* Return here if interrupt is shared and it's not for us */
1924         if (unlikely(status == 0)) {
1925                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1926                 return IRQ_NONE;
1927         }
1928         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1929
1930 #ifdef BNX2X_STOP_ON_ERROR
1931         if (unlikely(bp->panic))
1932                 return IRQ_HANDLED;
1933 #endif
1934
1935         for_each_eth_queue(bp, i) {
1936                 struct bnx2x_fastpath *fp = &bp->fp[i];
1937
1938                 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1939                 if (status & mask) {
1940                         /* Handle Rx or Tx according to SB id */
1941                         for_each_cos_in_tx_queue(fp, cos)
1942                                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1943                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1944                         napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1945                         status &= ~mask;
1946                 }
1947         }
1948
1949         if (CNIC_SUPPORT(bp)) {
1950                 mask = 0x2;
1951                 if (status & (mask | 0x1)) {
1952                         struct cnic_ops *c_ops = NULL;
1953
1954                         rcu_read_lock();
1955                         c_ops = rcu_dereference(bp->cnic_ops);
1956                         if (c_ops && (bp->cnic_eth_dev.drv_state &
1957                                       CNIC_DRV_STATE_HANDLES_IRQ))
1958                                 c_ops->cnic_handler(bp->cnic_data, NULL);
1959                         rcu_read_unlock();
1960
1961                         status &= ~mask;
1962                 }
1963         }
1964
1965         if (unlikely(status & 0x1)) {
1966
1967                 /* schedule sp task to perform default status block work, ack
1968                  * attentions and enable interrupts.
1969                  */
1970                 bnx2x_schedule_sp_task(bp);
1971
1972                 status &= ~0x1;
1973                 if (!status)
1974                         return IRQ_HANDLED;
1975         }
1976
1977         if (unlikely(status))
1978                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1979                    status);
1980
1981         return IRQ_HANDLED;
1982 }
1983
1984 /* Link */
1985
1986 /*
1987  * General service functions
1988  */
1989
1990 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1991 {
1992         u32 lock_status;
1993         u32 resource_bit = (1 << resource);
1994         int func = BP_FUNC(bp);
1995         u32 hw_lock_control_reg;
1996         int cnt;
1997
1998         /* Validating that the resource is within range */
1999         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2000                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2001                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
2002                 return -EINVAL;
2003         }
2004
2005         if (func <= 5) {
2006                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2007         } else {
2008                 hw_lock_control_reg =
2009                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2010         }
2011
2012         /* Validating that the resource is not already taken */
2013         lock_status = REG_RD(bp, hw_lock_control_reg);
2014         if (lock_status & resource_bit) {
2015                 BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
2016                    lock_status, resource_bit);
2017                 return -EEXIST;
2018         }
2019
2020         /* Try for about 5 seconds, every 5-10ms */
2021         for (cnt = 0; cnt < 1000; cnt++) {
2022                 /* Try to acquire the lock */
2023                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2024                 lock_status = REG_RD(bp, hw_lock_control_reg);
2025                 if (lock_status & resource_bit)
2026                         return 0;
2027
2028                 usleep_range(5000, 10000);
2029         }
2030         BNX2X_ERR("Timeout\n");
2031         return -EAGAIN;
2032 }
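     /*
      * Timing sketch: 1000 iterations of usleep_range(5000, 10000) bound
      * the acquire attempt to roughly 5-10 seconds before giving up with
      * -EAGAIN, matching the comment above.
      */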
2033
2034 int bnx2x_release_leader_lock(struct bnx2x *bp)
2035 {
2036         return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2037 }
2038
2039 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2040 {
2041         u32 lock_status;
2042         u32 resource_bit = (1 << resource);
2043         int func = BP_FUNC(bp);
2044         u32 hw_lock_control_reg;
2045
2046         /* Validating that the resource is within range */
2047         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2048                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2049                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
2050                 return -EINVAL;
2051         }
2052
2053         if (func <= 5) {
2054                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2055         } else {
2056                 hw_lock_control_reg =
2057                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2058         }
2059
2060         /* Validating that the resource is currently taken */
2061         lock_status = REG_RD(bp, hw_lock_control_reg);
2062         if (!(lock_status & resource_bit)) {
2063                 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2064                           lock_status, resource_bit);
2065                 return -EFAULT;
2066         }
2067
2068         REG_WR(bp, hw_lock_control_reg, resource_bit);
2069         return 0;
2070 }
2071
2072 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2073 {
2074         /* The GPIO should be swapped if swap register is set and active */
2075         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2076                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2077         int gpio_shift = gpio_num +
2078                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2079         u32 gpio_mask = (1 << gpio_shift);
2080         u32 gpio_reg;
2081         int value;
2082
2083         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2084                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2085                 return -EINVAL;
2086         }
2087
2088         /* read GPIO value */
2089         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2090
2091         /* get the requested pin value */
2092         if ((gpio_reg & gpio_mask) == gpio_mask)
2093                 value = 1;
2094         else
2095                 value = 0;
2096
2097         return value;
2098 }
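     /*
      * Shift example (a sketch; MISC_REGISTERS_GPIO_PORT_SHIFT is defined
      * elsewhere): GPIO 1 on the non-swapped port is simply bit 1 of
      * MISC_REG_GPIO, while on the swapped port it moves up to bit
      * (1 + MISC_REGISTERS_GPIO_PORT_SHIFT).
      */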
2099
2100 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2101 {
2102         /* The GPIO should be swapped if swap register is set and active */
2103         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2104                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2105         int gpio_shift = gpio_num +
2106                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2107         u32 gpio_mask = (1 << gpio_shift);
2108         u32 gpio_reg;
2109
2110         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2111                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2112                 return -EINVAL;
2113         }
2114
2115         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2116         /* read GPIO and mask except the float bits */
2117         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2118
2119         switch (mode) {
2120         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2121                 DP(NETIF_MSG_LINK,
2122                    "Set GPIO %d (shift %d) -> output low\n",
2123                    gpio_num, gpio_shift);
2124                 /* clear FLOAT and set CLR */
2125                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2126                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2127                 break;
2128
2129         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2130                 DP(NETIF_MSG_LINK,
2131                    "Set GPIO %d (shift %d) -> output high\n",
2132                    gpio_num, gpio_shift);
2133                 /* clear FLOAT and set SET */
2134                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2135                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2136                 break;
2137
2138         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2139                 DP(NETIF_MSG_LINK,
2140                    "Set GPIO %d (shift %d) -> input\n",
2141                    gpio_num, gpio_shift);
2142                 /* set FLOAT */
2143                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2144                 break;
2145
2146         default:
2147                 break;
2148         }
2149
2150         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2151         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2152
2153         return 0;
2154 }
2155
2156 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2157 {
2158         u32 gpio_reg = 0;
2159         int rc = 0;
2160
2161         /* Any port swapping should be handled by caller. */
2162
2163         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2164         /* read GPIO and clear the pins' FLOAT, CLR and SET bits */
2165         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2166         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2167         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2168         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2169
2170         switch (mode) {
2171         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2172                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2173                 /* set CLR */
2174                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2175                 break;
2176
2177         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2178                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2179                 /* set SET */
2180                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2181                 break;
2182
2183         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2184                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2185                 /* set FLOAT */
2186                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2187                 break;
2188
2189         default:
2190                 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2191                 rc = -EINVAL;
2192                 break;
2193         }
2194
2195         if (rc == 0)
2196                 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2197
2198         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2199
2200         return rc;
2201 }
2202
2203 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2204 {
2205         /* The GPIO should be swapped if swap register is set and active */
2206         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2207                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2208         int gpio_shift = gpio_num +
2209                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2210         u32 gpio_mask = (1 << gpio_shift);
2211         u32 gpio_reg;
2212
2213         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2214                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2215                 return -EINVAL;
2216         }
2217
2218         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2219         /* read GPIO int */
2220         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2221
2222         switch (mode) {
2223         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2224                 DP(NETIF_MSG_LINK,
2225                    "Clear GPIO INT %d (shift %d) -> output low\n",
2226                    gpio_num, gpio_shift);
2227                 /* clear SET and set CLR */
2228                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2229                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2230                 break;
2231
2232         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2233                 DP(NETIF_MSG_LINK,
2234                    "Set GPIO INT %d (shift %d) -> output high\n",
2235                    gpio_num, gpio_shift);
2236                 /* clear CLR and set SET */
2237                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2238                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2239                 break;
2240
2241         default:
2242                 break;
2243         }
2244
2245         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2246         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2247
2248         return 0;
2249 }
2250
2251 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2252 {
2253         u32 spio_reg;
2254
2255         /* Only 2 SPIOs are configurable */
2256         if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2257                 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2258                 return -EINVAL;
2259         }
2260
2261         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2262         /* read SPIO and mask except the float bits */
2263         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2264
2265         switch (mode) {
2266         case MISC_SPIO_OUTPUT_LOW:
2267                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2268                 /* clear FLOAT and set CLR */
2269                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2270                 spio_reg |=  (spio << MISC_SPIO_CLR_POS);
2271                 break;
2272
2273         case MISC_SPIO_OUTPUT_HIGH:
2274                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2275                 /* clear FLOAT and set SET */
2276                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2277                 spio_reg |=  (spio << MISC_SPIO_SET_POS);
2278                 break;
2279
2280         case MISC_SPIO_INPUT_HI_Z:
2281                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2282                 /* set FLOAT */
2283                 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2284                 break;
2285
2286         default:
2287                 break;
2288         }
2289
2290         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2291         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2292
2293         return 0;
2294 }
2295
2296 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2297 {
2298         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2299
2300         bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2301                                            ADVERTISED_Pause);
2302         switch (bp->link_vars.ieee_fc &
2303                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2304         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2305                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2306                                                   ADVERTISED_Pause);
2307                 break;
2308
2309         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2310                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2311                 break;
2312
2313         default:
2314                 break;
2315         }
2316 }
2317
2318 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2319 {
2320         /* Initialize link parameters structure variables
2321          * It is recommended to turn off RX FC for jumbo frames
2322          *  for better performance
2323          */
2324         if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2325                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2326         else
2327                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2328 }
2329
2330 static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2331 {
2332         u32 pause_enabled = 0;
2333
2334         if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2335                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2336                         pause_enabled = 1;
2337
2338                 REG_WR(bp, BAR_USTRORM_INTMEM +
2339                            USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2340                        pause_enabled);
2341         }
2342
2343         DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2344            pause_enabled ? "enabled" : "disabled");
2345 }
2346
2347 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2348 {
2349         int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2350         u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2351
2352         if (!BP_NOMCP(bp)) {
2353                 bnx2x_set_requested_fc(bp);
2354                 bnx2x_acquire_phy_lock(bp);
2355
2356                 if (load_mode == LOAD_DIAG) {
2357                         struct link_params *lp = &bp->link_params;
2358                         lp->loopback_mode = LOOPBACK_XGXS;
2359                         /* Prefer doing PHY loopback at highest speed */
2360                         if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2361                                 if (lp->speed_cap_mask[cfx_idx] &
2362                                     PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2363                                         lp->req_line_speed[cfx_idx] =
2364                                         SPEED_20000;
2365                                 else if (lp->speed_cap_mask[cfx_idx] &
2366                                             PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2367                                                 lp->req_line_speed[cfx_idx] =
2368                                                 SPEED_10000;
2369                                 else
2370                                         lp->req_line_speed[cfx_idx] =
2371                                         SPEED_1000;
2372                         }
2373                 }
2374
2375                 if (load_mode == LOAD_LOOPBACK_EXT) {
2376                         struct link_params *lp = &bp->link_params;
2377                         lp->loopback_mode = LOOPBACK_EXT;
2378                 }
2379
2380                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2381
2382                 bnx2x_release_phy_lock(bp);
2383
2384                 bnx2x_init_dropless_fc(bp);
2385
2386                 bnx2x_calc_fc_adv(bp);
2387
2388                 if (bp->link_vars.link_up) {
2389                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2390                         bnx2x_link_report(bp);
2391                 }
2392                 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2393                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2394                 return rc;
2395         }
2396         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2397         return -EINVAL;
2398 }
2399
2400 void bnx2x_link_set(struct bnx2x *bp)
2401 {
2402         if (!BP_NOMCP(bp)) {
2403                 bnx2x_acquire_phy_lock(bp);
2404                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2405                 bnx2x_release_phy_lock(bp);
2406
2407                 bnx2x_init_dropless_fc(bp);
2408
2409                 bnx2x_calc_fc_adv(bp);
2410         } else
2411                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2412 }
2413
2414 static void bnx2x__link_reset(struct bnx2x *bp)
2415 {
2416         if (!BP_NOMCP(bp)) {
2417                 bnx2x_acquire_phy_lock(bp);
2418                 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2419                 bnx2x_release_phy_lock(bp);
2420         } else
2421                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2422 }
2423
2424 void bnx2x_force_link_reset(struct bnx2x *bp)
2425 {
2426         bnx2x_acquire_phy_lock(bp);
2427         bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2428         bnx2x_release_phy_lock(bp);
2429 }
2430
2431 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2432 {
2433         u8 rc = 0;
2434
2435         if (!BP_NOMCP(bp)) {
2436                 bnx2x_acquire_phy_lock(bp);
2437                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2438                                      is_serdes);
2439                 bnx2x_release_phy_lock(bp);
2440         } else
2441                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2442
2443         return rc;
2444 }
2445
2446 /* Computes the per-vnic min rates used for further normalizing of the
2447    min_rates (the results are written into *input; nothing is returned).
2448    Hidden vns get a min rate of 0; a min rate configured as 0 is set to
2449    DEF_MIN_RATE so that every active vn has a non-zero weight.
2450    Fairness is disabled via the cmng_enables flags when either:
2451      - ETS is enabled, or
2452      - all the configured min rates are 0.
2453    In the latter case the fairness algorithm should be deactivated.
2454  */
2455 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2456                                       struct cmng_init_input *input)
2457 {
2458         int all_zero = 1;
2459         int vn;
2460
2461         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2462                 u32 vn_cfg = bp->mf_config[vn];
2463                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2464                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2465
2466                 /* Skip hidden vns */
2467                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2468                         vn_min_rate = 0;
2469                 /* If min rate is zero - set it to 1 */
2470                 else if (!vn_min_rate)
2471                         vn_min_rate = DEF_MIN_RATE;
2472                 else
2473                         all_zero = 0;
2474
2475                 input->vnic_min_rate[vn] = vn_min_rate;
2476         }
2477
2478         /* if ETS or all min rates are zeros - disable fairness */
2479         if (BNX2X_IS_ETS_ENABLED(bp)) {
2480                 input->flags.cmng_enables &=
2481                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2482                 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2483         } else if (all_zero) {
2484                 input->flags.cmng_enables &=
2485                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2486                 DP(NETIF_MSG_IFUP,
2487                    "All MIN values are zeroes fairness will be disabled\n");
2488         } else
2489                 input->flags.cmng_enables |=
2490                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2491 }
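     /*
      * Example: a configured min-BW field of 25 yields
      * input->vnic_min_rate[vn] = 2500 (units follow the 100Mb granularity
      * noted in bnx2x_calc_vn_max() below), while a hidden vn always
      * contributes 0.
      */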
2492
2493 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2494                                     struct cmng_init_input *input)
2495 {
2496         u16 vn_max_rate;
2497         u32 vn_cfg = bp->mf_config[vn];
2498
2499         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2500                 vn_max_rate = 0;
2501         else {
2502                 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2503
2504                 if (IS_MF_PERCENT_BW(bp)) {
2505                         /* maxCfg in percents of linkspeed */
2506                         vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2507                 } else /* SD modes */
2508                         /* maxCfg is absolute in 100Mb units */
2509                         vn_max_rate = maxCfg * 100;
2510         }
2511
2512         DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2513
2514         input->vnic_max_rate[vn] = vn_max_rate;
2515 }
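     /*
      * Examples of the two modes above: with IS_MF_PERCENT_BW() and a
      * 10000 Mb/s line speed, maxCfg = 25 gives vn_max_rate = 2500; in the
      * SD modes the same maxCfg is absolute, 25 * 100 = 2500 (100Mb units).
      */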
2516
2517 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2518 {
2519         if (CHIP_REV_IS_SLOW(bp))
2520                 return CMNG_FNS_NONE;
2521         if (IS_MF(bp))
2522                 return CMNG_FNS_MINMAX;
2523
2524         return CMNG_FNS_NONE;
2525 }
2526
2527 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2528 {
2529         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2530
2531         if (BP_NOMCP(bp))
2532                 return; /* what should be the default value in this case */
2533
2534         /* For 2 port configuration the absolute function number formula
2535          * is:
2536          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2537          *
2538          *      and there are 4 functions per port
2539          *
2540          * For 4 port configuration it is
2541          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2542          *
2543          *      and there are 2 functions per port
2544          */
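             /*
              * e.g. vn 1 on port 1, path 0: 2-port mode gives
              * abs_func = 2*1 + 1 + 0 = 3, while 4-port mode gives
              * abs_func = 4*1 + 2*1 + 0 = 6.
              */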
2545         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2546                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2547
2548                 if (func >= E1H_FUNC_MAX)
2549                         break;
2550
2551                 bp->mf_config[vn] =
2552                         MF_CFG_RD(bp, func_mf_config[func].config);
2553         }
2554         if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2555                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2556                 bp->flags |= MF_FUNC_DIS;
2557         } else {
2558                 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2559                 bp->flags &= ~MF_FUNC_DIS;
2560         }
2561 }
2562
2563 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2564 {
2565         struct cmng_init_input input;
2566         memset(&input, 0, sizeof(struct cmng_init_input));
2567
2568         input.port_rate = bp->link_vars.line_speed;
2569
2570         if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2571                 int vn;
2572
2573                 /* read mf conf from shmem */
2574                 if (read_cfg)
2575                         bnx2x_read_mf_cfg(bp);
2576
2577                 /* vn_weight_sum and enable fairness if not 0 */
2578                 bnx2x_calc_vn_min(bp, &input);
2579
2580                 /* calculate and set min-max rate for each vn */
2581                 if (bp->port.pmf)
2582                         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2583                                 bnx2x_calc_vn_max(bp, vn, &input);
2584
2585                 /* always enable rate shaping and fairness */
2586                 input.flags.cmng_enables |=
2587                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2588
2589                 bnx2x_init_cmng(&input, &bp->cmng);
2590                 return;
2591         }
2592
2593         /* rate shaping and fairness are disabled */
2594         DP(NETIF_MSG_IFUP,
2595            "rate shaping and fairness are disabled\n");
2596 }
2597
2598 static void storm_memset_cmng(struct bnx2x *bp,
2599                               struct cmng_init *cmng,
2600                               u8 port)
2601 {
2602         int vn;
2603         size_t size = sizeof(struct cmng_struct_per_port);
2604
2605         u32 addr = BAR_XSTRORM_INTMEM +
2606                         XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2607
2608         __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2609
2610         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2611                 int func = func_by_vn(bp, vn);
2612
2613                 addr = BAR_XSTRORM_INTMEM +
2614                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2615                 size = sizeof(struct rate_shaping_vars_per_vn);
2616                 __storm_memset_struct(bp, addr, size,
2617                                       (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2618
2619                 addr = BAR_XSTRORM_INTMEM +
2620                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2621                 size = sizeof(struct fairness_vars_per_vn);
2622                 __storm_memset_struct(bp, addr, size,
2623                                       (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2624         }
2625 }
2626
2627 /* init cmng mode in HW according to local configuration */
2628 void bnx2x_set_local_cmng(struct bnx2x *bp)
2629 {
2630         int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2631
2632         if (cmng_fns != CMNG_FNS_NONE) {
2633                 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2634                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2635         } else {
2636                 /* rate shaping and fairness are disabled */
2637                 DP(NETIF_MSG_IFUP,
2638                    "single function mode without fairness\n");
2639         }
2640 }
2641
2642 /* This function is called upon link interrupt */
2643 static void bnx2x_link_attn(struct bnx2x *bp)
2644 {
2645         /* Make sure that we are synced with the current statistics */
2646         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2647
2648         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2649
2650         bnx2x_init_dropless_fc(bp);
2651
2652         if (bp->link_vars.link_up) {
2653
2654                 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2655                         struct host_port_stats *pstats;
2656
2657                         pstats = bnx2x_sp(bp, port_stats);
2658                         /* reset old mac stats */
2659                         memset(&(pstats->mac_stx[0]), 0,
2660                                sizeof(struct mac_stx));
2661                 }
2662                 if (bp->state == BNX2X_STATE_OPEN)
2663                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2664         }
2665
2666         if (bp->link_vars.link_up && bp->link_vars.line_speed)
2667                 bnx2x_set_local_cmng(bp);
2668
2669         __bnx2x_link_report(bp);
2670
2671         if (IS_MF(bp))
2672                 bnx2x_link_sync_notify(bp);
2673 }
2674
2675 void bnx2x__link_status_update(struct bnx2x *bp)
2676 {
2677         if (bp->state != BNX2X_STATE_OPEN)
2678                 return;
2679
2680         /* read updated dcb configuration */
2681         if (IS_PF(bp)) {
2682                 bnx2x_dcbx_pmf_update(bp);
2683                 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2684                 if (bp->link_vars.link_up)
2685                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2686                 else
2687                         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2688                 /* indicate link status */
2689                 bnx2x_link_report(bp);
2690
2691         } else { /* VF */
2692                 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2693                                           SUPPORTED_10baseT_Full |
2694                                           SUPPORTED_100baseT_Half |
2695                                           SUPPORTED_100baseT_Full |
2696                                           SUPPORTED_1000baseT_Full |
2697                                           SUPPORTED_2500baseX_Full |
2698                                           SUPPORTED_10000baseT_Full |
2699                                           SUPPORTED_TP |
2700                                           SUPPORTED_FIBRE |
2701                                           SUPPORTED_Autoneg |
2702                                           SUPPORTED_Pause |
2703                                           SUPPORTED_Asym_Pause);
2704                 bp->port.advertising[0] = bp->port.supported[0];
2705
2706                 bp->link_params.bp = bp;
2707                 bp->link_params.port = BP_PORT(bp);
2708                 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2709                 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2710                 bp->link_params.req_line_speed[0] = SPEED_10000;
2711                 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2712                 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2713                 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2714                 bp->link_vars.line_speed = SPEED_10000;
2715                 bp->link_vars.link_status =
2716                         (LINK_STATUS_LINK_UP |
2717                          LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2718                 bp->link_vars.link_up = 1;
2719                 bp->link_vars.duplex = DUPLEX_FULL;
2720                 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2721                 __bnx2x_link_report(bp);
2722
2723                 bnx2x_sample_bulletin(bp);
2724
2725                 /* if the bulletin board did not carry a link status update,
2726                  * __bnx2x_link_report will report the current status, but it
2727                  * will NOT issue a duplicate report if one was already made
2728                  * while sampling the bulletin board.
2729                  */
2730                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2731         }
2732 }
2733
2734 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2735                                   u16 vlan_val, u8 allowed_prio)
2736 {
2737         struct bnx2x_func_state_params func_params = {NULL};
2738         struct bnx2x_func_afex_update_params *f_update_params =
2739                 &func_params.params.afex_update;
2740
2741         func_params.f_obj = &bp->func_obj;
2742         func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2743
2744         /* no need to wait for RAMROD completion, so don't
2745          * set RAMROD_COMP_WAIT flag
2746          */
2747
2748         f_update_params->vif_id = vifid;
2749         f_update_params->afex_default_vlan = vlan_val;
2750         f_update_params->allowed_priorities = allowed_prio;
2751
2752         /* if ramrod can not be sent, response to MCP immediately */
2753         if (bnx2x_func_state_change(bp, &func_params) < 0)
2754                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2755
2756         return 0;
2757 }
2758
2759 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2760                                           u16 vif_index, u8 func_bit_map)
2761 {
2762         struct bnx2x_func_state_params func_params = {NULL};
2763         struct bnx2x_func_afex_viflists_params *update_params =
2764                 &func_params.params.afex_viflists;
2765         int rc;
2766         u32 drv_msg_code;
2767
2768         /* validate only LIST_SET and LIST_GET are received from switch */
2769         if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2770                 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2771                           cmd_type);
2772
2773         func_params.f_obj = &bp->func_obj;
2774         func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2775
2776         /* set parameters according to cmd_type */
2777         update_params->afex_vif_list_command = cmd_type;
2778         update_params->vif_list_index = vif_index;
2779         update_params->func_bit_map =
2780                 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2781         update_params->func_to_clear = 0;
2782         drv_msg_code =
2783                 (cmd_type == VIF_LIST_RULE_GET) ?
2784                 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2785                 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2786
2787         /* if the ramrod cannot be sent, respond to the MCP immediately for
2788          * SET and GET requests (others are not triggered from the MCP)
2789          */
2790         rc = bnx2x_func_state_change(bp, &func_params);
2791         if (rc < 0)
2792                 bnx2x_fw_command(bp, drv_msg_code, 0);
2793
2794         return 0;
2795 }
2796
2797 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2798 {
2799         struct afex_stats afex_stats;
2800         u32 func = BP_ABS_FUNC(bp);
2801         u32 mf_config;
2802         u16 vlan_val;
2803         u32 vlan_prio;
2804         u16 vif_id;
2805         u8 allowed_prio;
2806         u8 vlan_mode;
2807         u32 addr_to_write, vifid, addrs, stats_type, i;
2808
2809         if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2810                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2811                 DP(BNX2X_MSG_MCP,
2812                    "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2813                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2814         }
2815
2816         if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2817                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2818                 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2819                 DP(BNX2X_MSG_MCP,
2820                    "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2821                    vifid, addrs);
2822                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2823                                                addrs);
2824         }
2825
2826         if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2827                 addr_to_write = SHMEM2_RD(bp,
2828                         afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2829                 stats_type = SHMEM2_RD(bp,
2830                         afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2831
2832                 DP(BNX2X_MSG_MCP,
2833                    "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2834                    addr_to_write);
2835
2836                 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2837
2838                 /* write response to scratchpad, for MCP */
2839                 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2840                         REG_WR(bp, addr_to_write + i*sizeof(u32),
2841                                *(((u32 *)(&afex_stats))+i));
2842
2843                 /* send ack message to MCP */
2844                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2845         }
2846
2847         if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2848                 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2849                 bp->mf_config[BP_VN(bp)] = mf_config;
2850                 DP(BNX2X_MSG_MCP,
2851                    "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2852                    mf_config);
2853
2854                 /* if VIF_SET is "enabled" */
2855                 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2856                         /* set rate limit directly to internal RAM */
2857                         struct cmng_init_input cmng_input;
2858                         struct rate_shaping_vars_per_vn m_rs_vn;
2859                         size_t size = sizeof(struct rate_shaping_vars_per_vn);
2860                         u32 addr = BAR_XSTRORM_INTMEM +
2861                             XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2862
2863                         bp->mf_config[BP_VN(bp)] = mf_config;
2864
2865                         bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
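                        /* rate is in Mbps (1 Mbit/s == 1 bit/usec), so
                         * rate * RS_PERIODIC_TIMEOUT_USEC yields bits per
                         * timer period and dividing by 8 gives the byte
                         * quota per period (assumed units, consistent with
                         * the cmng code elsewhere in the driver)
                         */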
2866                         m_rs_vn.vn_counter.rate =
2867                                 cmng_input.vnic_max_rate[BP_VN(bp)];
2868                         m_rs_vn.vn_counter.quota =
2869                                 (m_rs_vn.vn_counter.rate *
2870                                  RS_PERIODIC_TIMEOUT_USEC) / 8;
2871
2872                         __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2873
2874                         /* read relevant values from mf_cfg struct in shmem */
2875                         vif_id =
2876                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2877                                  FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2878                                 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2879                         vlan_val =
2880                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2881                                  FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2882                                 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
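                        /* fold the transmit priority into the 802.1Q TCI:
                         * the PCP value lands in bits 15:13, above the
                         * 12-bit VID (VLAN_PRIO_SHIFT is 13)
                         */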
2883                         vlan_prio = (mf_config &
2884                                      FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2885                                     FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2886                         vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2887                         vlan_mode =
2888                                 (MF_CFG_RD(bp,
2889                                            func_mf_config[func].afex_config) &
2890                                  FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2891                                 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2892                         allowed_prio =
2893                                 (MF_CFG_RD(bp,
2894                                            func_mf_config[func].afex_config) &
2895                                  FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2896                                 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2897
2898                         /* send ramrod to FW, return in case of failure */
2899                         if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2900                                                    allowed_prio))
2901                                 return;
2902
2903                         bp->afex_def_vlan_tag = vlan_val;
2904                         bp->afex_vlan_mode = vlan_mode;
2905                 } else {
2906                         /* notify link down because the function is disabled */
2907                         bnx2x_link_report(bp);
2908
2909                         /* send INVALID VIF ramrod to FW */
2910                         bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2911
2912                         /* Reset the default afex VLAN */
2913                         bp->afex_def_vlan_tag = -1;
2914                 }
2915         }
2916 }
2917
2918 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2919 {
2920         struct bnx2x_func_switch_update_params *switch_update_params;
2921         struct bnx2x_func_state_params func_params;
2922
2923         memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2924         switch_update_params = &func_params.params.switch_update;
2925         func_params.f_obj = &bp->func_obj;
2926         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2927
2928         /* Prepare parameters for function state transitions */
2929         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2930         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
2931
2932         if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2933                 int func = BP_ABS_FUNC(bp);
2934                 u32 val;
2935
2936                 /* Re-learn the S-tag from shmem */
2937                 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2938                                 FUNC_MF_CFG_E1HOV_TAG_MASK;
2939                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2940                         bp->mf_ov = val;
2941                 } else {
2942                         BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2943                         goto fail;
2944                 }
2945
2946                 /* Configure new S-tag in LLH */
2947                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2948                        bp->mf_ov);
2949
2950                 /* Send Ramrod to update FW of change */
2951                 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2952                           &switch_update_params->changes);
2953                 switch_update_params->vlan = bp->mf_ov;
2954
2955                 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2956                         BNX2X_ERR("Failed to notify FW of S-tag change to %02x\n",
2957                                   bp->mf_ov);
2958                         goto fail;
2959                 } else {
2960                         DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2961                            bp->mf_ov);
2962                 }
2963         } else {
2964                 goto fail;
2965         }
2966
2967         bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2968         return;
2969 fail:
2970         bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2971 }
2972
2973 static void bnx2x_pmf_update(struct bnx2x *bp)
2974 {
2975         int port = BP_PORT(bp);
2976         u32 val;
2977
2978         bp->port.pmf = 1;
2979         DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2980
2981         /*
2982          * We need the smp_mb() to ensure the ordering between the writing to
2983          * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2984          */
2985         smp_mb();
2986
2987         /* queue a periodic task */
2988         queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2989
2990         bnx2x_dcbx_pmf_update(bp);
2991
2992         /* enable nig attention */
2993         val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2994         if (bp->common.int_block == INT_BLOCK_HC) {
2995                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2996                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2997         } else if (!CHIP_IS_E1x(bp)) {
2998                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2999                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
3000         }
3001
3002         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3003 }
3004
3005 /* end of Link */
3006
3007 /* slow path */
3008
3009 /*
3010  * General service functions
3011  */
3012
3013 /* send the MCP a request, block until there is a reply */
3014 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3015 {
3016         int mb_idx = BP_FW_MB_IDX(bp);
3017         u32 seq;
3018         u32 rc = 0;
3019         u32 cnt = 1;
3020         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3021
3022         mutex_lock(&bp->fw_mb_mutex);
3023         seq = ++bp->fw_seq;
3024         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3025         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3026
3027         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3028                         (command | seq), param);
3029
3030         do {
3031                 /* let the FW do its magic ... */
3032                 msleep(delay);
3033
3034                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3035
3036                 /* Give the FW up to 5 seconds (500*10ms) */
3037         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3038
3039         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3040            cnt*delay, rc, seq);
3041
3042         /* is this a reply to our command? */
3043         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3044                 rc &= FW_MSG_CODE_MASK;
3045         else {
3046                 /* FW BUG! */
3047                 BNX2X_ERR("FW failed to respond!\n");
3048                 bnx2x_fw_dump(bp);
3049                 rc = 0;
3050         }
3051         mutex_unlock(&bp->fw_mb_mutex);
3052
3053         return rc;
3054 }
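
/* Usage sketch (illustrative, not part of the original file): callers
 * treat a zero return as "MCP did not answer" and otherwise dispatch on
 * the masked response code. DRV_MSG_CODE_LOAD_REQ is a real request code
 * defined elsewhere in the driver; this wrapper function is hypothetical.
 */
static inline int bnx2x_fw_load_req_sketch(struct bnx2x *bp)
{
	u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);

	/* a zero return means the MCP failed to reply within the timeout */
	if (!load_code) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}
	return 0;
}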
3055
3056 static void storm_memset_func_cfg(struct bnx2x *bp,
3057                                  struct tstorm_eth_function_common_config *tcfg,
3058                                  u16 abs_fid)
3059 {
3060         size_t size = sizeof(struct tstorm_eth_function_common_config);
3061
3062         u32 addr = BAR_TSTRORM_INTMEM +
3063                         TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3064
3065         __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3066 }
3067
3068 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3069 {
3070         if (CHIP_IS_E1x(bp)) {
3071                 struct tstorm_eth_function_common_config tcfg = {0};
3072
3073                 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3074         }
3075
3076         /* Enable the function in the FW */
3077         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3078         storm_memset_func_en(bp, p->func_id, 1);
3079
3080         /* spq */
3081         if (p->spq_active) {
3082                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3083                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3084                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3085         }
3086 }
3087
3088 /**
3089  * bnx2x_get_common_flags - Return common flags
3090  *
3091  * @bp:         device handle
3092  * @fp:         queue handle
3093  * @zero_stats: TRUE if statistics zeroing is needed
3094  *
3095  * Return the flags that are common for the Tx-only and not normal connections.
3096  */
3097 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3098                                             struct bnx2x_fastpath *fp,
3099                                             bool zero_stats)
3100 {
3101         unsigned long flags = 0;
3102
3103         /* PF driver will always initialize the Queue to an ACTIVE state */
3104         __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3105
3106         /* tx only connections collect statistics (on the same index as the
3107          * parent connection). The statistics are zeroed when the parent
3108          * connection is initialized.
3109          */
3110
3111         __set_bit(BNX2X_Q_FLG_STATS, &flags);
3112         if (zero_stats)
3113                 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3114
3115         if (bp->flags & TX_SWITCHING)
3116                 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3117
3118         __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3119         __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3120
3121 #ifdef BNX2X_STOP_ON_ERROR
3122         __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3123 #endif
3124
3125         return flags;
3126 }
3127
3128 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3129                                        struct bnx2x_fastpath *fp,
3130                                        bool leading)
3131 {
3132         unsigned long flags = 0;
3133
3134         /* calculate other queue flags */
3135         if (IS_MF_SD(bp))
3136                 __set_bit(BNX2X_Q_FLG_OV, &flags);
3137
3138         if (IS_FCOE_FP(fp)) {
3139                 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3140                 /* For FCoE - force usage of default priority (for afex) */
3141                 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3142         }
3143
3144         if (fp->mode != TPA_MODE_DISABLED) {
3145                 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3146                 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3147                 if (fp->mode == TPA_MODE_GRO)
3148                         __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3149         }
3150
3151         if (leading) {
3152                 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3153                 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3154         }
3155
3156         /* Always set HW VLAN stripping */
3157         __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3158
3159         /* configure silent vlan removal */
3160         if (IS_MF_AFEX(bp))
3161                 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3162
3163         return flags | bnx2x_get_common_flags(bp, fp, true);
3164 }
3165
3166 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3167         struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3168         u8 cos)
3169 {
3170         gen_init->stat_id = bnx2x_stats_id(fp);
3171         gen_init->spcl_id = fp->cl_id;
3172
3173         /* Always use mini-jumbo MTU for FCoE L2 ring */
3174         if (IS_FCOE_FP(fp))
3175                 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3176         else
3177                 gen_init->mtu = bp->dev->mtu;
3178
3179         gen_init->cos = cos;
3180
3181         gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3182 }
3183
3184 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3185         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3186         struct bnx2x_rxq_setup_params *rxq_init)
3187 {
3188         u8 max_sge = 0;
3189         u16 sge_sz = 0;
3190         u16 tpa_agg_size = 0;
3191
3192         if (fp->mode != TPA_MODE_DISABLED) {
3193                 pause->sge_th_lo = SGE_TH_LO(bp);
3194                 pause->sge_th_hi = SGE_TH_HI(bp);
3195
3196                 /* validate SGE ring has enough to cross high threshold */
3197                 WARN_ON(bp->dropless_fc &&
3198                                 pause->sge_th_hi + FW_PREFETCH_CNT >
3199                                 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3200
3201                 tpa_agg_size = TPA_AGG_SIZE;
3202                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3203                         SGE_PAGE_SHIFT;
3204                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3205                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3206                 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3207         }
3208
3209         /* pause - not for e1 */
3210         if (!CHIP_IS_E1(bp)) {
3211                 pause->bd_th_lo = BD_TH_LO(bp);
3212                 pause->bd_th_hi = BD_TH_HI(bp);
3213
3214                 pause->rcq_th_lo = RCQ_TH_LO(bp);
3215                 pause->rcq_th_hi = RCQ_TH_HI(bp);
3216                 /*
3217                  * validate that rings have enough entries to cross
3218                  * high thresholds
3219                  */
3220                 WARN_ON(bp->dropless_fc &&
3221                                 pause->bd_th_hi + FW_PREFETCH_CNT >
3222                                 bp->rx_ring_size);
3223                 WARN_ON(bp->dropless_fc &&
3224                                 pause->rcq_th_hi + FW_PREFETCH_CNT >
3225                                 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3226
3227                 pause->pri_map = 1;
3228         }
3229
3230         /* rxq setup */
3231         rxq_init->dscr_map = fp->rx_desc_mapping;
3232         rxq_init->sge_map = fp->rx_sge_mapping;
3233         rxq_init->rcq_map = fp->rx_comp_mapping;
3234         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3235
3236         /* This should be the maximum number of data bytes that may be
3237          * placed on the BD (not including padding).
3238          */
3239         rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3240                            BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3241
3242         rxq_init->cl_qzone_id = fp->cl_qzone_id;
3243         rxq_init->tpa_agg_sz = tpa_agg_size;
3244         rxq_init->sge_buf_sz = sge_sz;
3245         rxq_init->max_sges_pkt = max_sge;
3246         rxq_init->rss_engine_id = BP_FUNC(bp);
3247         rxq_init->mcast_engine_id = BP_FUNC(bp);
3248
3249         /* Maximum number of simultaneous TPA aggregations for this Queue.
3250          *
3251          * For PF Clients it should be the maximum available number.
3252          * VF driver(s) may want to set it to a smaller value.
3253          */
3254         rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3255
3256         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3257         rxq_init->fw_sb_id = fp->fw_sb_id;
3258
3259         if (IS_FCOE_FP(fp))
3260                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3261         else
3262                 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3263         /* configure silent vlan removal
3264          * if multi function mode is afex, then mask default vlan
3265          */
3266         if (IS_MF_AFEX(bp)) {
3267                 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3268                 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3269         }
3270 }
3271
3272 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3273         struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3274         u8 cos)
3275 {
3276         txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3277         txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3278         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3279         txq_init->fw_sb_id = fp->fw_sb_id;
3280
3281         /*
3282          * set the tss leading client id for TX classification ==
3283          * leading RSS client id
3284          */
3285         txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3286
3287         if (IS_FCOE_FP(fp)) {
3288                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3289                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3290         }
3291 }
3292
3293 static void bnx2x_pf_init(struct bnx2x *bp)
3294 {
3295         struct bnx2x_func_init_params func_init = {0};
3296         struct event_ring_data eq_data = { {0} };
3297
3298         if (!CHIP_IS_E1x(bp)) {
3299                 /* reset IGU PF statistics: MSIX + ATTN */
3300                 /* PF */
3301                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3302                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3303                            (CHIP_MODE_IS_4_PORT(bp) ?
3304                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3305                 /* ATTN */
3306                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3307                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3308                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3309                            (CHIP_MODE_IS_4_PORT(bp) ?
3310                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3311         }
3312
3313         func_init.spq_active = true;
3314         func_init.pf_id = BP_FUNC(bp);
3315         func_init.func_id = BP_FUNC(bp);
3316         func_init.spq_map = bp->spq_mapping;
3317         func_init.spq_prod = bp->spq_prod_idx;
3318
3319         bnx2x_func_init(bp, &func_init);
3320
3321         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3322
3323         /*
3324          * Congestion management values depend on the link rate.
3325          * There is no active link yet, so the initial link rate is set
3326          * to 10 Gbps. When the link comes up, the congestion management
3327          * values are re-calculated according to the actual link rate.
3328          */
3329         bp->link_vars.line_speed = SPEED_10000;
3330         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3331
3332         /* Only the PMF sets the HW */
3333         if (bp->port.pmf)
3334                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3335
3336         /* init Event Queue - the PCI bus guarantees correct endianness */
3337         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3338         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3339         eq_data.producer = bp->eq_prod;
3340         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3341         eq_data.sb_id = DEF_SB_ID;
3342         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3343 }
3344
3345 static void bnx2x_e1h_disable(struct bnx2x *bp)
3346 {
3347         int port = BP_PORT(bp);
3348
3349         bnx2x_tx_disable(bp);
3350
3351         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3352 }
3353
3354 static void bnx2x_e1h_enable(struct bnx2x *bp)
3355 {
3356         int port = BP_PORT(bp);
3357
3358         if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3359                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3360
3361         /* Tx queues only need to be re-enabled here */
3362         netif_tx_wake_all_queues(bp->dev);
3363
3364         /*
3365          * Do not call netif_carrier_on() here; it will be called from the
3366          * link state check if the link is up
3367          */
3368 }
3369
3370 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3371
3372 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3373 {
3374         struct eth_stats_info *ether_stat =
3375                 &bp->slowpath->drv_info_to_mcp.ether_stat;
3376         struct bnx2x_vlan_mac_obj *mac_obj =
3377                 &bp->sp_objs->mac_obj;
3378         int i;
3379
3380         strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3381                 ETH_STAT_INFO_VERSION_LEN);
3382
3383         /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3384          * mac_local field of the ether_stat struct. Each entry in that
3385          * struct is 8 bytes wide but a mac address is only 6 bytes, so the
3386          * base address is offset by 2 bytes. Likewise, get_n_elements is
3387          * given a 2-byte stride to pad each 6-byte mac out to the 8 bytes
3388          * the struct allocates, so that the macs land in their proper
3389          * positions.
3390          */
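        /* Layout sketch per 8-byte entry (MAC_PAD == 2 is assumed here):
         *   [pad][pad][m0][m1][m2][m3][m4][m5]
         */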
3391         for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3392                 memset(ether_stat->mac_local + i, 0,
3393                        sizeof(ether_stat->mac_local[0]));
3394         mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3395                                 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3396                                 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3397                                 ETH_ALEN);
3398         ether_stat->mtu_size = bp->dev->mtu;
3399         if (bp->dev->features & NETIF_F_RXCSUM)
3400                 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3401         if (bp->dev->features & NETIF_F_TSO)
3402                 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3403         ether_stat->feature_flags |= bp->common.boot_mode;
3404
3405         ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3406
3407         ether_stat->txq_size = bp->tx_ring_size;
3408         ether_stat->rxq_size = bp->rx_ring_size;
3409
3410 #ifdef CONFIG_BNX2X_SRIOV
3411         ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3412 #endif
3413 }
3414
3415 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3416 {
3417         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3418         struct fcoe_stats_info *fcoe_stat =
3419                 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3420
3421         if (!CNIC_LOADED(bp))
3422                 return;
3423
3424         memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3425
3426         fcoe_stat->qos_priority =
3427                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3428
3429         /* insert FCoE stats from ramrod response */
3430         if (!NO_FCOE(bp)) {
3431                 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3432                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3433                         tstorm_queue_statistics;
3434
3435                 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3436                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3437                         xstorm_queue_statistics;
3438
3439                 struct fcoe_statistics_params *fw_fcoe_stat =
3440                         &bp->fw_stats_data->fcoe;
3441
3442                 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3443                           fcoe_stat->rx_bytes_lo,
3444                           fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3445
3446                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3447                           fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3448                           fcoe_stat->rx_bytes_lo,
3449                           fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3450
3451                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3452                           fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3453                           fcoe_stat->rx_bytes_lo,
3454                           fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3455
3456                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3457                           fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3458                           fcoe_stat->rx_bytes_lo,
3459                           fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3460
3461                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3462                           fcoe_stat->rx_frames_lo,
3463                           fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3464
3465                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3466                           fcoe_stat->rx_frames_lo,
3467                           fcoe_q_tstorm_stats->rcv_ucast_pkts);
3468
3469                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3470                           fcoe_stat->rx_frames_lo,
3471                           fcoe_q_tstorm_stats->rcv_bcast_pkts);
3472
3473                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3474                           fcoe_stat->rx_frames_lo,
3475                           fcoe_q_tstorm_stats->rcv_mcast_pkts);
3476
3477                 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3478                           fcoe_stat->tx_bytes_lo,
3479                           fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3480
3481                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3482                           fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3483                           fcoe_stat->tx_bytes_lo,
3484                           fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3485
3486                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3487                           fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3488                           fcoe_stat->tx_bytes_lo,
3489                           fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3490
3491                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3492                           fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3493                           fcoe_stat->tx_bytes_lo,
3494                           fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3495
3496                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3497                           fcoe_stat->tx_frames_lo,
3498                           fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3499
3500                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3501                           fcoe_stat->tx_frames_lo,
3502                           fcoe_q_xstorm_stats->ucast_pkts_sent);
3503
3504                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3505                           fcoe_stat->tx_frames_lo,
3506                           fcoe_q_xstorm_stats->bcast_pkts_sent);
3507
3508                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3509                           fcoe_stat->tx_frames_lo,
3510                           fcoe_q_xstorm_stats->mcast_pkts_sent);
3511         }
3512
3513         /* ask L5 driver to add data to the struct */
3514         bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3515 }
3516
3517 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3518 {
3519         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3520         struct iscsi_stats_info *iscsi_stat =
3521                 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3522
3523         if (!CNIC_LOADED(bp))
3524                 return;
3525
3526         memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3527                ETH_ALEN);
3528
3529         iscsi_stat->qos_priority =
3530                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3531
3532         /* ask L5 driver to add data to the struct */
3533         bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3534 }
3535
3536 /* called due to MCP event (on pmf):
3537  *      reread the new bandwidth configuration
3538  *      configure the FW
3539  *      notify other functions about the change
3540  */
3541 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3542 {
3543         /* Workaround for MFW bug.
3544          * MFW is not supposed to generate BW attention in
3545          * single function mode.
3546          */
3547         if (!IS_MF(bp)) {
3548                 DP(BNX2X_MSG_MCP,
3549                    "Ignoring MF BW config in single function mode\n");
3550                 return;
3551         }
3552
3553         if (bp->link_vars.link_up) {
3554                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3555                 bnx2x_link_sync_notify(bp);
3556         }
3557         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3558 }
3559
3560 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3561 {
3562         bnx2x_config_mf_bw(bp);
3563         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3564 }
3565
3566 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3567 {
3568         DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3569         bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3570 }
3571
3572 #define BNX2X_UPDATE_DRV_INFO_IND_LENGTH        (20)
3573 #define BNX2X_UPDATE_DRV_INFO_IND_COUNT         (25)
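/* i.e. poll the MFW indication up to 25 times, 20 ms apart (~500 ms total) */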
3574
3575 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3576 {
3577         enum drv_info_opcode op_code;
3578         u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3579         bool release = false;
3580         int wait;
3581
3582         /* if the drv_info version supported by the MFW doesn't match - send NACK */
3583         if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3584                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3585                 return;
3586         }
3587
3588         op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3589                   DRV_INFO_CONTROL_OP_CODE_SHIFT;
3590
3591         /* Must prevent other flows from accessing drv_info_to_mcp */
3592         mutex_lock(&bp->drv_info_mutex);
3593
3594         memset(&bp->slowpath->drv_info_to_mcp, 0,
3595                sizeof(union drv_info_to_mcp));
3596
3597         switch (op_code) {
3598         case ETH_STATS_OPCODE:
3599                 bnx2x_drv_info_ether_stat(bp);
3600                 break;
3601         case FCOE_STATS_OPCODE:
3602                 bnx2x_drv_info_fcoe_stat(bp);
3603                 break;
3604         case ISCSI_STATS_OPCODE:
3605                 bnx2x_drv_info_iscsi_stat(bp);
3606                 break;
3607         default:
3608                 /* if op code isn't supported - send NACK */
3609                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3610                 goto out;
3611         }
3612
3613         /* if we got a drv_info attention from the MFW then these fields
3614          * are guaranteed to be defined in shmem2
3615          */
3616         SHMEM2_WR(bp, drv_info_host_addr_lo,
3617                 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3618         SHMEM2_WR(bp, drv_info_host_addr_hi,
3619                 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3620
3621         bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3622
3623         /* Since management may want both this and get_driver_version,
3624          * we need to wait until it notifies us that it has finished
3625          * utilizing the buffer.
3626          */
3627         if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3628                 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3629         } else if (!bp->drv_info_mng_owner) {
3630                 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3631
3632                 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3633                         u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3634
3635                         /* Management is done; need to clear indication */
3636                         if (indication & bit) {
3637                                 SHMEM2_WR(bp, mfw_drv_indication,
3638                                           indication & ~bit);
3639                                 release = true;
3640                                 break;
3641                         }
3642
3643                         msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3644                 }
3645         }
3646         if (!release) {
3647                 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3648                 bp->drv_info_mng_owner = true;
3649         }
3650
3651 out:
3652         mutex_unlock(&bp->drv_info_mutex);
3653 }
3654
3655 static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3656 {
3657         u8 vals[4];
3658         int i = 0;
3659
3660         if (bnx2x_format) {
3661                 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3662                            &vals[0], &vals[1], &vals[2], &vals[3]);
3663                 if (i > 0)
3664                         vals[0] -= '0';
3665         } else {
3666                 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3667                            &vals[0], &vals[1], &vals[2], &vals[3]);
3668         }
3669
3670         while (i < 4)
3671                 vals[i++] = 0;
3672
3673         return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3674 }
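
/* Worked example (hypothetical input): in the bnx2x format, "1.712.30.0"
 * parses as vals = {7, 12, 30, 0} and packs to 0x070c1e00; in the plain
 * format, "a.b.c.d" packs each decimal field into successive bytes, most
 * significant first. Missing trailing fields are zeroed.
 */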
3675
3676 void bnx2x_update_mng_version(struct bnx2x *bp)
3677 {
3678         u32 iscsiver = DRV_VER_NOT_LOADED;
3679         u32 fcoever = DRV_VER_NOT_LOADED;
3680         u32 ethver = DRV_VER_NOT_LOADED;
3681         int idx = BP_FW_MB_IDX(bp);
3682         u8 *version;
3683
3684         if (!SHMEM2_HAS(bp, func_os_drv_ver))
3685                 return;
3686
3687         mutex_lock(&bp->drv_info_mutex);
3688         /* Must not proceed while `bnx2x_handle_drv_info_req' may be in flight */
3689         if (bp->drv_info_mng_owner)
3690                 goto out;
3691
3692         if (bp->state != BNX2X_STATE_OPEN)
3693                 goto out;
3694
3695         /* Parse ethernet driver version */
3696         ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3697         if (!CNIC_LOADED(bp))
3698                 goto out;
3699
3700         /* Try getting storage driver version via cnic */
3701         memset(&bp->slowpath->drv_info_to_mcp, 0,
3702                sizeof(union drv_info_to_mcp));
3703         bnx2x_drv_info_iscsi_stat(bp);
3704         version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3705         iscsiver = bnx2x_update_mng_version_utility(version, false);
3706
3707         memset(&bp->slowpath->drv_info_to_mcp, 0,
3708                sizeof(union drv_info_to_mcp));
3709         bnx2x_drv_info_fcoe_stat(bp);
3710         version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3711         fcoever = bnx2x_update_mng_version_utility(version, false);
3712
3713 out:
3714         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3715         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3716         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3717
3718         mutex_unlock(&bp->drv_info_mutex);
3719
3720         DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3721            ethver, iscsiver, fcoever);
3722 }
3723
3724 void bnx2x_update_mfw_dump(struct bnx2x *bp)
3725 {
3726         u32 drv_ver;
3727         u32 valid_dump;
3728
3729         if (!SHMEM2_HAS(bp, drv_info))
3730                 return;
3731
3732         /* Update Driver load time, possibly broken in y2038 */
3733         SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3734
3735         drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3736         SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3737
3738         SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3739
3740         /* Check & notify On-Chip dump. */
3741         valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3742
3743         if (valid_dump & FIRST_DUMP_VALID)
3744                 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3745
3746         if (valid_dump & SECOND_DUMP_VALID)
3747                 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3748 }
3749
3750 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3751 {
3752         u32 cmd_ok, cmd_fail;
3753
3754         /* sanity */
3755         if (event & DRV_STATUS_DCC_EVENT_MASK &&
3756             event & DRV_STATUS_OEM_EVENT_MASK) {
3757                 BNX2X_ERR("Received simultaneous events %08x\n", event);
3758                 return;
3759         }
3760
3761         if (event & DRV_STATUS_DCC_EVENT_MASK) {
3762                 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3763                 cmd_ok = DRV_MSG_CODE_DCC_OK;
3764         } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
3765                 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3766                 cmd_ok = DRV_MSG_CODE_OEM_OK;
3767         }
3768
3769         DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3770
3771         if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3772                      DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
3773                 /* This is the only place besides the function initialization
3774                  * where bp->flags can change, so it is done without any
3775                  * locks
3776                  */
3777                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3778                         DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3779                         bp->flags |= MF_FUNC_DIS;
3780
3781                         bnx2x_e1h_disable(bp);
3782                 } else {
3783                         DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3784                         bp->flags &= ~MF_FUNC_DIS;
3785
3786                         bnx2x_e1h_enable(bp);
3787                 }
3788                 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3789                            DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3790         }
3791
3792         if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3793                      DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3794                 bnx2x_config_mf_bw(bp);
3795                 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3796                            DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3797         }
3798
3799         /* Report results to MCP */
3800         if (event)
3801                 bnx2x_fw_command(bp, cmd_fail, 0);
3802         else
3803                 bnx2x_fw_command(bp, cmd_ok, 0);
3804 }
3805
3806 /* must be called under the spq lock */
3807 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3808 {
3809         struct eth_spe *next_spe = bp->spq_prod_bd;
3810
3811         if (bp->spq_prod_bd == bp->spq_last_bd) {
3812                 bp->spq_prod_bd = bp->spq;
3813                 bp->spq_prod_idx = 0;
3814                 DP(BNX2X_MSG_SP, "end of spq\n");
3815         } else {
3816                 bp->spq_prod_bd++;
3817                 bp->spq_prod_idx++;
3818         }
3819         return next_spe;
3820 }
3821
3822 /* must be called under the spq lock */
3823 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3824 {
3825         int func = BP_FUNC(bp);
3826
3827         /*
3828          * Make sure that BD data is updated before writing the producer:
3829          * BD data is written to the memory, the producer is read from the
3830          * memory, thus we need a full memory barrier to ensure the ordering.
3831          */
3832         mb();
3833
3834         REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3835                          bp->spq_prod_idx);
3836         mmiowb();
3837 }
3838
3839 /**
3840  * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3841  *
3842  * @cmd:        command to check
3843  * @cmd_type:   command type
3844  */
3845 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3846 {
3847         if ((cmd_type == NONE_CONNECTION_TYPE) ||
3848             (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3849             (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3850             (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3851             (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3852             (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3853             (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3854                 return true;
3855         else
3856                 return false;
3857 }
3858
3859 /**
3860  * bnx2x_sp_post - place a single command on an SP ring
3861  *
3862  * @bp:         driver handle
3863  * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
3864  * @cid:        SW CID the command is related to
3865  * @data_hi:    command private data address (high 32 bits)
3866  * @data_lo:    command private data address (low 32 bits)
3867  * @cmd_type:   command type (e.g. NONE, ETH)
3868  *
3869  * SP data is handled as if it's always an address pair, thus data fields are
3870  * not swapped to little endian in upper functions. Instead this function swaps
3871  * data as if it's two u32 fields.
3872  */
3873 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3874                   u32 data_hi, u32 data_lo, int cmd_type)
3875 {
3876         struct eth_spe *spe;
3877         u16 type;
3878         bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3879
3880 #ifdef BNX2X_STOP_ON_ERROR
3881         if (unlikely(bp->panic)) {
3882                 BNX2X_ERR("Can't post SP when there is panic\n");
3883                 return -EIO;
3884         }
3885 #endif
3886
3887         spin_lock_bh(&bp->spq_lock);
3888
3889         if (common) {
3890                 if (!atomic_read(&bp->eq_spq_left)) {
3891                         BNX2X_ERR("BUG! EQ ring full!\n");
3892                         spin_unlock_bh(&bp->spq_lock);
3893                         bnx2x_panic();
3894                         return -EBUSY;
3895                 }
3896         } else if (!atomic_read(&bp->cq_spq_left)) {
3897                 BNX2X_ERR("BUG! SPQ ring full!\n");
3898                 spin_unlock_bh(&bp->spq_lock);
3899                 bnx2x_panic();
3900                 return -EBUSY;
3901         }
3902
3903         spe = bnx2x_sp_get_next(bp);
3904
3905         /* CID needs the port number to be encoded in it */
3906         spe->hdr.conn_and_cmd_data =
3907                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3908                                     HW_CID(bp, cid));
3909
3910         /* In some cases, type may already contain the func-id,
3911          * mainly in SRIOV-related use cases, so we add it here only
3912          * if it's not already set.
3913          */
3914         if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3915                 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3916                         SPE_HDR_CONN_TYPE;
3917                 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3918                          SPE_HDR_FUNCTION_ID);
3919         } else {
3920                 type = cmd_type;
3921         }
3922
3923         spe->hdr.type = cpu_to_le16(type);
3924
3925         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3926         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3927
3928         /*
3929          * It's ok if the actual decrement is issued towards the memory
3930          * somewhere between the spin_lock and spin_unlock. Thus no
3931          * additional explicit memory barrier is needed.
3932          */
3933         if (common)
3934                 atomic_dec(&bp->eq_spq_left);
3935         else
3936                 atomic_dec(&bp->cq_spq_left);
3937
3938         DP(BNX2X_MSG_SP,
3939            "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3940            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3941            (u32)(U64_LO(bp->spq_mapping) +
3942            (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3943            HW_CID(bp, cid), data_hi, data_lo, type,
3944            atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3945
3946         bnx2x_sp_prod_update(bp);
3947         spin_unlock_bh(&bp->spq_lock);
3948         return 0;
3949 }
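
/* Usage sketch (not from this file): posting a contextless ramrod whose
 * private data was DMA-mapped at data_mapping. The command and connection
 * type constants are real; this wrapper function is hypothetical.
 */
static inline int bnx2x_sp_post_sketch(struct bnx2x *bp,
				       dma_addr_t data_mapping)
{
	/* data_hi/data_lo carry the two halves of the DMA address */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping), U64_LO(data_mapping),
			     NONE_CONNECTION_TYPE);
}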
3950
3951 /* acquire split MCP access lock register */
3952 static int bnx2x_acquire_alr(struct bnx2x *bp)
3953 {
3954         u32 j, val;
3955         int rc = 0;
3956
3957         might_sleep();
3958         for (j = 0; j < 1000; j++) {
3959                 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3960                 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3961                 if (val & MCPR_ACCESS_LOCK_LOCK)
3962                         break;
3963
3964                 usleep_range(5000, 10000);
3965         }
3966         if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3967                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3968                 rc = -EBUSY;
3969         }
3970
3971         return rc;
3972 }
3973
3974 /* release split MCP access lock register */
3975 static void bnx2x_release_alr(struct bnx2x *bp)
3976 {
3977         REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3978 }
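
/* Usage sketch: the ALR serializes split MCP register access between the
 * driver and the MFW; callers bracket the critical section, e.g.
 *
 *	if (bnx2x_acquire_alr(bp))
 *		return;
 *	... access the shared MCP/NIG resources ...
 *	bnx2x_release_alr(bp);
 */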
3979
3980 #define BNX2X_DEF_SB_ATT_IDX    0x0001
3981 #define BNX2X_DEF_SB_IDX        0x0002
3982
3983 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3984 {
3985         struct host_sp_status_block *def_sb = bp->def_status_blk;
3986         u16 rc = 0;
3987
3988         barrier(); /* status block is written to by the chip */
3989         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3990                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3991                 rc |= BNX2X_DEF_SB_ATT_IDX;
3992         }
3993
3994         if (bp->def_idx != def_sb->sp_sb.running_index) {
3995                 bp->def_idx = def_sb->sp_sb.running_index;
3996                 rc |= BNX2X_DEF_SB_IDX;
3997         }
3998
3999         /* Do not reorder: the index reads must complete before handling */
4000         barrier();
4001         return rc;
4002 }
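
/* Usage sketch: the slow-path task dispatches on the returned flags,
 * roughly (simplified from bnx2x_sp_task() elsewhere in this file):
 *
 *	u16 status = bnx2x_update_dsb_idx(bp);
 *
 *	if (status & BNX2X_DEF_SB_ATT_IDX)
 *		bnx2x_attn_int(bp);
 *	if (status & BNX2X_DEF_SB_IDX)
 *		... handle EQ/SP events and ack the status block ...
 */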
4003
4004 /*
4005  * slow path service functions
4006  */
4007
4008 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4009 {
4010         int port = BP_PORT(bp);
4011         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4012                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4013         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4014                                        NIG_REG_MASK_INTERRUPT_PORT0;
4015         u32 aeu_mask;
4016         u32 nig_mask = 0;
4017         u32 reg_addr;
4018
4019         if (bp->attn_state & asserted)
4020                 BNX2X_ERR("IGU ERROR\n");
4021
4022         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4023         aeu_mask = REG_RD(bp, aeu_addr);
4024
4025         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
4026            aeu_mask, asserted);
4027         aeu_mask &= ~(asserted & 0x3ff);
4028         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4029
4030         REG_WR(bp, aeu_addr, aeu_mask);
4031         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4032
4033         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4034         bp->attn_state |= asserted;
4035         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4036
4037         if (asserted & ATTN_HARD_WIRED_MASK) {
4038                 if (asserted & ATTN_NIG_FOR_FUNC) {
4039
4040                         bnx2x_acquire_phy_lock(bp);
4041
4042                         /* save nig interrupt mask */
4043                         nig_mask = REG_RD(bp, nig_int_mask_addr);
4044
4045                         /* If nig_mask is not set, no need to call the update
4046                          * function.
4047                          */
4048                         if (nig_mask) {
4049                                 REG_WR(bp, nig_int_mask_addr, 0);
4050
4051                                 bnx2x_link_attn(bp);
4052                         }
4053
4054                         /* handle unicore attn? */
4055                 }
4056                 if (asserted & ATTN_SW_TIMER_4_FUNC)
4057                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4058
4059                 if (asserted & GPIO_2_FUNC)
4060                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4061
4062                 if (asserted & GPIO_3_FUNC)
4063                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4064
4065                 if (asserted & GPIO_4_FUNC)
4066                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4067
4068                 if (port == 0) {
4069                         if (asserted & ATTN_GENERAL_ATTN_1) {
4070                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4071                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4072                         }
4073                         if (asserted & ATTN_GENERAL_ATTN_2) {
4074                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4075                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4076                         }
4077                         if (asserted & ATTN_GENERAL_ATTN_3) {
4078                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4079                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4080                         }
4081                 } else {
4082                         if (asserted & ATTN_GENERAL_ATTN_4) {
4083                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4084                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4085                         }
4086                         if (asserted & ATTN_GENERAL_ATTN_5) {
4087                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4088                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4089                         }
4090                         if (asserted & ATTN_GENERAL_ATTN_6) {
4091                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4092                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4093                         }
4094                 }
4095
4096         } /* if hardwired */
4097
4098         if (bp->common.int_block == INT_BLOCK_HC)
4099                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4100                             COMMAND_REG_ATTN_BITS_SET);
4101         else
4102                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4103
4104         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4105            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4106         REG_WR(bp, reg_addr, asserted);
4107
4108         /* now set back the mask */
4109         if (asserted & ATTN_NIG_FOR_FUNC) {
4110                 /* Verify that IGU ack through BAR was written before restoring
4111                  * NIG mask. This loop should exit after 2-3 iterations max.
4112                  */
4113                 if (bp->common.int_block != INT_BLOCK_HC) {
4114                         u32 cnt = 0, igu_acked;
4115                         do {
4116                                 igu_acked = REG_RD(bp,
4117                                                    IGU_REG_ATTENTION_ACK_BITS);
4118                         } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4119                                  (++cnt < MAX_IGU_ATTN_ACK_TO));
4120                         if (!igu_acked)
4121                                 DP(NETIF_MSG_HW,
4122                                    "Failed to verify IGU ack on time\n");
4123                         barrier();
4124                 }
4125                 REG_WR(bp, nig_int_mask_addr, nig_mask);
4126                 bnx2x_release_phy_lock(bp);
4127         }
4128 }
4129
4130 static void bnx2x_fan_failure(struct bnx2x *bp)
4131 {
4132         int port = BP_PORT(bp);
4133         u32 ext_phy_config;
4134         /* mark the failure */
4135         ext_phy_config =
4136                 SHMEM_RD(bp,
4137                          dev_info.port_hw_config[port].external_phy_config);
4138
4139         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4140         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4141         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4142                  ext_phy_config);
4143
4144         /* log the failure */
4145                 netdev_err(bp->dev, "Fan failure on the network controller has caused the driver to shut down the card to prevent permanent damage.\n"
4146                             "Please contact OEM Support for assistance\n");
4147
4148         /* Schedule device reset (unload).
4149          * Some boards draw enough power while the driver is up to
4150          * overheat if the fan fails.
4151          */
4152         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4153 }
4154
4155 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4156 {
4157         int port = BP_PORT(bp);
4158         int reg_offset;
4159         u32 val;
4160
4161         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4162                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4163
4164         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4165
4166                 val = REG_RD(bp, reg_offset);
4167                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4168                 REG_WR(bp, reg_offset, val);
4169
4170                 BNX2X_ERR("SPIO5 hw attention\n");
4171
4172                 /* Fan failure attention */
4173                 bnx2x_hw_reset_phy(&bp->link_params);
4174                 bnx2x_fan_failure(bp);
4175         }
4176
4177         if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4178                 bnx2x_acquire_phy_lock(bp);
4179                 bnx2x_handle_module_detect_int(&bp->link_params);
4180                 bnx2x_release_phy_lock(bp);
4181         }
4182
4183         if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4184
4185                 val = REG_RD(bp, reg_offset);
4186                 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4187                 REG_WR(bp, reg_offset, val);
4188
4189                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4190                           (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4191                 bnx2x_panic();
4192         }
4193 }
4194
4195 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4196 {
4197         u32 val;
4198
4199         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4200
4201                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4202                 BNX2X_ERR("DB hw attention 0x%x\n", val);
4203                 /* DORQ discard attention */
4204                 if (val & 0x2)
4205                         BNX2X_ERR("FATAL error from DORQ\n");
4206         }
4207
4208         if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4209
4210                 int port = BP_PORT(bp);
4211                 int reg_offset;
4212
4213                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4214                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4215
4216                 val = REG_RD(bp, reg_offset);
4217                 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4218                 REG_WR(bp, reg_offset, val);
4219
4220                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4221                           (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4222                 bnx2x_panic();
4223         }
4224 }
4225
4226 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4227 {
4228         u32 val;
4229
4230         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4231
4232                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4233                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4234                 /* CFC error attention */
4235                 if (val & 0x2)
4236                         BNX2X_ERR("FATAL error from CFC\n");
4237         }
4238
4239         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4240                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4241                 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4242                 /* RQ_USDMDP_FIFO_OVERFLOW */
4243                 if (val & 0x18000)
4244                         BNX2X_ERR("FATAL error from PXP\n");
4245
4246                 if (!CHIP_IS_E1x(bp)) {
4247                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4248                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4249                 }
4250         }
4251
4252         if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4253
4254                 int port = BP_PORT(bp);
4255                 int reg_offset;
4256
4257                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4258                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4259
4260                 val = REG_RD(bp, reg_offset);
4261                 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4262                 REG_WR(bp, reg_offset, val);
4263
4264                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4265                           (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4266                 bnx2x_panic();
4267         }
4268 }
4269
4270 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4271 {
4272         u32 val;
4273
4274         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4275
4276                 if (attn & BNX2X_PMF_LINK_ASSERT) {
4277                         int func = BP_FUNC(bp);
4278
4279                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4280                         bnx2x_read_mf_cfg(bp);
4281                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4282                                         func_mf_config[BP_ABS_FUNC(bp)].config);
4283                         val = SHMEM_RD(bp,
4284                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
4285
4286                         if (val & (DRV_STATUS_DCC_EVENT_MASK |
4287                                    DRV_STATUS_OEM_EVENT_MASK))
4288                                 bnx2x_oem_event(bp,
4289                                         (val & (DRV_STATUS_DCC_EVENT_MASK |
4290                                                 DRV_STATUS_OEM_EVENT_MASK)));
4291
4292                         if (val & DRV_STATUS_SET_MF_BW)
4293                                 bnx2x_set_mf_bw(bp);
4294
4295                         if (val & DRV_STATUS_DRV_INFO_REQ)
4296                                 bnx2x_handle_drv_info_req(bp);
4297
4298                         if (val & DRV_STATUS_VF_DISABLED)
4299                                 bnx2x_schedule_iov_task(bp,
4300                                                         BNX2X_IOV_HANDLE_FLR);
4301
4302                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4303                                 bnx2x_pmf_update(bp);
4304
4305                         if (bp->port.pmf &&
4306                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4307                                 bp->dcbx_enabled > 0)
4308                                 /* start dcbx state machine */
4309                                 bnx2x_dcbx_set_params(bp,
4310                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
4311                         if (val & DRV_STATUS_AFEX_EVENT_MASK)
4312                                 bnx2x_handle_afex_cmd(bp,
4313                                         val & DRV_STATUS_AFEX_EVENT_MASK);
4314                         if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4315                                 bnx2x_handle_eee_event(bp);
4316
4317                         if (val & DRV_STATUS_OEM_UPDATE_SVID)
4318                                 bnx2x_schedule_sp_rtnl(bp,
4319                                         BNX2X_SP_RTNL_UPDATE_SVID, 0);
4320
4321                         if (bp->link_vars.periodic_flags &
4322                             PERIODIC_FLAGS_LINK_EVENT) {
4323                                 /*  sync with link */
4324                                 bnx2x_acquire_phy_lock(bp);
4325                                 bp->link_vars.periodic_flags &=
4326                                         ~PERIODIC_FLAGS_LINK_EVENT;
4327                                 bnx2x_release_phy_lock(bp);
4328                                 if (IS_MF(bp))
4329                                         bnx2x_link_sync_notify(bp);
4330                                 bnx2x_link_report(bp);
4331                         }
4332                         /* Always call it here: bnx2x_link_report() will
4333                          * prevent duplicate link indications.
4334                          */
4335                         bnx2x__link_status_update(bp);
4336                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4337
4338                         BNX2X_ERR("MC assert!\n");
4339                         bnx2x_mc_assert(bp);
4340                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4341                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4342                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4343                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4344                         bnx2x_panic();
4345
4346                 } else if (attn & BNX2X_MCP_ASSERT) {
4347
4348                         BNX2X_ERR("MCP assert!\n");
4349                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4350                         bnx2x_fw_dump(bp);
4351
4352                 } else
4353                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4354         }
4355
4356         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4357                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4358                 if (attn & BNX2X_GRC_TIMEOUT) {
4359                         val = CHIP_IS_E1(bp) ? 0 :
4360                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4361                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
4362                 }
4363                 if (attn & BNX2X_GRC_RSV) {
4364                         val = CHIP_IS_E1(bp) ? 0 :
4365                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4366                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
4367                 }
4368                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4369         }
4370 }
4371
4372 /*
4373  * Bits map:
4374  * 0-7   - Engine0 load counter.
4375  * 8-15  - Engine1 load counter.
4376  * 16    - Engine0 RESET_IN_PROGRESS bit.
4377  * 17    - Engine1 RESET_IN_PROGRESS bit.
4378  * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4379  *         on the engine
4380  * 19    - Engine1 ONE_IS_LOADED.
4381  * 20    - Chip reset flow bit. When set, a non-leader must wait for both
4382  *         engines' leaders to complete (i.e. check both RESET_IN_PROGRESS
4383  *         bits, not just the one belonging to its engine).
4384  *
4385  */
4386 #define BNX2X_RECOVERY_GLOB_REG         MISC_REG_GENERIC_POR_1
4387
4388 #define BNX2X_PATH0_LOAD_CNT_MASK       0x000000ff
4389 #define BNX2X_PATH0_LOAD_CNT_SHIFT      0
4390 #define BNX2X_PATH1_LOAD_CNT_MASK       0x0000ff00
4391 #define BNX2X_PATH1_LOAD_CNT_SHIFT      8
4392 #define BNX2X_PATH0_RST_IN_PROG_BIT     0x00010000
4393 #define BNX2X_PATH1_RST_IN_PROG_BIT     0x00020000
4394 #define BNX2X_GLOBAL_RESET_BIT          0x00040000
4395
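/* Example decode (illustrative only): a GLOB_REG value of 0x00050003 means
 * PF0 and PF1 are loaded on engine0 (bits 0-1), engine0 has RESET_IN_PROGRESS
 * set (bit 16) and the GLOBAL_RESET flow is active (bit 18).
 */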
4396 /*
4397  * Set the GLOBAL_RESET bit.
4398  *
4399  * Should be run under rtnl lock
4400  */
4401 void bnx2x_set_reset_global(struct bnx2x *bp)
4402 {
4403         u32 val;
4404         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4405         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4406         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4407         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4408 }
4409
4410 /*
4411  * Clear the GLOBAL_RESET bit.
4412  *
4413  * Should be run under rtnl lock
4414  */
4415 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4416 {
4417         u32 val;
4418         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4419         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4420         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4421         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4422 }
4423
4424 /*
4425  * Checks the GLOBAL_RESET bit.
4426  *
4427  * should be run under rtnl lock
4428  */
4429 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4430 {
4431         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4432
4433         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4434         return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4435 }
4436
4437 /*
4438  * Clear RESET_IN_PROGRESS bit for the current engine.
4439  *
4440  * Should be run under rtnl lock
4441  */
4442 static void bnx2x_set_reset_done(struct bnx2x *bp)
4443 {
4444         u32 val;
4445         u32 bit = BP_PATH(bp) ?
4446                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4447         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4448         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4449
4450         /* Clear the bit */
4451         val &= ~bit;
4452         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4453
4454         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4455 }
4456
4457 /*
4458  * Set RESET_IN_PROGRESS for the current engine.
4459  *
4460  * should be run under rtnl lock
4461  */
4462 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4463 {
4464         u32 val;
4465         u32 bit = BP_PATH(bp) ?
4466                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4467         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4468         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4469
4470         /* Set the bit */
4471         val |= bit;
4472         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4473         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4474 }
4475
4476 /*
4477  * Checks the RESET_IN_PROGRESS bit for the given engine.
4478  * should be run under rtnl lock
4479  */
4480 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4481 {
4482         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4483         u32 bit = engine ?
4484                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4485
4486         /* return false if bit is set */
4487         return (val & bit) ? false : true;
4488 }
4489
4490 /*
4491  * Set the load mark of the current PF in the engine's load mask.
4492  *
4493  * should be run under rtnl lock
4494  */
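/* Illustrative example: with pf_num 2 on path 0 and a current engine0 load
 * mask of 0x01 (only PF0 loaded), the counter field becomes 0x05 (PF0 and
 * PF2 loaded).
 */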
4495 void bnx2x_set_pf_load(struct bnx2x *bp)
4496 {
4497         u32 val1, val;
4498         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4499                              BNX2X_PATH0_LOAD_CNT_MASK;
4500         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4501                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4502
4503         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4504         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4505
4506         DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4507
4508         /* get the current counter value */
4509         val1 = (val & mask) >> shift;
4510
4511         /* set bit of that PF */
4512         val1 |= (1 << bp->pf_num);
4513
4514         /* clear the old value */
4515         val &= ~mask;
4516
4517         /* set the new one */
4518         val |= ((val1 << shift) & mask);
4519
4520         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4521         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4522 }
4523
4524 /**
4525  * bnx2x_clear_pf_load - clear pf load mark
4526  *
4527  * @bp:         driver handle
4528  *
4529  * Should be run under rtnl lock.
4530  * Clears the load mark of the current PF in the engine's load mask.
4531  * Returns whether other functions are still loaded.
4532  */
4533 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4534 {
4535         u32 val1, val;
4536         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4537                              BNX2X_PATH0_LOAD_CNT_MASK;
4538         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4539                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4540
4541         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4542         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4543         DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4544
4545         /* get the current counter value */
4546         val1 = (val & mask) >> shift;
4547
4548         /* clear bit of that PF */
4549         val1 &= ~(1 << bp->pf_num);
4550
4551         /* clear the old value */
4552         val &= ~mask;
4553
4554         /* set the new one */
4555         val |= ((val1 << shift) & mask);
4556
4557         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4558         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4559         return val1 != 0;
4560 }
4561
4562 /*
4563  * Read the load status for the given engine.
4564  *
4565  * should be run under rtnl lock
4566  */
4567 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4568 {
4569         u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4570                              BNX2X_PATH0_LOAD_CNT_MASK);
4571         u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4572                              BNX2X_PATH0_LOAD_CNT_SHIFT);
4573         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4574
4575         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4576
4577         val = (val & mask) >> shift;
4578
4579         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4580            engine, val);
4581
4582         return val != 0;
4583 }
4584
4585 static void _print_parity(struct bnx2x *bp, u32 reg)
4586 {
4587         pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4588 }
4589
4590 static void _print_next_block(int idx, const char *blk)
4591 {
4592         pr_cont("%s%s", idx ? ", " : "", blk);
4593 }
4594
4595 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4596                                             int *par_num, bool print)
4597 {
4598         u32 cur_bit;
4599         bool res;
4600         int i;
4601
4602         res = false;
4603
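        /* Walk sig bit by bit; each handled bit is cleared from sig, so the
         * loop ends once every asserted parity bit has been consumed.
         */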
4604         for (i = 0; sig; i++) {
4605                 cur_bit = (0x1UL << i);
4606                 if (sig & cur_bit) {
4607                         res = true; /* Each set bit is a real error! */
4608
4609                         if (print) {
4610                                 switch (cur_bit) {
4611                                 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4612                                         _print_next_block((*par_num)++, "BRB");
4613                                         _print_parity(bp,
4614                                                       BRB1_REG_BRB1_PRTY_STS);
4615                                         break;
4616                                 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4617                                         _print_next_block((*par_num)++,
4618                                                           "PARSER");
4619                                         _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4620                                         break;
4621                                 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4622                                         _print_next_block((*par_num)++, "TSDM");
4623                                         _print_parity(bp,
4624                                                       TSDM_REG_TSDM_PRTY_STS);
4625                                         break;
4626                                 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4627                                         _print_next_block((*par_num)++,
4628                                                           "SEARCHER");
4629                                         _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4630                                         break;
4631                                 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4632                                         _print_next_block((*par_num)++, "TCM");
4633                                         _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4634                                         break;
4635                                 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4636                                         _print_next_block((*par_num)++,
4637                                                           "TSEMI");
4638                                         _print_parity(bp,
4639                                                       TSEM_REG_TSEM_PRTY_STS_0);
4640                                         _print_parity(bp,
4641                                                       TSEM_REG_TSEM_PRTY_STS_1);
4642                                         break;
4643                                 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4644                                         _print_next_block((*par_num)++, "XPB");
4645                                         _print_parity(bp, GRCBASE_XPB +
4646                                                           PB_REG_PB_PRTY_STS);
4647                                         break;
4648                                 }
4649                         }
4650
4651                         /* Clear the bit */
4652                         sig &= ~cur_bit;
4653                 }
4654         }
4655
4656         return res;
4657 }
4658
4659 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4660                                             int *par_num, bool *global,
4661                                             bool print)
4662 {
4663         u32 cur_bit;
4664         bool res;
4665         int i;
4666
4667         res = false;
4668
4669         for (i = 0; sig; i++) {
4670                 cur_bit = (0x1UL << i);
4671                 if (sig & cur_bit) {
4672                         res = true; /* Each set bit is a real error! */
4673                         switch (cur_bit) {
4674                         case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4675                                 if (print) {
4676                                         _print_next_block((*par_num)++, "PBF");
4677                                         _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4678                                 }
4679                                 break;
4680                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4681                                 if (print) {
4682                                         _print_next_block((*par_num)++, "QM");
4683                                         _print_parity(bp, QM_REG_QM_PRTY_STS);
4684                                 }
4685                                 break;
4686                         case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4687                                 if (print) {
4688                                         _print_next_block((*par_num)++, "TM");
4689                                         _print_parity(bp, TM_REG_TM_PRTY_STS);
4690                                 }
4691                                 break;
4692                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4693                                 if (print) {
4694                                         _print_next_block((*par_num)++, "XSDM");
4695                                         _print_parity(bp,
4696                                                       XSDM_REG_XSDM_PRTY_STS);
4697                                 }
4698                                 break;
4699                         case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4700                                 if (print) {
4701                                         _print_next_block((*par_num)++, "XCM");
4702                                         _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4703                                 }
4704                                 break;
4705                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4706                                 if (print) {
4707                                         _print_next_block((*par_num)++,
4708                                                           "XSEMI");
4709                                         _print_parity(bp,
4710                                                       XSEM_REG_XSEM_PRTY_STS_0);
4711                                         _print_parity(bp,
4712                                                       XSEM_REG_XSEM_PRTY_STS_1);
4713                                 }
4714                                 break;
4715                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4716                                 if (print) {
4717                                         _print_next_block((*par_num)++,
4718                                                           "DOORBELLQ");
4719                                         _print_parity(bp,
4720                                                       DORQ_REG_DORQ_PRTY_STS);
4721                                 }
4722                                 break;
4723                         case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4724                                 if (print) {
4725                                         _print_next_block((*par_num)++, "NIG");
4726                                         if (CHIP_IS_E1x(bp)) {
4727                                                 _print_parity(bp,
4728                                                         NIG_REG_NIG_PRTY_STS);
4729                                         } else {
4730                                                 _print_parity(bp,
4731                                                         NIG_REG_NIG_PRTY_STS_0);
4732                                                 _print_parity(bp,
4733                                                         NIG_REG_NIG_PRTY_STS_1);
4734                                         }
4735                                 }
4736                                 break;
4737                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4738                                 if (print)
4739                                         _print_next_block((*par_num)++,
4740                                                           "VAUX PCI CORE");
4741                                 *global = true;
4742                                 break;
4743                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4744                                 if (print) {
4745                                         _print_next_block((*par_num)++,
4746                                                           "DEBUG");
4747                                         _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4748                                 }
4749                                 break;
4750                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4751                                 if (print) {
4752                                         _print_next_block((*par_num)++, "USDM");
4753                                         _print_parity(bp,
4754                                                       USDM_REG_USDM_PRTY_STS);
4755                                 }
4756                                 break;
4757                         case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4758                                 if (print) {
4759                                         _print_next_block((*par_num)++, "UCM");
4760                                         _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4761                                 }
4762                                 break;
4763                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4764                                 if (print) {
4765                                         _print_next_block((*par_num)++,
4766                                                           "USEMI");
4767                                         _print_parity(bp,
4768                                                       USEM_REG_USEM_PRTY_STS_0);
4769                                         _print_parity(bp,
4770                                                       USEM_REG_USEM_PRTY_STS_1);
4771                                 }
4772                                 break;
4773                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4774                                 if (print) {
4775                                         _print_next_block((*par_num)++, "UPB");
4776                                         _print_parity(bp, GRCBASE_UPB +
4777                                                           PB_REG_PB_PRTY_STS);
4778                                 }
4779                                 break;
4780                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4781                                 if (print) {
4782                                         _print_next_block((*par_num)++, "CSDM");
4783                                         _print_parity(bp,
4784                                                       CSDM_REG_CSDM_PRTY_STS);
4785                                 }
4786                                 break;
4787                         case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4788                                 if (print) {
4789                                         _print_next_block((*par_num)++, "CCM");
4790                                         _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4791                                 }
4792                                 break;
4793                         }
4794
4795                         /* Clear the bit */
4796                         sig &= ~cur_bit;
4797                 }
4798         }
4799
4800         return res;
4801 }
4802
4803 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4804                                             int *par_num, bool print)
4805 {
4806         u32 cur_bit;
4807         bool res;
4808         int i;
4809
4810         res = false;
4811
4812         for (i = 0; sig; i++) {
4813                 cur_bit = (0x1UL << i);
4814                 if (sig & cur_bit) {
4815                         res = true; /* Each set bit is a real error! */
4816                         if (print) {
4817                                 switch (cur_bit) {
4818                                 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4819                                         _print_next_block((*par_num)++,
4820                                                           "CSEMI");
4821                                         _print_parity(bp,
4822                                                       CSEM_REG_CSEM_PRTY_STS_0);
4823                                         _print_parity(bp,
4824                                                       CSEM_REG_CSEM_PRTY_STS_1);
4825                                         break;
4826                                 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4827                                         _print_next_block((*par_num)++, "PXP");
4828                                         _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4829                                         _print_parity(bp,
4830                                                       PXP2_REG_PXP2_PRTY_STS_0);
4831                                         _print_parity(bp,
4832                                                       PXP2_REG_PXP2_PRTY_STS_1);
4833                                         break;
4834                                 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4835                                         _print_next_block((*par_num)++,
4836                                                           "PXPPCICLOCKCLIENT");
4837                                         break;
4838                                 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4839                                         _print_next_block((*par_num)++, "CFC");
4840                                         _print_parity(bp,
4841                                                       CFC_REG_CFC_PRTY_STS);
4842                                         break;
4843                                 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4844                                         _print_next_block((*par_num)++, "CDU");
4845                                         _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4846                                         break;
4847                                 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4848                                         _print_next_block((*par_num)++, "DMAE");
4849                                         _print_parity(bp,
4850                                                       DMAE_REG_DMAE_PRTY_STS);
4851                                         break;
4852                                 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4853                                         _print_next_block((*par_num)++, "IGU");
4854                                         if (CHIP_IS_E1x(bp))
4855                                                 _print_parity(bp,
4856                                                         HC_REG_HC_PRTY_STS);
4857                                         else
4858                                                 _print_parity(bp,
4859                                                         IGU_REG_IGU_PRTY_STS);
4860                                         break;
4861                                 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4862                                         _print_next_block((*par_num)++, "MISC");
4863                                         _print_parity(bp,
4864                                                       MISC_REG_MISC_PRTY_STS);
4865                                         break;
4866                                 }
4867                         }
4868
4869                         /* Clear the bit */
4870                         sig &= ~cur_bit;
4871                 }
4872         }
4873
4874         return res;
4875 }
4876
4877 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4878                                             int *par_num, bool *global,
4879                                             bool print)
4880 {
4881         bool res = false;
4882         u32 cur_bit;
4883         int i;
4884
4885         for (i = 0; sig; i++) {
4886                 cur_bit = (0x1UL << i);
4887                 if (sig & cur_bit) {
4888                         switch (cur_bit) {
4889                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4890                                 if (print)
4891                                         _print_next_block((*par_num)++,
4892                                                           "MCP ROM");
4893                                 *global = true;
4894                                 res = true;
4895                                 break;
4896                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4897                                 if (print)
4898                                         _print_next_block((*par_num)++,
4899                                                           "MCP UMP RX");
4900                                 *global = true;
4901                                 res = true;
4902                                 break;
4903                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4904                                 if (print)
4905                                         _print_next_block((*par_num)++,
4906                                                           "MCP UMP TX");
4907                                 *global = true;
4908                                 res = true;
4909                                 break;
4910                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4911                                 (*par_num)++;
4912                                 /* clear latched SCPAD PARITY from MCP */
4913                                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4914                                        1UL << 10);
4915                                 break;
4916                         }
4917
4918                         /* Clear the bit */
4919                         sig &= ~cur_bit;
4920                 }
4921         }
4922
4923         return res;
4924 }
4925
4926 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4927                                             int *par_num, bool print)
4928 {
4929         u32 cur_bit;
4930         bool res;
4931         int i;
4932
4933         res = false;
4934
4935         for (i = 0; sig; i++) {
4936                 cur_bit = (0x1UL << i);
4937                 if (sig & cur_bit) {
4938                         res = true; /* Each set bit is a real error! */
4939                         if (print) {
4940                                 switch (cur_bit) {
4941                                 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4942                                         _print_next_block((*par_num)++,
4943                                                           "PGLUE_B");
4944                                         _print_parity(bp,
4945                                                       PGLUE_B_REG_PGLUE_B_PRTY_STS);
4946                                         break;
4947                                 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4948                                         _print_next_block((*par_num)++, "ATC");
4949                                         _print_parity(bp,
4950                                                       ATC_REG_ATC_PRTY_STS);
4951                                         break;
4952                                 }
4953                         }
4954                         /* Clear the bit */
4955                         sig &= ~cur_bit;
4956                 }
4957         }
4958
4959         return res;
4960 }
4961
4962 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4963                               u32 *sig)
4964 {
4965         bool res = false;
4966
4967         if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4968             (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4969             (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4970             (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4971             (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4972                 int par_num = 0;
4973
4974                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention:\n"
4975                                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4976                           sig[0] & HW_PRTY_ASSERT_SET_0,
4977                           sig[1] & HW_PRTY_ASSERT_SET_1,
4978                           sig[2] & HW_PRTY_ASSERT_SET_2,
4979                           sig[3] & HW_PRTY_ASSERT_SET_3,
4980                           sig[4] & HW_PRTY_ASSERT_SET_4);
4981                 if (print) {
4982                         if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4983                              (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4984                              (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4985                              (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4986                              (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4987                                 netdev_err(bp->dev,
4988                                            "Parity errors detected in blocks: ");
4989                         } else {
4990                                 print = false;
4991                         }
4992                 }
4993                 res |= bnx2x_check_blocks_with_parity0(bp,
4994                         sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4995                 res |= bnx2x_check_blocks_with_parity1(bp,
4996                         sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4997                 res |= bnx2x_check_blocks_with_parity2(bp,
4998                         sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4999                 res |= bnx2x_check_blocks_with_parity3(bp,
5000                         sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
5001                 res |= bnx2x_check_blocks_with_parity4(bp,
5002                         sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
5003
5004                 if (print)
5005                         pr_cont("\n");
5006         }
5007
5008         return res;
5009 }
5010
5011 /**
5012  * bnx2x_chk_parity_attn - checks for parity attentions.
5013  *
5014  * @bp:         driver handle
5015  * @global:     true if there was a global attention
5016  * @print:      show parity attention in syslog
5017  */
5018 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5019 {
5020         struct attn_route attn = { {0} };
5021         int port = BP_PORT(bp);
5022
5023         attn.sig[0] = REG_RD(bp,
5024                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
5025                              port*4);
5026         attn.sig[1] = REG_RD(bp,
5027                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
5028                              port*4);
5029         attn.sig[2] = REG_RD(bp,
5030                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
5031                              port*4);
5032         attn.sig[3] = REG_RD(bp,
5033                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
5034                              port*4);
5035         /* Since MCP attentions can't be disabled inside the block, we need to
5036          * read AEU registers to see whether they're currently disabled
5037          */
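        /* The expression below keeps the MCP parity bits only where the AEU
         * currently enables them; all other bits pass through unchanged.
         */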
5038         attn.sig[3] &= ((REG_RD(bp,
5039                                 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5040                                       : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5041                          MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5042                         ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5043
5044         if (!CHIP_IS_E1x(bp))
5045                 attn.sig[4] = REG_RD(bp,
5046                         MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5047                                      port*4);
5048
5049         return bnx2x_parity_attn(bp, global, print, attn.sig);
5050 }
5051
5052 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5053 {
5054         u32 val;
5055         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5056
5057                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5058                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5059                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5060                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5061                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5062                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5063                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5064                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5065                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5066                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5067                 if (val &
5068                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5069                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5070                 if (val &
5071                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5072                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5073                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5074                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5075                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5076                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5077                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5078                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5079         }
5080         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5081                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5082                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5083                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5084                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5085                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5086                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5087                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5088                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5089                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5090                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5091                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5092                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5093                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5094                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5095         }
5096
5097         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5098                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5099                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5100                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5101                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5102         }
5103 }
5104
5105 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5106 {
5107         struct attn_route attn, *group_mask;
5108         int port = BP_PORT(bp);
5109         int index;
5110         u32 reg_addr;
5111         u32 val;
5112         u32 aeu_mask;
5113         bool global = false;
5114
5115         /* Need to take the HW lock because the MCP or the other port
5116          * might also try to handle this event */
5117         bnx2x_acquire_alr(bp);
5118
5119         if (bnx2x_chk_parity_attn(bp, &global, true)) {
5120 #ifndef BNX2X_STOP_ON_ERROR
5121                 bp->recovery_state = BNX2X_RECOVERY_INIT;
5122                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5123                 /* Disable HW interrupts */
5124                 bnx2x_int_disable(bp);
5125                 /* In case of parity errors don't handle attentions, so
5126                  * that the other function can also "see" them.
5127                  */
5128 #else
5129                 bnx2x_panic();
5130 #endif
5131                 bnx2x_release_alr(bp);
5132                 return;
5133         }
5134
5135         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5136         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5137         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5138         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5139         if (!CHIP_IS_E1x(bp))
5140                 attn.sig[4] =
5141                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5142         else
5143                 attn.sig[4] = 0;
5144
5145         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5146            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5147
5148         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5149                 if (deasserted & (1 << index)) {
5150                         group_mask = &bp->attn_group[index];
5151
5152                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5153                            index,
5154                            group_mask->sig[0], group_mask->sig[1],
5155                            group_mask->sig[2], group_mask->sig[3],
5156                            group_mask->sig[4]);
5157
5158                         bnx2x_attn_int_deasserted4(bp,
5159                                         attn.sig[4] & group_mask->sig[4]);
5160                         bnx2x_attn_int_deasserted3(bp,
5161                                         attn.sig[3] & group_mask->sig[3]);
5162                         bnx2x_attn_int_deasserted1(bp,
5163                                         attn.sig[1] & group_mask->sig[1]);
5164                         bnx2x_attn_int_deasserted2(bp,
5165                                         attn.sig[2] & group_mask->sig[2]);
5166                         bnx2x_attn_int_deasserted0(bp,
5167                                         attn.sig[0] & group_mask->sig[0]);
5168                 }
5169         }
5170
5171         bnx2x_release_alr(bp);
5172
5173         if (bp->common.int_block == INT_BLOCK_HC)
5174                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5175                             COMMAND_REG_ATTN_BITS_CLR);
5176         else
5177                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5178
5179         val = ~deasserted;
5180         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5181            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5182         REG_WR(bp, reg_addr, val);
5183
5184         if (~bp->attn_state & deasserted)
5185                 BNX2X_ERR("IGU ERROR\n");
5186
5187         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5188                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
5189
5190         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5191         aeu_mask = REG_RD(bp, reg_addr);
5192
5193         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
5194            aeu_mask, deasserted);
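        /* Re-enable only the AEU attention lines (mask 0x3ff) that were just
         * deasserted; all other mask bits keep their current state.
         */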
5195         aeu_mask |= (deasserted & 0x3ff);
5196         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5197
5198         REG_WR(bp, reg_addr, aeu_mask);
5199         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5200
5201         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5202         bp->attn_state &= ~deasserted;
5203         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5204 }
5205
5206 static void bnx2x_attn_int(struct bnx2x *bp)
5207 {
5208         /* read local copy of bits */
5209         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5210                                                                 attn_bits);
5211         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5212                                                                 attn_bits_ack);
5213         u32 attn_state = bp->attn_state;
5214
5215         /* look for changed bits */
5216         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
5217         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
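        /* Worked example: attn_bits=0b100, attn_ack=0b011, attn_state=0b011
         * yields asserted=0b100 (bit 2 newly raised) and deasserted=0b011
         * (bits 0-1 newly dropped).
         */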
5218
5219         DP(NETIF_MSG_HW,
5220            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
5221            attn_bits, attn_ack, asserted, deasserted);
5222
5223         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5224                 BNX2X_ERR("BAD attention state\n");
5225
5226         /* handle bits that were raised */
5227         if (asserted)
5228                 bnx2x_attn_int_asserted(bp, asserted);
5229
5230         if (deasserted)
5231                 bnx2x_attn_int_deasserted(bp, deasserted);
5232 }
5233
5234 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5235                       u16 index, u8 op, u8 update)
5236 {
5237         u32 igu_addr = bp->igu_base_addr;
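        /* Each IGU command slot occupies 8 bytes of BAR space, hence the
         * (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8 offset below.
         */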
5238         igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5239         bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5240                              igu_addr);
5241 }
5242
5243 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5244 {
5245         /* No memory barriers needed here; mmiowb() orders the MMIO write */
5246         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5247         mmiowb();
5248 }
5249
5250 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5251                                       union event_ring_elem *elem)
5252 {
5253         u8 err = elem->message.error;
5254
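        /* Nothing to do when CNIC is not started or the CID is a regular
         * L2 CID rather than a CNIC one.
         */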
5255         if (!bp->cnic_eth_dev.starting_cid  ||
5256             (cid < bp->cnic_eth_dev.starting_cid &&
5257             cid != bp->cnic_eth_dev.iscsi_l2_cid))
5258                 return 1;
5259
5260         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5261
5262         if (unlikely(err)) {
5263
5264                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5265                           cid);
5266                 bnx2x_panic_dump(bp, false);
5267         }
5268         bnx2x_cnic_cfc_comp(bp, cid, err);
5269         return 0;
5270 }
5271
5272 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5273 {
5274         struct bnx2x_mcast_ramrod_params rparam;
5275         int rc;
5276
5277         memset(&rparam, 0, sizeof(rparam));
5278
5279         rparam.mcast_obj = &bp->mcast_obj;
5280
5281         netif_addr_lock_bh(bp->dev);
5282
5283         /* Clear pending state for the last command */
5284         bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5285
5286         /* If there are pending mcast commands - send them */
5287         if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5288                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5289                 if (rc < 0)
5290                         BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5291                                   rc);
5292         }
5293
5294         netif_addr_unlock_bh(bp->dev);
5295 }
5296
5297 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5298                                             union event_ring_elem *elem)
5299 {
5300         unsigned long ramrod_flags = 0;
5301         int rc = 0;
5302         u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5303         u32 cid = echo & BNX2X_SWCID_MASK;
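        /* echo carries the SW CID in its low bits and the pending filter
         * type in the bits above BNX2X_SWCID_SHIFT.
         */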
5304         struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5305
5306         /* Always push next commands out, don't wait here */
5307         __set_bit(RAMROD_CONT, &ramrod_flags);
5308
5309         switch (echo >> BNX2X_SWCID_SHIFT) {
5310         case BNX2X_FILTER_MAC_PENDING:
5311                 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5312                 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5313                         vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5314                 else
5315                         vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5316
5317                 break;
5318         case BNX2X_FILTER_VLAN_PENDING:
5319                 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
5320                 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5321                 break;
5322         case BNX2X_FILTER_MCAST_PENDING:
5323                 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5324                 /* This is only relevant for 57710 where multicast MACs are
5325                  * configured as unicast MACs using the same ramrod.
5326                  */
5327                 bnx2x_handle_mcast_eqe(bp);
5328                 return;
5329         default:
5330                 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5331                 return;
5332         }
5333
5334         rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5335
5336         if (rc < 0)
5337                 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5338         else if (rc > 0)
5339                 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5340 }
5341
5342 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5343
5344 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5345 {
5346         netif_addr_lock_bh(bp->dev);
5347
5348         clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5349
5350         /* Send rx_mode command again if was requested */
5351         if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5352                 bnx2x_set_storm_rx_mode(bp);
5353         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5354                                     &bp->sp_state))
5355                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5356         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5357                                     &bp->sp_state))
5358                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5359
5360         netif_addr_unlock_bh(bp->dev);
5361 }
5362
5363 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5364                                               union event_ring_elem *elem)
5365 {
5366         if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5367                 DP(BNX2X_MSG_SP,
5368                    "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5369                    elem->message.data.vif_list_event.func_bit_map);
5370                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5371                         elem->message.data.vif_list_event.func_bit_map);
5372         } else if (elem->message.data.vif_list_event.echo ==
5373                    VIF_LIST_RULE_SET) {
5374                 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5375                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5376         }
5377 }
5378
5379 /* called with rtnl_lock */
5380 static void bnx2x_after_function_update(struct bnx2x *bp)
5381 {
5382         int q, rc;
5383         struct bnx2x_fastpath *fp;
5384         struct bnx2x_queue_state_params queue_params = {NULL};
5385         struct bnx2x_queue_update_params *q_update_params =
5386                 &queue_params.params.update;
5387
5388         /* Send Q update command with afex vlan removal values for all Qs */
5389         queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5390
5391         /* set silent vlan removal values according to vlan mode */
5392         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5393                   &q_update_params->update_flags);
5394         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5395                   &q_update_params->update_flags);
5396         __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5397
5398         /* in access mode, mask and value are 0 so all vlans are stripped */
5399         if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5400                 q_update_params->silent_removal_value = 0;
5401                 q_update_params->silent_removal_mask = 0;
5402         } else {
5403                 q_update_params->silent_removal_value =
5404                         (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5405                 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5406         }
5407
5408         for_each_eth_queue(bp, q) {
5409                 /* Set the appropriate Queue object */
5410                 fp = &bp->fp[q];
5411                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5412
5413                 /* send the ramrod */
5414                 rc = bnx2x_queue_state_change(bp, &queue_params);
5415                 if (rc < 0)
5416                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5417                                   q);
5418         }
5419
5420         if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5421                 fp = &bp->fp[FCOE_IDX(bp)];
5422                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5423
5424                 /* clear pending completion bit */
5425                 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5426
5427                 /* mark that the FCoE Q update completion is pending */
5428                 smp_mb__before_atomic();
5429                 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5430                 smp_mb__after_atomic();
5431
5432                 /* send Q update ramrod for FCoE Q */
5433                 rc = bnx2x_queue_state_change(bp, &queue_params);
5434                 if (rc < 0)
5435                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5436                         BNX2X_ERR("Failed to config silent vlan rem for FCoE Q %d\n",
5437                                   FCOE_IDX(bp));
5438                 /* If no FCoE ring - ACK MCP now */
5439                 bnx2x_link_report(bp);
5440                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5441         }
5442 }
5443
5444 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5445         struct bnx2x *bp, u32 cid)
5446 {
5447         DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5448
5449         if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5450                 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5451         else
5452                 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5453 }
5454
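/* Event queue interrupt handler: walk the EQ elements between the
 * software and hardware consumers, dispatch each element by opcode
 * (and, for classification/mcast/filter events, by opcode | bp->state),
 * then return the consumed credits to eq_spq_left and update the
 * producer in the IGU.
 */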
5455 static void bnx2x_eq_int(struct bnx2x *bp)
5456 {
5457         u16 hw_cons, sw_cons, sw_prod;
5458         union event_ring_elem *elem;
5459         u8 echo;
5460         u32 cid;
5461         u8 opcode;
5462         int rc, spqe_cnt = 0;
5463         struct bnx2x_queue_sp_obj *q_obj;
5464         struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5465         struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5466
5467         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5468
5469         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
5470          * When we get the next-page element we need to adjust so the loop
5471          * condition below will be met: the next-page element is the size of
5472          * a regular element, hence we increment by 1.
5473          */
5474         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5475                 hw_cons++;
5476
5477         /* This function never runs in parallel with itself for a
5478          * specific bp, thus there is no need for a "paired" read memory
5479          * barrier here.
5480          */
5481         sw_cons = bp->eq_cons;
5482         sw_prod = bp->eq_prod;
5483
5484         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
5485                         hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5486
5487         for (; sw_cons != hw_cons;
5488               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5489
5490                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5491
5492                 rc = bnx2x_iov_eq_sp_event(bp, elem);
5493                 if (!rc) {
5494                         DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5495                            rc);
5496                         goto next_spqe;
5497                 }
5498
5499                 opcode = elem->message.opcode;
5500
5501                 /* handle eq element */
5502                 switch (opcode) {
5503                 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5504                         bnx2x_vf_mbx_schedule(bp,
5505                                               &elem->message.data.vf_pf_event);
5506                         continue;
5507
5508                 case EVENT_RING_OPCODE_STAT_QUERY:
5509                         DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5510                                "got statistics comp event %d\n",
5511                                bp->stats_comp++);
5512                         /* nothing to do with stats comp */
5513                         goto next_spqe;
5514
5515                 case EVENT_RING_OPCODE_CFC_DEL:
5516                         /* handle according to cid range */
5517                         /*
5518                          * we may want to verify here that the bp state is
5519                          * HALTING
5520                          */
5521
5522                         /* elem CID originates from FW and is little-endian */
5523                         cid = SW_CID(elem->message.data.cfc_del_event.cid);
5524
5525                         DP(BNX2X_MSG_SP,
5526                            "got delete ramrod for MULTI[%d]\n", cid);
5527
5528                         if (CNIC_LOADED(bp) &&
5529                             !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5530                                 goto next_spqe;
5531
5532                         q_obj = bnx2x_cid_to_q_obj(bp, cid);
5533
5534                         if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5535                                 break;
5536
5537                         goto next_spqe;
5538
5539                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5540                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5541                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5542                         if (f_obj->complete_cmd(bp, f_obj,
5543                                                 BNX2X_F_CMD_TX_STOP))
5544                                 break;
5545                         goto next_spqe;
5546
5547                 case EVENT_RING_OPCODE_START_TRAFFIC:
5548                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5549                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5550                         if (f_obj->complete_cmd(bp, f_obj,
5551                                                 BNX2X_F_CMD_TX_START))
5552                                 break;
5553                         goto next_spqe;
5554
5555                 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5556                         echo = elem->message.data.function_update_event.echo;
5557                         if (echo == SWITCH_UPDATE) {
5558                                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5559                                    "got FUNC_SWITCH_UPDATE ramrod\n");
5560                                 if (f_obj->complete_cmd(
5561                                         bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5562                                         break;
5563
5564                         } else {
5565                                 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5566
5567                                 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5568                                    "AFEX: ramrod completed FUNCTION_UPDATE\n");
5569                                 f_obj->complete_cmd(bp, f_obj,
5570                                                     BNX2X_F_CMD_AFEX_UPDATE);
5571
5572                                 /* We will perform the Queues update from
5573                                  * sp_rtnl task as all Queue SP operations
5574                                  * should run under rtnl_lock.
5575                                  */
5576                                 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5577                         }
5578
5579                         goto next_spqe;
5580
5581                 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5582                         f_obj->complete_cmd(bp, f_obj,
5583                                             BNX2X_F_CMD_AFEX_VIFLISTS);
5584                         bnx2x_after_afex_vif_lists(bp, elem);
5585                         goto next_spqe;
5586                 case EVENT_RING_OPCODE_FUNCTION_START:
5587                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5588                            "got FUNC_START ramrod\n");
5589                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5590                                 break;
5591
5592                         goto next_spqe;
5593
5594                 case EVENT_RING_OPCODE_FUNCTION_STOP:
5595                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5596                            "got FUNC_STOP ramrod\n");
5597                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5598                                 break;
5599
5600                         goto next_spqe;
5601
5602                 case EVENT_RING_OPCODE_SET_TIMESYNC:
5603                         DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5604                            "got set_timesync ramrod completion\n");
5605                         if (f_obj->complete_cmd(bp, f_obj,
5606                                                 BNX2X_F_CMD_SET_TIMESYNC))
5607                                 break;
5608                         goto next_spqe;
5609                 }
5610
5611                 switch (opcode | bp->state) {
5612                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5613                       BNX2X_STATE_OPEN):
5614                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5615                       BNX2X_STATE_OPENING_WAIT4_PORT):
5616                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5617                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5618                         DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5619                            SW_CID(elem->message.data.eth_event.echo));
5620                         rss_raw->clear_pending(rss_raw);
5621                         break;
5622
5623                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5624                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5625                 case (EVENT_RING_OPCODE_SET_MAC |
5626                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5627                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5628                       BNX2X_STATE_OPEN):
5629                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5630                       BNX2X_STATE_DIAG):
5631                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5632                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5633                         DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
5634                         bnx2x_handle_classification_eqe(bp, elem);
5635                         break;
5636
5637                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5638                       BNX2X_STATE_OPEN):
5639                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5640                       BNX2X_STATE_DIAG):
5641                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5642                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5643                         DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5644                         bnx2x_handle_mcast_eqe(bp);
5645                         break;
5646
5647                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5648                       BNX2X_STATE_OPEN):
5649                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5650                       BNX2X_STATE_DIAG):
5651                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5652                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5653                         DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5654                         bnx2x_handle_rx_mode_eqe(bp);
5655                         break;
5656                 default:
5657                         /* unknown event - log an error and continue */
5658                         BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5659                                   elem->message.opcode, bp->state);
5660                 }
5661 next_spqe:
5662                 spqe_cnt++;
5663         } /* for */
5664
5665         smp_mb__before_atomic();
5666         atomic_add(spqe_cnt, &bp->eq_spq_left);
5667
5668         bp->eq_cons = sw_cons;
5669         bp->eq_prod = sw_prod;
5670         /* Make sure the memory writes above are issued before updating the producer */
5671         smp_wmb();
5672
5673         /* update producer */
5674         bnx2x_update_eq_prod(bp, bp->eq_prod);
5675 }
5676
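/* Slow-path work item, scheduled from the slow-path ISR: consume the
 * default status block (HW attentions and EQ completions), ack the
 * status block and re-enable the IGU interrupt line.
 */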
5677 static void bnx2x_sp_task(struct work_struct *work)
5678 {
5679         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5680
5681         DP(BNX2X_MSG_SP, "sp task invoked\n");
5682
5683         /* make sure the atomic interrupt_occurred has been written */
5684         smp_rmb();
5685         if (atomic_read(&bp->interrupt_occurred)) {
5686
5687                 /* what work needs to be performed? */
5688                 u16 status = bnx2x_update_dsb_idx(bp);
5689
5690                 DP(BNX2X_MSG_SP, "status %x\n", status);
5691                 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5692                 atomic_set(&bp->interrupt_occurred, 0);
5693
5694                 /* HW attentions */
5695                 if (status & BNX2X_DEF_SB_ATT_IDX) {
5696                         bnx2x_attn_int(bp);
5697                         status &= ~BNX2X_DEF_SB_ATT_IDX;
5698                 }
5699
5700                 /* SP events: STAT_QUERY and others */
5701                 if (status & BNX2X_DEF_SB_IDX) {
5702                         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5703
5704                         if (FCOE_INIT(bp) &&
5705                             (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5706                                 /* Prevent local bottom-halves from running as
5707                                  * we are going to change the local NAPI list.
5708                                  */
5709                                 local_bh_disable();
5710                                 napi_schedule(&bnx2x_fcoe(bp, napi));
5711                                 local_bh_enable();
5712                         }
5713
5714                         /* Handle EQ completions */
5715                         bnx2x_eq_int(bp);
5716                         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5717                                      le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5718
5719                         status &= ~BNX2X_DEF_SB_IDX;
5720                 }
5721
5722                 /* if status is nonzero then perhaps something went wrong */
5723                 if (unlikely(status))
5724                         DP(BNX2X_MSG_SP,
5725                            "got an unknown interrupt! (status 0x%x)\n", status);
5726
5727                 /* ack status block only if something was actually handled */
5728                 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5729                              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5730         }
5731
5732         /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5733         if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5734                                &bp->sp_state)) {
5735                 bnx2x_link_report(bp);
5736                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5737         }
5738 }
5739
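/* Slow-path MSI-X interrupt handler: mask further default-SB interrupts,
 * let CNIC (if loaded) handle its portion, and defer the rest of the
 * work to the sp task.
 */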
5740 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5741 {
5742         struct net_device *dev = dev_instance;
5743         struct bnx2x *bp = netdev_priv(dev);
5744
5745         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5746                      IGU_INT_DISABLE, 0);
5747
5748 #ifdef BNX2X_STOP_ON_ERROR
5749         if (unlikely(bp->panic))
5750                 return IRQ_HANDLED;
5751 #endif
5752
5753         if (CNIC_LOADED(bp)) {
5754                 struct cnic_ops *c_ops;
5755
5756                 rcu_read_lock();
5757                 c_ops = rcu_dereference(bp->cnic_ops);
5758                 if (c_ops)
5759                         c_ops->cnic_handler(bp->cnic_data, NULL);
5760                 rcu_read_unlock();
5761         }
5762
5763         /* schedule sp task to perform default status block work, ack
5764          * attentions and enable interrupts.
5765          */
5766         bnx2x_schedule_sp_task(bp);
5767
5768         return IRQ_HANDLED;
5769 }
5770
5771 /* end of slow path */
5772
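/* Write the driver pulse sequence to the shmem mailbox so the MFW can
 * tell the driver is alive; the companion check in bnx2x_timer() reads
 * the MCP pulse back and compares the sequence numbers.
 */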
5773 void bnx2x_drv_pulse(struct bnx2x *bp)
5774 {
5775         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5776                  bp->fw_drv_pulse_wr_seq);
5777 }
5778
5779 static void bnx2x_timer(struct timer_list *t)
5780 {
5781         struct bnx2x *bp = from_timer(bp, t, timer);
5782
5783         if (!netif_running(bp->dev))
5784                 return;
5785
5786         if (IS_PF(bp) &&
5787             !BP_NOMCP(bp)) {
5788                 int mb_idx = BP_FW_MB_IDX(bp);
5789                 u16 drv_pulse;
5790                 u16 mcp_pulse;
5791
5792                 ++bp->fw_drv_pulse_wr_seq;
5793                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5794                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5795                 bnx2x_drv_pulse(bp);
5796
5797                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5798                              MCP_PULSE_SEQ_MASK);
5799                 /* The delta between driver pulse and mcp response
5800                  * should not get too big. If the MFW is more than 5 pulses
5801                  * behind, we should worry about it enough to generate an error
5802                  * log.
5803                  */
5804                 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5805                         BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5806                                   drv_pulse, mcp_pulse);
5807         }
5808
5809         if (bp->state == BNX2X_STATE_OPEN)
5810                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5811
5812         /* sample the PF-VF bulletin board for new posts from the PF */
5813         if (IS_VF(bp))
5814                 bnx2x_timer_sriov(bp);
5815
5816         mod_timer(&bp->timer, jiffies + bp->current_interval);
5817 }
5818
5819 /* end of Statistics */
5820
5821 /* nic init */
5822
5823 /*
5824  * nic init service functions
5825  */
5826
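/* Fill 'len' bytes at 'addr' with the value 'fill': use dword writes
 * when both the address and the length are dword-aligned, otherwise
 * fall back to byte-wide writes.
 */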
5827 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5828 {
5829         u32 i;
5830         if (!(len%4) && !(addr%4))
5831                 for (i = 0; i < len; i += 4)
5832                         REG_WR(bp, addr + i, fill);
5833         else
5834                 for (i = 0; i < len; i++)
5835                         REG_WR8(bp, addr + i, fill);
5836 }
5837
5838 /* helper: writes FP SP data to FW - data_size in dwords */
5839 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5840                                 int fw_sb_id,
5841                                 u32 *sb_data_p,
5842                                 u32 data_size)
5843 {
5844         int index;
5845         for (index = 0; index < data_size; index++)
5846                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5847                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5848                         sizeof(u32)*index,
5849                         *(sb_data_p + index));
5850 }
5851
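/* Disable a fastpath status block in FW: write SB_DISABLED data for the
 * chip-appropriate layout (e2 vs e1x) and zero the status and sync
 * blocks in CSTORM internal memory.
 */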
5852 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5853 {
5854         u32 *sb_data_p;
5855         u32 data_size = 0;
5856         struct hc_status_block_data_e2 sb_data_e2;
5857         struct hc_status_block_data_e1x sb_data_e1x;
5858
5859         /* disable the function first */
5860         if (!CHIP_IS_E1x(bp)) {
5861                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5862                 sb_data_e2.common.state = SB_DISABLED;
5863                 sb_data_e2.common.p_func.vf_valid = false;
5864                 sb_data_p = (u32 *)&sb_data_e2;
5865                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5866         } else {
5867                 memset(&sb_data_e1x, 0,
5868                        sizeof(struct hc_status_block_data_e1x));
5869                 sb_data_e1x.common.state = SB_DISABLED;
5870                 sb_data_e1x.common.p_func.vf_valid = false;
5871                 sb_data_p = (u32 *)&sb_data_e1x;
5872                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5873         }
5874         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5875
5876         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5877                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5878                         CSTORM_STATUS_BLOCK_SIZE);
5879         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5880                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5881                         CSTORM_SYNC_BLOCK_SIZE);
5882 }
5883
5884 /* helper:  writes SP SB data to FW */
5885 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5886                 struct hc_sp_status_block_data *sp_sb_data)
5887 {
5888         int func = BP_FUNC(bp);
5889         int i;
5890         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5891                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5892                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5893                         i*sizeof(u32),
5894                         *((u32 *)sp_sb_data + i));
5895 }
5896
5897 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5898 {
5899         int func = BP_FUNC(bp);
5900         struct hc_sp_status_block_data sp_sb_data;
5901         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5902
5903         sp_sb_data.state = SB_DISABLED;
5904         sp_sb_data.p_func.vf_valid = false;
5905
5906         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5907
5908         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5909                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5910                         CSTORM_SP_STATUS_BLOCK_SIZE);
5911         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5912                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5913                         CSTORM_SP_SYNC_BLOCK_SIZE);
5914 }
5915
5916 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5917                                            int igu_sb_id, int igu_seg_id)
5918 {
5919         hc_sm->igu_sb_id = igu_sb_id;
5920         hc_sm->igu_seg_id = igu_seg_id;
5921         hc_sm->timer_value = 0xFF;
5922         hc_sm->time_to_expire = 0xFFFFFFFF;
5923 }
5924
5925 /* maps status block indices to their RX/TX state machine ids */
5926 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5927 {
5928         /* zero out state machine indices */
5929         /* rx indices */
5930         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5931
5932         /* tx indices */
5933         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5934         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5935         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5936         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5937
5938         /* map indices */
5939         /* rx indices */
5940         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5941                 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5942
5943         /* tx indices */
5944         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5945                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5946         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5947                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5948         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5949                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5950         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5951                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5952 }
5953
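/* Initialize a fastpath status block: build the chip-appropriate
 * (e2 vs e1x) SB data pointing at the host buffer in 'mapping', map the
 * SB indices to the RX/TX state machines, and write the result to the
 * CSTORM internal memory for 'fw_sb_id'.
 */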
5954 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5955                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
5956 {
5957         int igu_seg_id;
5958
5959         struct hc_status_block_data_e2 sb_data_e2;
5960         struct hc_status_block_data_e1x sb_data_e1x;
5961         struct hc_status_block_sm  *hc_sm_p;
5962         int data_size;
5963         u32 *sb_data_p;
5964
5965         if (CHIP_INT_MODE_IS_BC(bp))
5966                 igu_seg_id = HC_SEG_ACCESS_NORM;
5967         else
5968                 igu_seg_id = IGU_SEG_ACCESS_NORM;
5969
5970         bnx2x_zero_fp_sb(bp, fw_sb_id);
5971
5972         if (!CHIP_IS_E1x(bp)) {
5973                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5974                 sb_data_e2.common.state = SB_ENABLED;
5975                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5976                 sb_data_e2.common.p_func.vf_id = vfid;
5977                 sb_data_e2.common.p_func.vf_valid = vf_valid;
5978                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5979                 sb_data_e2.common.same_igu_sb_1b = true;
5980                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5981                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5982                 hc_sm_p = sb_data_e2.common.state_machine;
5983                 sb_data_p = (u32 *)&sb_data_e2;
5984                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5985                 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5986         } else {
5987                 memset(&sb_data_e1x, 0,
5988                        sizeof(struct hc_status_block_data_e1x));
5989                 sb_data_e1x.common.state = SB_ENABLED;
5990                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5991                 sb_data_e1x.common.p_func.vf_id = 0xff;
5992                 sb_data_e1x.common.p_func.vf_valid = false;
5993                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5994                 sb_data_e1x.common.same_igu_sb_1b = true;
5995                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5996                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5997                 hc_sm_p = sb_data_e1x.common.state_machine;
5998                 sb_data_p = (u32 *)&sb_data_e1x;
5999                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
6000                 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
6001         }
6002
6003         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
6004                                        igu_sb_id, igu_seg_id);
6005         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
6006                                        igu_sb_id, igu_seg_id);
6007
6008         DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
6009
6010         /* write indices to HW - PCI guarantees endianness of regpairs */
6011         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
6012 }
6013
6014 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
6015                                      u16 tx_usec, u16 rx_usec)
6016 {
6017         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
6018                                     false, rx_usec);
6019         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6020                                        HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
6021                                        tx_usec);
6022         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6023                                        HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
6024                                        tx_usec);
6025         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6026                                        HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
6027                                        tx_usec);
6028 }
6029
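/* Initialize the default (slow-path) status block: latch the AEU
 * attention-group signals, program the attention message address in the
 * HC/IGU, enable the SP status block data in CSTORM and ack to enable
 * the IGU interrupt.
 */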
6030 static void bnx2x_init_def_sb(struct bnx2x *bp)
6031 {
6032         struct host_sp_status_block *def_sb = bp->def_status_blk;
6033         dma_addr_t mapping = bp->def_status_blk_mapping;
6034         int igu_sp_sb_index;
6035         int igu_seg_id;
6036         int port = BP_PORT(bp);
6037         int func = BP_FUNC(bp);
6038         int reg_offset, reg_offset_en5;
6039         u64 section;
6040         int index;
6041         struct hc_sp_status_block_data sp_sb_data;
6042         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6043
6044         if (CHIP_INT_MODE_IS_BC(bp)) {
6045                 igu_sp_sb_index = DEF_SB_IGU_ID;
6046                 igu_seg_id = HC_SEG_ACCESS_DEF;
6047         } else {
6048                 igu_sp_sb_index = bp->igu_dsb_id;
6049                 igu_seg_id = IGU_SEG_ACCESS_DEF;
6050         }
6051
6052         /* ATTN */
6053         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6054                                             atten_status_block);
6055         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6056
6057         bp->attn_state = 0;
6058
6059         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6060                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6061         reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6062                                  MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6063         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6064                 int sindex;
6065                 /* take care of sig[0]..sig[4] */
6066                 for (sindex = 0; sindex < 4; sindex++)
6067                         bp->attn_group[index].sig[sindex] =
6068                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6069
6070                 if (!CHIP_IS_E1x(bp))
6071                         /*
6072                          * enable5 is separate from the rest of the registers,
6073                          * and therefore the address skip is 4
6074                          * and not 16 between the different groups
6075                          */
6076                         bp->attn_group[index].sig[4] = REG_RD(bp,
6077                                         reg_offset_en5 + 0x4*index);
6078                 else
6079                         bp->attn_group[index].sig[4] = 0;
6080         }
6081
6082         if (bp->common.int_block == INT_BLOCK_HC) {
6083                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6084                                      HC_REG_ATTN_MSG0_ADDR_L);
6085
6086                 REG_WR(bp, reg_offset, U64_LO(section));
6087                 REG_WR(bp, reg_offset + 4, U64_HI(section));
6088         } else if (!CHIP_IS_E1x(bp)) {
6089                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6090                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6091         }
6092
6093         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6094                                             sp_sb);
6095
6096         bnx2x_zero_sp_sb(bp);
6097
6098         /* PCI guarantees endianness of regpairs */
6099         sp_sb_data.state                = SB_ENABLED;
6100         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
6101         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
6102         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
6103         sp_sb_data.igu_seg_id           = igu_seg_id;
6104         sp_sb_data.p_func.pf_id         = func;
6105         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
6106         sp_sb_data.p_func.vf_id         = 0xff;
6107
6108         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6109
6110         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6111 }
6112
6113 void bnx2x_update_coalesce(struct bnx2x *bp)
6114 {
6115         int i;
6116
6117         for_each_eth_queue(bp, i)
6118                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6119                                          bp->tx_ticks, bp->rx_ticks);
6120 }
6121
6122 static void bnx2x_init_sp_ring(struct bnx2x *bp)
6123 {
6124         spin_lock_init(&bp->spq_lock);
6125         atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6126
6127         bp->spq_prod_idx = 0;
6128         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6129         bp->spq_prod_bd = bp->spq;
6130         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6131 }
6132
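/* Chain the EQ pages into a ring: the last element of each page is a
 * next-page pointer to the following page (wrapping at NUM_EQ_PAGES),
 * then seed the consumer/producer and the eq_spq_left credit.
 */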
6133 static void bnx2x_init_eq_ring(struct bnx2x *bp)
6134 {
6135         int i;
6136         for (i = 1; i <= NUM_EQ_PAGES; i++) {
6137                 union event_ring_elem *elem =
6138                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6139
6140                 elem->next_page.addr.hi =
6141                         cpu_to_le32(U64_HI(bp->eq_mapping +
6142                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6143                 elem->next_page.addr.lo =
6144                         cpu_to_le32(U64_LO(bp->eq_mapping +
6145                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
6146         }
6147         bp->eq_cons = 0;
6148         bp->eq_prod = NUM_EQ_DESC;
6149         bp->eq_cons_sb = BNX2X_EQ_INDEX;
6150         /* we want a warning message before the EQ gets overrun... */
6151         atomic_set(&bp->eq_spq_left,
6152                 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6153 }
6154
6155 /* called with netif_addr_lock_bh() */
6156 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6157                                unsigned long rx_mode_flags,
6158                                unsigned long rx_accept_flags,
6159                                unsigned long tx_accept_flags,
6160                                unsigned long ramrod_flags)
6161 {
6162         struct bnx2x_rx_mode_ramrod_params ramrod_param;
6163         int rc;
6164
6165         memset(&ramrod_param, 0, sizeof(ramrod_param));
6166
6167         /* Prepare ramrod parameters */
6168         ramrod_param.cid = 0;
6169         ramrod_param.cl_id = cl_id;
6170         ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6171         ramrod_param.func_id = BP_FUNC(bp);
6172
6173         ramrod_param.pstate = &bp->sp_state;
6174         ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6175
6176         ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6177         ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6178
6179         set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6180
6181         ramrod_param.ramrod_flags = ramrod_flags;
6182         ramrod_param.rx_mode_flags = rx_mode_flags;
6183
6184         ramrod_param.rx_accept_flags = rx_accept_flags;
6185         ramrod_param.tx_accept_flags = tx_accept_flags;
6186
6187         rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6188         if (rc < 0) {
6189                 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6190                 return rc;
6191         }
6192
6193         return 0;
6194 }
6195
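/* Translate the driver rx_mode into two accept-flag bitmaps: one for
 * the rx path and one for the internal (Tx switching) path.
 */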
6196 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6197                                    unsigned long *rx_accept_flags,
6198                                    unsigned long *tx_accept_flags)
6199 {
6200         /* Clear the flags first */
6201         *rx_accept_flags = 0;
6202         *tx_accept_flags = 0;
6203
6204         switch (rx_mode) {
6205         case BNX2X_RX_MODE_NONE:
6206                 /*
6207                  * 'drop all' supersedes any accept flags that may have been
6208                  * passed to the function.
6209                  */
6210                 break;
6211         case BNX2X_RX_MODE_NORMAL:
6212                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6213                 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6214                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6215
6216                 /* internal switching mode */
6217                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6218                 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6219                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6220
6221                 if (bp->accept_any_vlan) {
6222                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6223                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6224                 }
6225
6226                 break;
6227         case BNX2X_RX_MODE_ALLMULTI:
6228                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6229                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6230                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6231
6232                 /* internal switching mode */
6233                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6234                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6235                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6236
6237                 if (bp->accept_any_vlan) {
6238                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6239                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6240                 }
6241
6242                 break;
6243         case BNX2X_RX_MODE_PROMISC:
6244                 /* According to the definition of SI mode, an iface in promisc
6245                  * mode should receive matched and unmatched (in resolution of
6246                  * the port) unicast packets.
6247                  */
6248                 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6249                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6250                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6251                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6252
6253                 /* internal switching mode */
6254                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6255                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6256
6257                 if (IS_MF_SI(bp))
6258                         __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6259                 else
6260                         __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6261
6262                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6263                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6264
6265                 break;
6266         default:
6267                 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6268                 return -EINVAL;
6269         }
6270
6271         return 0;
6272 }
6273
6274 /* called with netif_addr_lock_bh() */
6275 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6276 {
6277         unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6278         unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6279         int rc;
6280
6281         if (!NO_FCOE(bp))
6282                 /* Configure rx_mode of FCoE Queue */
6283                 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6284
6285         rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6286                                      &tx_accept_flags);
6287         if (rc)
6288                 return rc;
6289
6290         __set_bit(RAMROD_RX, &ramrod_flags);
6291         __set_bit(RAMROD_TX, &ramrod_flags);
6292
6293         return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6294                                    rx_accept_flags, tx_accept_flags,
6295                                    ramrod_flags);
6296 }
6297
6298 static void bnx2x_init_internal_common(struct bnx2x *bp)
6299 {
6300         int i;
6301
6302         /* Zero this manually as its initialization is
6303            currently missing in the initTool */
6304         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6305                 REG_WR(bp, BAR_USTRORM_INTMEM +
6306                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
6307         if (!CHIP_IS_E1x(bp)) {
6308                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6309                         CHIP_INT_MODE_IS_BC(bp) ?
6310                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6311         }
6312 }
6313
6314 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6315 {
6316         switch (load_code) {
6317         case FW_MSG_CODE_DRV_LOAD_COMMON:
6318         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6319                 bnx2x_init_internal_common(bp);
6320                 /* no break */
6321
6322         case FW_MSG_CODE_DRV_LOAD_PORT:
6323                 /* nothing to do */
6324                 /* no break */
6325
6326         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6327                 /* internal memory per function is
6328                    initialized inside bnx2x_pf_init */
6329                 break;
6330
6331         default:
6332                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6333                 break;
6334         }
6335 }
6336
6337 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6338 {
6339         return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6340 }
6341
6342 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6343 {
6344         return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6345 }
6346
6347 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6348 {
6349         if (CHIP_IS_E1x(fp->bp))
6350                 return BP_L_ID(fp->bp) + fp->index;
6351         else    /* We want Client ID to be the same as IGU SB ID for 57712 */
6352                 return bnx2x_fp_igu_sb_id(fp);
6353 }
6354
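/* Per-queue init: assign the client/SB ids and SB indices, init the
 * per-CoS tx data, and - on the PF only - init the status block,
 * the queue state object and the classification objects.
 */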
6355 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6356 {
6357         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6358         u8 cos;
6359         unsigned long q_type = 0;
6360         u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6361         fp->rx_queue = fp_idx;
6362         fp->cid = fp_idx;
6363         fp->cl_id = bnx2x_fp_cl_id(fp);
6364         fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6365         fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6366         /* qZone id equals the FW (per path) client id */
6367         fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
6368
6369         /* init shortcut */
6370         fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6371
6372         /* Setup SB indices */
6373         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6374
6375         /* Configure Queue State object */
6376         __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6377         __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6378
6379         BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6380
6381         /* init tx data */
6382         for_each_cos_in_tx_queue(fp, cos) {
6383                 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6384                                   CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6385                                   FP_COS_TO_TXQ(fp, cos, bp),
6386                                   BNX2X_TX_SB_INDEX_BASE + cos, fp);
6387                 cids[cos] = fp->txdata_ptr[cos]->cid;
6388         }
6389
6390         /* nothing more for vf to do here */
6391         if (IS_VF(bp))
6392                 return;
6393
6394         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6395                       fp->fw_sb_id, fp->igu_sb_id);
6396         bnx2x_update_fpsb_idx(fp);
6397         bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6398                              fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6399                              bnx2x_sp_mapping(bp, q_rdata), q_type);
6400
6401         /*
6402          * Configure classification DBs: Always enable Tx switching
6403          */
6404         bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6405
6406         DP(NETIF_MSG_IFUP,
6407            "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
6408            fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6409            fp->igu_sb_id);
6410 }
6411
6412 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6413 {
6414         int i;
6415
6416         for (i = 1; i <= NUM_TX_RINGS; i++) {
6417                 struct eth_tx_next_bd *tx_next_bd =
6418                         &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6419
6420                 tx_next_bd->addr_hi =
6421                         cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6422                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6423                 tx_next_bd->addr_lo =
6424                         cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6425                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6426         }
6427
6428         *txdata->tx_cons_sb = cpu_to_le16(0);
6429
6430         SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6431         txdata->tx_db.data.zero_fill1 = 0;
6432         txdata->tx_db.data.prod = 0;
6433
6434         txdata->tx_pkt_prod = 0;
6435         txdata->tx_pkt_cons = 0;
6436         txdata->tx_bd_prod = 0;
6437         txdata->tx_bd_cons = 0;
6438         txdata->tx_pkt = 0;
6439 }
6440
6441 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6442 {
6443         int i;
6444
6445         for_each_tx_queue_cnic(bp, i)
6446                 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6447 }
6448
6449 static void bnx2x_init_tx_rings(struct bnx2x *bp)
6450 {
6451         int i;
6452         u8 cos;
6453
6454         for_each_eth_queue(bp, i)
6455                 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6456                         bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6457 }
6458
6459 static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6460 {
6461         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6462         unsigned long q_type = 0;
6463
6464         bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6465         bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6466                                                      BNX2X_FCOE_ETH_CL_ID_IDX);
6467         bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6468         bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6469         bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6470         bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6471         bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6472                           fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6473                           fp);
6474
6475         DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6476
6477         /* qZone id equals the FW (per path) client id */
6478         bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6479         /* init shortcut */
6480         bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6481                 bnx2x_rx_ustorm_prods_offset(fp);
6482
6483         /* Configure Queue State object */
6484         __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6485         __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6486
6487         /* No multi-CoS for FCoE L2 client */
6488         BUG_ON(fp->max_cos != 1);
6489
6490         bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6491                              &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6492                              bnx2x_sp_mapping(bp, q_rdata), q_type);
6493
6494         DP(NETIF_MSG_IFUP,
6495            "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6496            fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6497            fp->igu_sb_id);
6498 }
6499
6500 void bnx2x_nic_init_cnic(struct bnx2x *bp)
6501 {
6502         if (!NO_FCOE(bp))
6503                 bnx2x_init_fcoe_fp(bp);
6504
6505         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6506                       BNX2X_VF_ID_INVALID, false,
6507                       bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6508
6509         /* ensure status block indices were read */
6510         rmb();
6511         bnx2x_init_rx_rings_cnic(bp);
6512         bnx2x_init_tx_rings_cnic(bp);
6513
6514         /* flush all */
6515         mb();
6516         mmiowb();
6517 }
6518
6519 void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6520 {
6521         int i;
6522
6523         /* Setup NIC internals and enable interrupts */
6524         for_each_eth_queue(bp, i)
6525                 bnx2x_init_eth_fp(bp, i);
6526
6527         /* ensure status block indices were read */
6528         rmb();
6529         bnx2x_init_rx_rings(bp);
6530         bnx2x_init_tx_rings(bp);
6531
6532         if (IS_PF(bp)) {
6533                 /* Initialize MOD_ABS interrupts */
6534                 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6535                                        bp->common.shmem_base,
6536                                        bp->common.shmem2_base, BP_PORT(bp));
6537
6538                 /* initialize the default status block and sp ring */
6539                 bnx2x_init_def_sb(bp);
6540                 bnx2x_update_dsb_idx(bp);
6541                 bnx2x_init_sp_ring(bp);
6542         } else {
6543                 bnx2x_memset_stats(bp);
6544         }
6545 }
6546
6547 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6548 {
6549         bnx2x_init_eq_ring(bp);
6550         bnx2x_init_internal(bp, load_code);
6551         bnx2x_pf_init(bp);
6552         bnx2x_stats_init(bp);
6553
6554         /* flush all before enabling interrupts */
6555         mb();
6556         mmiowb();
6557
6558         bnx2x_int_enable(bp);
6559
6560         /* Check for SPIO5 */
6561         bnx2x_attn_int_deasserted0(bp,
6562                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6563                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6564 }
6565
6566 /* gzip service functions */
6567 static int bnx2x_gunzip_init(struct bnx2x *bp)
6568 {
6569         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6570                                             &bp->gunzip_mapping, GFP_KERNEL);
6571         if (bp->gunzip_buf  == NULL)
6572                 goto gunzip_nomem1;
6573
6574         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6575         if (bp->strm  == NULL)
6576                 goto gunzip_nomem2;
6577
6578         bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6579         if (bp->strm->workspace == NULL)
6580                 goto gunzip_nomem3;
6581
6582         return 0;
6583
6584 gunzip_nomem3:
6585         kfree(bp->strm);
6586         bp->strm = NULL;
6587
6588 gunzip_nomem2:
6589         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6590                           bp->gunzip_mapping);
6591         bp->gunzip_buf = NULL;
6592
6593 gunzip_nomem1:
6594         BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
6595         return -ENOMEM;
6596 }
6597
6598 static void bnx2x_gunzip_end(struct bnx2x *bp)
6599 {
6600         if (bp->strm) {
6601                 vfree(bp->strm->workspace);
6602                 kfree(bp->strm);
6603                 bp->strm = NULL;
6604         }
6605
6606         if (bp->gunzip_buf) {
6607                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6608                                   bp->gunzip_mapping);
6609                 bp->gunzip_buf = NULL;
6610         }
6611 }
6612
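/* Inflate a gzipped firmware image into bp->gunzip_buf: validate the
 * gzip magic and method bytes, skip the 10-byte header plus the
 * optional NUL-terminated name (FNAME flag), run a raw inflate
 * (negative windowBits) and record the output length in dwords.
 */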
6613 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6614 {
6615         int n, rc;
6616
6617         /* check gzip header */
6618         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6619                 BNX2X_ERR("Bad gzip header\n");
6620                 return -EINVAL;
6621         }
6622
6623         n = 10;
6624
6625 #define FNAME                           0x8
6626
6627         if (zbuf[3] & FNAME)
6628                 while ((zbuf[n++] != 0) && (n < len));
6629
6630         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6631         bp->strm->avail_in = len - n;
6632         bp->strm->next_out = bp->gunzip_buf;
6633         bp->strm->avail_out = FW_BUF_SIZE;
6634
6635         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6636         if (rc != Z_OK)
6637                 return rc;
6638
6639         rc = zlib_inflate(bp->strm, Z_FINISH);
6640         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6641                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6642                            bp->strm->msg);
6643
6644         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6645         if (bp->gunzip_outlen & 0x3)
6646                 netdev_err(bp->dev,
6647                            "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6648                                 bp->gunzip_outlen);
6649         bp->gunzip_outlen >>= 2;
6650
6651         zlib_inflateEnd(bp->strm);
6652
6653         if (rc == Z_STREAM_END)
6654                 return 0;
6655
6656         return rc;
6657 }
6658
6659 /* nic load/unload */
6660
6661 /*
6662  * General service functions
6663  */
6664
6665 /* send a NIG loopback debug packet */
6666 static void bnx2x_lb_pckt(struct bnx2x *bp)
6667 {
6668         u32 wb_write[3];
6669
6670         /* Ethernet source and destination addresses */
6671         wb_write[0] = 0x55555555;
6672         wb_write[1] = 0x55555555;
6673         wb_write[2] = 0x20;             /* SOP */
6674         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6675
6676         /* NON-IP protocol */
6677         wb_write[0] = 0x09000000;
6678         wb_write[1] = 0x55555555;
6679         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6680         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6681 }
6682
6683 /* some of the internal memories
6684  * are not directly readable from the driver;
6685  * to test them we send debug packets
6686  */
6687 static int bnx2x_int_mem_test(struct bnx2x *bp)
6688 {
6689         int factor;
6690         int count, i;
6691         u32 val = 0;
6692
6693         if (CHIP_REV_IS_FPGA(bp))
6694                 factor = 120;
6695         else if (CHIP_REV_IS_EMUL(bp))
6696                 factor = 200;
6697         else
6698                 factor = 1;
6699
6700         /* Disable inputs of parser neighbor blocks */
6701         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6702         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6703         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6704         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6705
6706         /*  Write 0 to parser credits for CFC search request */
6707         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6708
6709         /* send Ethernet packet */
6710         bnx2x_lb_pckt(bp);
6711
6712         /* TODO: should the NIG statistics be reset here? */
6713         /* Wait until NIG register shows 1 packet of size 0x10 */
6714         count = 1000 * factor;
6715         while (count) {
6716
6717                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6718                 val = *bnx2x_sp(bp, wb_data[0]);
6719                 if (val == 0x10)
6720                         break;
6721
6722                 usleep_range(10000, 20000);
6723                 count--;
6724         }
6725         if (val != 0x10) {
6726                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6727                 return -1;
6728         }
6729
6730         /* Wait until PRS register shows 1 packet */
6731         count = 1000 * factor;
6732         while (count) {
6733                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6734                 if (val == 1)
6735                         break;
6736
6737                 usleep_range(10000, 20000);
6738                 count--;
6739         }
6740         if (val != 0x1) {
6741                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6742                 return -2;
6743         }
6744
6745         /* Reset and init BRB, PRS */
6746         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6747         msleep(50);
6748         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6749         msleep(50);
6750         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6751         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6752
6753         DP(NETIF_MSG_HW, "part2\n");
6754
6755         /* Disable inputs of parser neighbor blocks */
6756         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6757         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6758         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6759         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6760
6761         /* Write 0 to parser credits for CFC search request */
6762         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6763
6764         /* send 10 Ethernet packets */
6765         for (i = 0; i < 10; i++)
6766                 bnx2x_lb_pckt(bp);
6767
6768         /* Wait until NIG register shows 10 + 1
6769            packets of size 11*0x10 = 0xb0 */
6770         count = 1000 * factor;
6771         while (count) {
6772
6773                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6774                 val = *bnx2x_sp(bp, wb_data[0]);
6775                 if (val == 0xb0)
6776                         break;
6777
6778                 usleep_range(10000, 20000);
6779                 count--;
6780         }
6781         if (val != 0xb0) {
6782                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6783                 return -3;
6784         }
6785
6786         /* Wait until PRS register shows 2 packets */
6787         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6788         if (val != 2)
6789                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6790
6791         /* Write 1 to parser credits for CFC search request */
6792         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6793
6794         /* Wait until PRS register shows 3 packets */
6795         msleep(10 * factor);
6796         /* the restored credit should let one more packet through the parser */
6797         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6798         if (val != 3)
6799                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6800
6801         /* clear NIG EOP FIFO */
6802         for (i = 0; i < 11; i++)
6803                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6804         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6805         if (val != 1) {
6806                 BNX2X_ERR("clear of NIG failed\n");
6807                 return -4;
6808         }
6809
6810         /* Reset and init BRB, PRS, NIG */
6811         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6812         msleep(50);
6813         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6814         msleep(50);
6815         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6816         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6817         if (!CNIC_SUPPORT(bp))
6818                 /* set NIC mode */
6819                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6820
6821         /* Enable inputs of parser neighbor blocks */
6822         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6823         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6824         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6825         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6826
6827         DP(NETIF_MSG_HW, "done\n");
6828
6829         return 0; /* OK */
6830 }
6831
6832 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6833 {
6834         u32 val;
6835
6836         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6837         if (!CHIP_IS_E1x(bp))
6838                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6839         else
6840                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6841         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6842         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6843         /*
6844          * mask read length error interrupts in brb for the parser
6845          * (parsing unit and 'checksum and crc' unit);
6846          * these errors are legal (the PU reads a fixed length and the CAC
6847          * can cause a read length error on truncated packets)
6848          */
6849         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6850         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6851         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6852         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6853         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6854         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6855 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6856 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6857         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6858         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6859         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6860 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6861 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6862         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6863         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6864         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6865         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6866 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6867 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6868
6869         val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT  |
6870                 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6871                 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6872         if (!CHIP_IS_E1x(bp))
6873                 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6874                         PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6875         REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6876
6877         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6878         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6879         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6880 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6881
6882         if (!CHIP_IS_E1x(bp))
6883                 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6884                 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6885
6886         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6887         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6888 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6889         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
6890 }
6891
6892 static void bnx2x_reset_common(struct bnx2x *bp)
6893 {
6894         u32 val = 0x1400;
6895
6896         /* reset_common */
6897         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6898                0xd3ffff7f);
6899
6900         if (CHIP_IS_E3(bp)) {
6901                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6902                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6903         }
6904
6905         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6906 }
6907
6908 static void bnx2x_setup_dmae(struct bnx2x *bp)
6909 {
6910         bp->dmae_ready = 0;
6911         spin_lock_init(&bp->dmae_lock);
6912 }
6913
6914 static void bnx2x_init_pxp(struct bnx2x *bp)
6915 {
6916         u16 devctl;
6917         int r_order, w_order;
6918
6919         pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6920         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6921         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6922         if (bp->mrrs == -1)
6923                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6924         else {
6925                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6926                 r_order = bp->mrrs;
6927         }
6928
6929         bnx2x_init_pxp_arb(bp, r_order, w_order);
6930 }
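
/* The order values above use the standard PCIe encoding in which order n
 * corresponds to 128 << n bytes (0 = 128B, 1 = 256B, ...); the devctl
 * fields are just shifted down to that scale before being handed to
 * bnx2x_init_pxp_arb().
 */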
6931
6932 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6933 {
6934         int is_required;
6935         u32 val;
6936         int port;
6937
6938         if (BP_NOMCP(bp))
6939                 return;
6940
6941         is_required = 0;
6942         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6943               SHARED_HW_CFG_FAN_FAILURE_MASK;
6944
6945         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6946                 is_required = 1;
6947
6948         /*
6949          * The fan failure mechanism is usually related to the PHY type since
6950          * the power consumption of the board is affected by the PHY. Currently,
6951          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6952          */
6953         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6954                 for (port = PORT_0; port < PORT_MAX; port++) {
6955                         is_required |=
6956                                 bnx2x_fan_failure_det_req(
6957                                         bp,
6958                                         bp->common.shmem_base,
6959                                         bp->common.shmem2_base,
6960                                         port);
6961                 }
6962
6963         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6964
6965         if (is_required == 0)
6966                 return;
6967
6968         /* Fan failure is indicated by SPIO 5 */
6969         bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6970
6971         /* set to active low mode */
6972         val = REG_RD(bp, MISC_REG_SPIO_INT);
6973         val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6974         REG_WR(bp, MISC_REG_SPIO_INT, val);
6975
6976         /* enable interrupt to signal the IGU */
6977         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6978         val |= MISC_SPIO_SPIO5;
6979         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6980 }
6981
6982 void bnx2x_pf_disable(struct bnx2x *bp)
6983 {
6984         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6985         val &= ~IGU_PF_CONF_FUNC_EN;
6986
6987         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6988         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6989         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6990 }
6991
6992 static void bnx2x__common_init_phy(struct bnx2x *bp)
6993 {
6994         u32 shmem_base[2], shmem2_base[2];
6995         /* Avoid common init in case MFW supports LFA */
6996         if (SHMEM2_RD(bp, size) >
6997             (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6998                 return;
6999         shmem_base[0] =  bp->common.shmem_base;
7000         shmem2_base[0] = bp->common.shmem2_base;
7001         if (!CHIP_IS_E1x(bp)) {
7002                 shmem_base[1] =
7003                         SHMEM2_RD(bp, other_shmem_base_addr);
7004                 shmem2_base[1] =
7005                         SHMEM2_RD(bp, other_shmem2_base_addr);
7006         }
7007         bnx2x_acquire_phy_lock(bp);
7008         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
7009                               bp->common.chip_id);
7010         bnx2x_release_phy_lock(bp);
7011 }
7012
7013 static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
7014 {
7015         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
7016         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7017         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7018         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7019         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7020
7021         /* make sure this value is 0 */
7022         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7023
7024         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7025         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7026         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7027         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7028 }
7029
7030 static void bnx2x_set_endianity(struct bnx2x *bp)
7031 {
7032 #ifdef __BIG_ENDIAN
7033         bnx2x_config_endianity(bp, 1);
7034 #else
7035         bnx2x_config_endianity(bp, 0);
7036 #endif
7037 }
7038
7039 static void bnx2x_reset_endianity(struct bnx2x *bp)
7040 {
7041         bnx2x_config_endianity(bp, 0);
7042 }
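
/* On big-endian hosts the PXP2 request and read clients are told to swap
 * DMA data (val = 1); little-endian hosts need no swapping (val = 0), and
 * the reset path always returns the chip to the no-swap default.
 */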
7043
7044 /**
7045  * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
7046  *
7047  * @bp:         driver handle
7048  */
7049 static int bnx2x_init_hw_common(struct bnx2x *bp)
7050 {
7051         u32 val;
7052
7053         DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));
7054
7055         /*
7056          * take the RESET lock to protect undi_unload flow from accessing
7057          * registers while we're resetting the chip
7058          */
7059         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7060
7061         bnx2x_reset_common(bp);
7062         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7063
7064         val = 0xfffc;
7065         if (CHIP_IS_E3(bp)) {
7066                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
7067                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
7068         }
7069         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7070
7071         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7072
7073         bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7074
7075         if (!CHIP_IS_E1x(bp)) {
7076                 u8 abs_func_id;
7077
7078                 /*
7079                  * In 4-port or 2-port mode we need to turn off master-enable
7080                  * for everyone; after that, turn it back on for self.
7081                  * So, multi-function or not, we always disable it
7082                  * for all functions on the given path, meaning 0,2,4,6 for
7083                  * path 0 and 1,3,5,7 for path 1.
7084                  */
7085                 for (abs_func_id = BP_PATH(bp);
7086                      abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
7087                         if (abs_func_id == BP_ABS_FUNC(bp)) {
7088                                 REG_WR(bp,
7089                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
7090                                     1);
7091                                 continue;
7092                         }
7093
7094                         bnx2x_pretend_func(bp, abs_func_id);
7095                         /* clear pf enable */
7096                         bnx2x_pf_disable(bp);
7097                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7098                 }
7099         }
7100
7101         bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7102         if (CHIP_IS_E1(bp)) {
7103                 /* enable HW interrupt from PXP on USDM overflow
7104                    bit 16 on INT_MASK_0 */
7105                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7106         }
7107
7108         bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7109         bnx2x_init_pxp(bp);
7110         bnx2x_set_endianity(bp);
7111         bnx2x_ilt_init_page_size(bp, INITOP_SET);
7112
7113         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7114                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7115
7116         /* let the HW do its magic ... */
7117         msleep(100);
7118         /* finish PXP init */
7119         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7120         if (val != 1) {
7121                 BNX2X_ERR("PXP2 CFG failed\n");
7122                 return -EBUSY;
7123         }
7124         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7125         if (val != 1) {
7126                 BNX2X_ERR("PXP2 RD_INIT failed\n");
7127                 return -EBUSY;
7128         }
7129
7130         /* Timers bug workaround E2 only. We need to set the entire ILT to
7131          * have entries with value "0" and valid bit on.
7132          * This needs to be done by the first PF that is loaded in a path
7133          * (i.e. common phase)
7134          */
7135         if (!CHIP_IS_E1x(bp)) {
7136 /* In E2 there is a bug in the timers block that can cause function 6 / 7
7137  * (i.e. vnic3) to start even if it is marked as "scan-off".
7138  * This occurs when a different function (func2,3) is being marked
7139  * as "scan-off". Real-life scenario for example: if a driver is being
7140  * load-unloaded while func6,7 are down. This will cause the timer to access
7141  * the ilt, translate to a logical address and send a request to read/write.
7142  * Since the ilt for the function that is down is not valid, this will cause
7143  * a translation error which is unrecoverable.
7144  * The Workaround is intended to make sure that when this happens nothing fatal
7145  * will occur. The workaround:
7146  *      1.  First PF driver which loads on a path will:
7147  *              a.  After taking the chip out of reset, by using pretend,
7148  *                  it will write "0" to the following registers of
7149  *                  the other vnics.
7150  *                  REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
7151  *                  REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
7152  *                  REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
7153  *                  And for itself it will write '1' to
7154  *                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
7155  *                  dmae-operations (writing to pram for example.)
7156  *                  note: can be done for only function 6,7 but cleaner this
7157  *                        way.
7158  *              b.  Write zero+valid to the entire ILT.
7159  *              c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
7160  *                  VNIC3 (of that port). The range allocated will be the
7161  *                  entire ILT. This is needed to prevent an ILT range error.
7162  *      2.  Any PF driver load flow:
7163  *              a.  ILT update with the physical addresses of the allocated
7164  *                  logical pages.
7165  *              b.  Wait 20msec. - note that this timeout is needed to make
7166  *                  sure there are no requests in one of the PXP internal
7167  *                  queues with "old" ILT addresses.
7168  *              c.  PF enable in the PGLC.
7169  *              d.  Clear the was_error of the PF in the PGLC. (could have
7170  *                  occurred while driver was down)
7171  *              e.  PF enable in the CFC (WEAK + STRONG)
7172  *              f.  Timers scan enable
7173  *      3.  PF driver unload flow:
7174  *              a.  Clear the Timers scan_en.
7175  *              b.  Polling for scan_on=0 for that PF.
7176  *              c.  Clear the PF enable bit in the PXP.
7177  *              d.  Clear the PF enable in the CFC (WEAK + STRONG)
7178  *              e.  Write zero+valid to all ILT entries (The valid bit must
7179  *                  stay set)
7180  *              f.  If this is VNIC 3 of a port then also init
7181  *                  first_timers_ilt_entry to zero and last_timers_ilt_entry
7182  *                  to the last entry in the ILT.
7183  *
7184  *      Notes:
7185  *      Currently the PF error in the PGLC is non-recoverable.
7186  *      In the future there will be a recovery routine for this error.
7187  *      Currently attention is masked.
7188  *      Having an MCP lock on the load/unload process does not guarantee that
7189  *      there is no Timer disable during Func6/7 enable. This is because the
7190  *      Timers scan is currently being cleared by the MCP on FLR.
7191  *      Step 2.d can be done only for PF6/7 and the driver can also check if
7192  *      there is error before clearing it. But the flow above is simpler and
7193  *      more general.
7194  *      All ILT entries are written by zero+valid and not just PF6/7
7195  *      ILT entries since in the future the ILT entries allocation for
7196  *      PF-s might be dynamic.
7197  */
7198                 struct ilt_client_info ilt_cli;
7199                 struct bnx2x_ilt ilt;
7200                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7201                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
7202
7203                 /* initialize dummy TM client */
7204                 ilt_cli.start = 0;
7205                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7206                 ilt_cli.client_num = ILT_CLIENT_TM;
7207
7208                 /* Step 1: set zeroes to all ilt page entries with valid bit on
7209                  * Step 2: set the timers first/last ilt entry to point
7210                  * to the entire range to prevent ILT range error for 3rd/4th
7211                  * vnic (this code assumes existence of the vnic)
7212                  *
7213                  * both steps performed by call to bnx2x_ilt_client_init_op()
7214                  * with dummy TM client
7215                  *
7216                  * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
7217                  * and its counterpart are split registers
7218                  */
7219                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7220                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7221                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7222
7223                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7224                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7225                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7226         }
7227
7228         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7229         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7230
7231         if (!CHIP_IS_E1x(bp)) {
7232                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7233                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7234                 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7235
7236                 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7237
7238                 /* let the HW do its magic ... */
7239                 do {
7240                         msleep(200);
7241                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7242                 } while (factor-- && (val != 1));
7243
7244                 if (val != 1) {
7245                         BNX2X_ERR("ATC_INIT failed\n");
7246                         return -EBUSY;
7247                 }
7248         }
7249
7250         bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7251
7252         bnx2x_iov_init_dmae(bp);
7253
7254         /* clean the DMAE memory */
7255         bp->dmae_ready = 1;
7256         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7257
7258         bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7259
7260         bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7261
7262         bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7263
7264         bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7265
7266         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7267         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7268         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7269         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7270
7271         bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7272
7273         /* QM queues pointers table */
7274         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7275
7276         /* soft reset pulse */
7277         REG_WR(bp, QM_REG_SOFT_RESET, 1);
7278         REG_WR(bp, QM_REG_SOFT_RESET, 0);
7279
7280         if (CNIC_SUPPORT(bp))
7281                 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7282
7283         bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7284
7285         if (!CHIP_REV_IS_SLOW(bp))
7286                 /* enable hw interrupt from doorbell Q */
7287                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7288
7289         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7290
7291         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7292         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7293
7294         if (!CHIP_IS_E1(bp))
7295                 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7296
7297         if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7298                 if (IS_MF_AFEX(bp)) {
7299                         /* require that VNTag and VLAN headers be
7300                          * received in AFEX mode
7301                          */
7302                         REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7303                         REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7304                         REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7305                         REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7306                         REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7307                 } else {
7308                         /* Bit-map indicating which L2 hdrs may appear
7309                          * after the basic Ethernet header
7310                          */
7311                         REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7312                                bp->path_has_ovlan ? 7 : 6);
7313                 }
7314         }
7315
7316         bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7317         bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7318         bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7319         bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7320
7321         if (!CHIP_IS_E1x(bp)) {
7322                 /* reset VFC memories */
7323                 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7324                            VFC_MEMORIES_RST_REG_CAM_RST |
7325                            VFC_MEMORIES_RST_REG_RAM_RST);
7326                 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7327                            VFC_MEMORIES_RST_REG_CAM_RST |
7328                            VFC_MEMORIES_RST_REG_RAM_RST);
7329
7330                 msleep(20);
7331         }
7332
7333         bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7334         bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7335         bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7336         bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7337
7338         /* sync semi rtc */
7339         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7340                0x80000000);
7341         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7342                0x80000000);
7343
7344         bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7345         bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7346         bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7347
7348         if (!CHIP_IS_E1x(bp)) {
7349                 if (IS_MF_AFEX(bp)) {
7350                         /* require that VNTag and VLAN headers be
7351                          * sent in AFEX mode
7352                          */
7353                         REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7354                         REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7355                         REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7356                         REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7357                         REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7358                 } else {
7359                         REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7360                                bp->path_has_ovlan ? 7 : 6);
7361                 }
7362         }
7363
7364         REG_WR(bp, SRC_REG_SOFT_RST, 1);
7365
7366         bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7367
7368         if (CNIC_SUPPORT(bp)) {
7369                 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7370                 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7371                 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7372                 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7373                 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7374                 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7375                 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7376                 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7377                 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7378                 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7379         }
7380         REG_WR(bp, SRC_REG_SOFT_RST, 0);
7381
7382         if (sizeof(union cdu_context) != 1024)
7383                 /* we currently assume that a context is 1024 bytes */
7384                 dev_alert(&bp->pdev->dev,
7385                           "please adjust the size of cdu_context(%ld)\n",
7386                           (long)sizeof(union cdu_context));
7387
7388         bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7389         val = (4 << 24) + (0 << 12) + 1024;
7390         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7391
7392         bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7393         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7394         /* enable context validation interrupt from CFC */
7395         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7396
7397         /* set the thresholds to prevent CFC/CDU race */
7398         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7399
7400         bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7401
7402         if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7403                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7404
7405         bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7406         bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7407
7408         /* Reset PCIE errors for debug */
7409         REG_WR(bp, 0x2814, 0xffffffff);
7410         REG_WR(bp, 0x3820, 0xffffffff);
7411
7412         if (!CHIP_IS_E1x(bp)) {
7413                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7414                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7415                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7416                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7417                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7418                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7419                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7420                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7421                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7422                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7423                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7424         }
7425
7426         bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7427         if (!CHIP_IS_E1(bp)) {
7428                 /* in E3 this is done in the per-port section */
7429                 if (!CHIP_IS_E3(bp))
7430                         REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7431         }
7432         if (CHIP_IS_E1H(bp))
7433                 /* not applicable for E2 (and above ...) */
7434                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7435
7436         if (CHIP_REV_IS_SLOW(bp))
7437                 msleep(200);
7438
7439         /* finish CFC init */
7440         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7441         if (val != 1) {
7442                 BNX2X_ERR("CFC LL_INIT failed\n");
7443                 return -EBUSY;
7444         }
7445         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7446         if (val != 1) {
7447                 BNX2X_ERR("CFC AC_INIT failed\n");
7448                 return -EBUSY;
7449         }
7450         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7451         if (val != 1) {
7452                 BNX2X_ERR("CFC CAM_INIT failed\n");
7453                 return -EBUSY;
7454         }
7455         REG_WR(bp, CFC_REG_DEBUG0, 0);
7456
7457         if (CHIP_IS_E1(bp)) {
7458                 /* read NIG statistic
7459                    to see if this is our first up since powerup */
7460                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7461                 val = *bnx2x_sp(bp, wb_data[0]);
7462
7463                 /* do internal memory self test */
7464                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7465                         BNX2X_ERR("internal mem self test failed\n");
7466                         return -EBUSY;
7467                 }
7468         }
7469
7470         bnx2x_setup_fan_failure_detection(bp);
7471
7472         /* clear PXP2 attentions */
7473         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7474
7475         bnx2x_enable_blocks_attention(bp);
7476         bnx2x_enable_blocks_parity(bp);
7477
7478         if (!BP_NOMCP(bp)) {
7479                 if (CHIP_IS_E1x(bp))
7480                         bnx2x__common_init_phy(bp);
7481         } else
7482                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7483
7484         if (SHMEM2_HAS(bp, netproc_fw_ver))
7485                 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7486
7487         return 0;
7488 }
7489
7490 /**
7491  * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7492  *
7493  * @bp:         driver handle
7494  */
7495 static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7496 {
7497         int rc = bnx2x_init_hw_common(bp);
7498
7499         if (rc)
7500                 return rc;
7501
7502         /* In E2 2-PORT mode, same ext phy is used for the two paths */
7503         if (!BP_NOMCP(bp))
7504                 bnx2x__common_init_phy(bp);
7505
7506         return 0;
7507 }
7508
7509 static int bnx2x_init_hw_port(struct bnx2x *bp)
7510 {
7511         int port = BP_PORT(bp);
7512         int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7513         u32 low, high;
7514         u32 val, reg;
7515
7516         DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
7517
7518         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7519
7520         bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7521         bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7522         bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7523
7524         /* Timers bug workaround: the common phase disables the pf_master bit
7525          * in pglue, and we need to enable it here before any dmae access is
7526          * attempted. Therefore we manually add the enable-master to the
7527          * port phase (it also happens in the function phase)
7528          */
7529         if (!CHIP_IS_E1x(bp))
7530                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7531
7532         bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7533         bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7534         bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7535         bnx2x_init_block(bp, BLOCK_QM, init_phase);
7536
7537         bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7538         bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7539         bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7540         bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7541
7542         /* QM cid (connection) count */
7543         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7544
7545         if (CNIC_SUPPORT(bp)) {
7546                 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7547                 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7548                 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7549         }
7550
7551         bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7552
7553         bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7554
7555         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7556
7557                 if (IS_MF(bp))
7558                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7559                 else if (bp->dev->mtu > 4096) {
7560                         if (bp->flags & ONE_PORT_FLAG)
7561                                 low = 160;
7562                         else {
7563                                 val = bp->dev->mtu;
7564                                 /* (24*1024 + val*4)/256 */
7565                                 low = 96 + (val/64) +
7566                                                 ((val % 64) ? 1 : 0);
7567                         }
7568                 } else
7569                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7570                 high = low + 56;        /* 14*1024/256 */
7571                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7572                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
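                /* Worked example: with mtu = 9000 on a two-port board the
                 * formula above gives low = 96 + 9000/64 + 1 = 237 blocks
                 * (i.e. (24*1024 + 9000*4)/256 rounded up) and
                 * high = 237 + 56 = 293 blocks.
                 */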
7573         }
7574
7575         if (CHIP_MODE_IS_4_PORT(bp))
7576                 REG_WR(bp, (BP_PORT(bp) ?
7577                             BRB1_REG_MAC_GUARANTIED_1 :
7578                             BRB1_REG_MAC_GUARANTIED_0), 40);
7579
7580         bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7581         if (CHIP_IS_E3B0(bp)) {
7582                 if (IS_MF_AFEX(bp)) {
7583                         /* configure headers for AFEX mode */
7584                         REG_WR(bp, BP_PORT(bp) ?
7585                                PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7586                                PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7587                         REG_WR(bp, BP_PORT(bp) ?
7588                                PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7589                                PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7590                         REG_WR(bp, BP_PORT(bp) ?
7591                                PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7592                                PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7593                 } else {
7594                         /* Ovlan exists only if we are in multi-function +
7595                          * switch-dependent mode; in switch-independent mode there
7596                          * are no ovlan headers
7597                          */
7598                         REG_WR(bp, BP_PORT(bp) ?
7599                                PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7600                                PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7601                                (bp->path_has_ovlan ? 7 : 6));
7602                 }
7603         }
7604
7605         bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7606         bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7607         bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7608         bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7609
7610         bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7611         bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7612         bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7613         bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7614
7615         bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7616         bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7617
7618         bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7619
7620         if (CHIP_IS_E1x(bp)) {
7621                 /* configure PBF to work without PAUSE for MTU 9000 */
7622                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7623
7624                 /* update threshold */
7625                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7626                 /* update init credit */
7627                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
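                /* The threshold and credit above are in 16-byte units:
                 * 9040/16 = 565 units covers a 9040-byte worst-case frame
                 * (presumably MTU 9000 plus header overhead); the extra
                 * 553 - 22 units look like inherited tuning constants for
                 * the E1x PBF pipeline.
                 */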
7628
7629                 /* probe changes */
7630                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7631                 udelay(50);
7632                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7633         }
7634
7635         if (CNIC_SUPPORT(bp))
7636                 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7637
7638         bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7639         bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7640
7641         if (CHIP_IS_E1(bp)) {
7642                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7643                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7644         }
7645         bnx2x_init_block(bp, BLOCK_HC, init_phase);
7646
7647         bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7648
7649         bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7650         /* init aeu_mask_attn_func_0/1:
7651          *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
7652          *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
7653          *             bits 4-7 are used for "per vn group attention" */
7654         val = IS_MF(bp) ? 0xF7 : 0x7;
7655         /* Enable DCBX attention for all but E1 */
7656         val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7657         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7658
7659         /* SCPAD_PARITY should NOT trigger close the gates */
7660         reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7661         REG_WR(bp, reg,
7662                REG_RD(bp, reg) &
7663                ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7664
7665         reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7666         REG_WR(bp, reg,
7667                REG_RD(bp, reg) &
7668                ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7669
7670         bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7671
7672         if (!CHIP_IS_E1x(bp)) {
7673                 /* Bit-map indicating which L2 hdrs may appear after the
7674                  * basic Ethernet header
7675                  */
7676                 if (IS_MF_AFEX(bp))
7677                         REG_WR(bp, BP_PORT(bp) ?
7678                                NIG_REG_P1_HDRS_AFTER_BASIC :
7679                                NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7680                 else
7681                         REG_WR(bp, BP_PORT(bp) ?
7682                                NIG_REG_P1_HDRS_AFTER_BASIC :
7683                                NIG_REG_P0_HDRS_AFTER_BASIC,
7684                                IS_MF_SD(bp) ? 7 : 6);
7685
7686                 if (CHIP_IS_E3(bp))
7687                         REG_WR(bp, BP_PORT(bp) ?
7688                                    NIG_REG_LLH1_MF_MODE :
7689                                    NIG_REG_LLH_MF_MODE, IS_MF(bp));
7690         }
7691         if (!CHIP_IS_E3(bp))
7692                 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7693
7694         if (!CHIP_IS_E1(bp)) {
7695                 /* 0x2 disable mf_ov, 0x1 enable */
7696                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7697                        (IS_MF_SD(bp) ? 0x1 : 0x2));
7698
7699                 if (!CHIP_IS_E1x(bp)) {
7700                         val = 0;
7701                         switch (bp->mf_mode) {
7702                         case MULTI_FUNCTION_SD:
7703                                 val = 1;
7704                                 break;
7705                         case MULTI_FUNCTION_SI:
7706                         case MULTI_FUNCTION_AFEX:
7707                                 val = 2;
7708                                 break;
7709                         }
7710
7711                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7712                                                   NIG_REG_LLH0_CLS_TYPE), val);
7713                 }
7714                 {
7715                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7716                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7717                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7718                 }
7719         }
7720
7721         /* If SPIO5 is set to generate interrupts, enable it for this port */
7722         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7723         if (val & MISC_SPIO_SPIO5) {
7724                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7725                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7726                 val = REG_RD(bp, reg_addr);
7727                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7728                 REG_WR(bp, reg_addr, val);
7729         }
7730
7731         if (CHIP_IS_E3B0(bp))
7732                 bp->flags |= PTP_SUPPORTED;
7733
7734         return 0;
7735 }
7736
7737 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7738 {
7739         int reg;
7740         u32 wb_write[2];
7741
7742         if (CHIP_IS_E1(bp))
7743                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7744         else
7745                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7746
7747         wb_write[0] = ONCHIP_ADDR1(addr);
7748         wb_write[1] = ONCHIP_ADDR2(addr);
7749         REG_WR_DMAE(bp, reg, wb_write, 2);
7750 }
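
/* Each on-chip address-translation entry is 64 bits, 8 bytes per index in
 * the register file, so ONCHIP_ADDR1/2 split the DMA address into the two
 * dwords that the wide-bus DMAE write above expects.
 */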
7751
7752 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7753 {
7754         u32 data, ctl, cnt = 100;
7755         u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7756         u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7757         u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7758         u32 sb_bit =  1 << (idu_sb_id%32);
7759         u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7760         u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7761
7762         /* Not supported in BC mode */
7763         if (CHIP_INT_MODE_IS_BC(bp))
7764                 return;
7765
7766         data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7767                         << IGU_REGULAR_CLEANUP_TYPE_SHIFT)      |
7768                 IGU_REGULAR_CLEANUP_SET                         |
7769                 IGU_REGULAR_BCLEANUP;
7770
7771         ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
7772               func_encode << IGU_CTRL_REG_FID_SHIFT             |
7773               IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7774
7775         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7776                          data, igu_addr_data);
7777         REG_WR(bp, igu_addr_data, data);
7778         mmiowb();
7779         barrier();
7780         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7781                           ctl, igu_addr_ctl);
7782         REG_WR(bp, igu_addr_ctl, ctl);
7783         mmiowb();
7784         barrier();
7785
7786         /* wait for clean up to finish */
7787         while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7788                 msleep(20);
7789
7790         if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7791                 DP(NETIF_MSG_HW,
7792                    "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7793                           idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7794         }
7795 }
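
/* The cleanup is driven through the IGU command interface as a data/ctl
 * register pair: the data dword requests a type-0 cstorm cleanup with the
 * SET and BCLEANUP flags, the ctl dword encodes the target SB address, the
 * function ID and a write command type, and completion is then polled via
 * the per-SB bit in the cstorm cleanup ack register.
 */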
7796
7797 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7798 {
7799         bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7800 }
7801
7802 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7803 {
7804         u32 i, base = FUNC_ILT_BASE(func);
7805         for (i = base; i < base + ILT_PER_FUNC; i++)
7806                 bnx2x_ilt_wr(bp, i, 0);
7807 }
7808
7809 static void bnx2x_init_searcher(struct bnx2x *bp)
7810 {
7811         int port = BP_PORT(bp);
7812         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7813         /* T1 hash bits value determines the T1 number of entries */
7814         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7815 }
7816
7817 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7818 {
7819         int rc;
7820         struct bnx2x_func_state_params func_params = {NULL};
7821         struct bnx2x_func_switch_update_params *switch_update_params =
7822                 &func_params.params.switch_update;
7823
7824         /* Prepare parameters for function state transitions */
7825         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7826         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7827
7828         func_params.f_obj = &bp->func_obj;
7829         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7830
7831         /* Function parameters */
7832         __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7833                   &switch_update_params->changes);
7834         if (suspend)
7835                 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7836                           &switch_update_params->changes);
7837
7838         rc = bnx2x_func_state_change(bp, &func_params);
7839
7840         return rc;
7841 }
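
/* bnx2x_reset_nic_mode() below relies on this helper twice: once with
 * suspend = 1 before flipping PRS_REG_NIC_MODE, and once with suspend = 0
 * to resume Tx switching afterwards.
 */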
7842
7843 static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7844 {
7845         int rc, i, port = BP_PORT(bp);
7846         int vlan_en = 0, mac_en[NUM_MACS];
7847
7848         /* Close input from network */
7849         if (bp->mf_mode == SINGLE_FUNCTION) {
7850                 bnx2x_set_rx_filter(&bp->link_params, 0);
7851         } else {
7852                 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7853                                    NIG_REG_LLH0_FUNC_EN);
7854                 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7855                           NIG_REG_LLH0_FUNC_EN, 0);
7856                 for (i = 0; i < NUM_MACS; i++) {
7857                         mac_en[i] = REG_RD(bp, port ?
7858                                              (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7859                                               4 * i) :
7860                                              (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7861                                               4 * i));
7862                         REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7863                                               4 * i) :
7864                                   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7865                 }
7866         }
7867
7868         /* Close BMC to host */
7869         REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7870                NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7871
7872         /* Suspend Tx switching to the PF. Completion of this ramrod
7873          * further guarantees that all the packets of that PF / child
7874          * VFs in BRB were processed by the Parser, so it is safe to
7875          * change the NIC_MODE register.
7876          */
7877         rc = bnx2x_func_switch_update(bp, 1);
7878         if (rc) {
7879                 BNX2X_ERR("Can't suspend tx-switching!\n");
7880                 return rc;
7881         }
7882
7883         /* Change NIC_MODE register */
7884         REG_WR(bp, PRS_REG_NIC_MODE, 0);
7885
7886         /* Open input from network */
7887         if (bp->mf_mode == SINGLE_FUNCTION) {
7888                 bnx2x_set_rx_filter(&bp->link_params, 1);
7889         } else {
7890                 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7891                           NIG_REG_LLH0_FUNC_EN, vlan_en);
7892                 for (i = 0; i < NUM_MACS; i++) {
7893                         REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7894                                               4 * i) :
7895                                   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7896                                   mac_en[i]);
7897                 }
7898         }
7899
7900         /* Enable BMC to host */
7901         REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7902                NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7903
7904         /* Resume Tx switching to the PF */
7905         rc = bnx2x_func_switch_update(bp, 0);
7906         if (rc) {
7907                 BNX2X_ERR("Can't resume tx-switching!\n");
7908                 return rc;
7909         }
7910
7911         DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7912         return 0;
7913 }
7914
7915 int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7916 {
7917         int rc;
7918
7919         bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7920
7921         if (CONFIGURE_NIC_MODE(bp)) {
7922                 /* Configure searcher as part of function hw init */
7923                 bnx2x_init_searcher(bp);
7924
7925                 /* Reset NIC mode */
7926                 rc = bnx2x_reset_nic_mode(bp);
7927                 if (rc)
7928                         BNX2X_ERR("Can't change NIC mode!\n");
7929                 return rc;
7930         }
7931
7932         return 0;
7933 }
7934
7935 /* A previous driver DMAE transaction may have occurred when the pre-boot stage
7936  * ended and boot began, or when a kdump kernel was loaded. Either case would
7937  * invalidate the addresses of the transaction, resulting in the was-error bit
7938  * being set in the pci, causing all hw-to-host pcie transactions to time out.
7939  * If this happened we want to clear, in the pglueb, both the interrupt which
7940  * detected this and the was-error bit itself.
7941  */
7942 static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7943 {
7944         if (!CHIP_IS_E1x(bp))
7945                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7946                        1 << BP_ABS_FUNC(bp));
7947 }
7948
7949 static int bnx2x_init_hw_func(struct bnx2x *bp)
7950 {
7951         int port = BP_PORT(bp);
7952         int func = BP_FUNC(bp);
7953         int init_phase = PHASE_PF0 + func;
7954         struct bnx2x_ilt *ilt = BP_ILT(bp);
7955         u16 cdu_ilt_start;
7956         u32 addr, val;
7957         u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7958         int i, main_mem_width, rc;
7959
7960         DP(NETIF_MSG_HW, "starting func init  func %d\n", func);
7961
7962         /* FLR cleanup */
7963         if (!CHIP_IS_E1x(bp)) {
7964                 rc = bnx2x_pf_flr_clnup(bp);
7965                 if (rc) {
7966                         bnx2x_fw_dump(bp);
7967                         return rc;
7968                 }
7969         }
7970
7971         /* set MSI reconfigure capability */
7972         if (bp->common.int_block == INT_BLOCK_HC) {
7973                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7974                 val = REG_RD(bp, addr);
7975                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7976                 REG_WR(bp, addr, val);
7977         }
7978
7979         bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7980         bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7981
7982         ilt = BP_ILT(bp);
7983         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7984
7985         if (IS_SRIOV(bp))
7986                 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7987         cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7988
7989         /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
7990          * those of the VFs, so the start line should be reset
7991          */
7992         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7993         for (i = 0; i < L2_ILT_LINES(bp); i++) {
7994                 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7995                 ilt->lines[cdu_ilt_start + i].page_mapping =
7996                         bp->context[i].cxt_mapping;
7997                 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7998         }
7999
8000         bnx2x_ilt_init_op(bp, INITOP_SET);
8001
8002         if (!CONFIGURE_NIC_MODE(bp)) {
8003                 bnx2x_init_searcher(bp);
8004                 REG_WR(bp, PRS_REG_NIC_MODE, 0);
8005                 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
8006         } else {
8007                 /* Set NIC mode */
8008                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
8009                 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
8010         }
8011
8012         if (!CHIP_IS_E1x(bp)) {
8013                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
8014
8015                 /* Turn on a single ISR mode in IGU if driver is going to use
8016                  * INT#x or MSI
8017                  */
8018                 if (!(bp->flags & USING_MSIX_FLAG))
8019                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
8020                 /*
8021                  * Timers bug workaround: function init part.
8022                  * Wait 20msec after initializing the ILT to make
8023                  * sure there are no requests left in any of the
8024                  * PXP internal queues with "old" ILT addresses.
8025                  */
8026                 msleep(20);
8027                 /*
8028                  * Master enable - needed because WB DMAE writes are
8029                  * performed before this register is re-initialized
8030                  * as part of the regular function init.
8031                  */
8032                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
8033                 /* Enable the function in IGU */
8034                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8035         }
8036
8037         bp->dmae_ready = 1;
8038
8039         bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8040
8041         bnx2x_clean_pglue_errors(bp);
8042
8043         bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8044         bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8045         bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8046         bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8047         bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8048         bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8049         bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8050         bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8051         bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8052         bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8053         bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8054         bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8055         bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8056
8057         if (!CHIP_IS_E1x(bp))
8058                 REG_WR(bp, QM_REG_PF_EN, 1);
8059
8060         if (!CHIP_IS_E1x(bp)) {
8061                 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8062                 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8063                 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8064                 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8065         }
8066         bnx2x_init_block(bp, BLOCK_QM, init_phase);
8067
8068         bnx2x_init_block(bp, BLOCK_TM, init_phase);
8069         bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8070         REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
8071
8072         bnx2x_iov_init_dq(bp);
8073
8074         bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8075         bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8076         bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8077         bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8078         bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8079         bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8080         bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8081         bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8082         bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8083         if (!CHIP_IS_E1x(bp))
8084                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8085
8086         bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8087
8088         bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8089
8090         if (!CHIP_IS_E1x(bp))
8091                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8092
8093         if (IS_MF(bp)) {
8094                 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8095                         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8096                         REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8097                                bp->mf_ov);
8098                 }
8099         }
8100
8101         bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8102
8103         /* HC init per function */
8104         if (bp->common.int_block == INT_BLOCK_HC) {
8105                 if (CHIP_IS_E1H(bp)) {
8106                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8107
8108                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8109                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8110                 }
8111                 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8112
8113         } else {
8114                 int num_segs, sb_idx, prod_offset;
8115
8116                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8117
8118                 if (!CHIP_IS_E1x(bp)) {
8119                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8120                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8121                 }
8122
8123                 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8124
8125                 if (!CHIP_IS_E1x(bp)) {
8126                         int dsb_idx = 0;
8127                         /*
8128                          * Producer memory:
8129                          * E2 mode: addresses 0-135 map to the mapping memory;
8130                          * 136 - PF0 default prod; 137 - PF1 default prod;
8131                          * 138 - PF2 default prod; 139 - PF3 default prod;
8132                          * 140 - PF0 attn prod;    141 - PF1 attn prod;
8133                          * 142 - PF2 attn prod;    143 - PF3 attn prod;
8134                          * 144-147 reserved.
8135                          *
8136                          * E1.5 mode - in backward compatible mode,
8137                          * for each non-default SB, each even line in the
8138                          * memory holds the U producer and each odd line
8139                          * holds the C producer. The first 128 producers are
8140                          * for NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The
8141                          * last 20 producers are for the DSBs, one per PF.
8142                          * Each PF has five segments (the order inside each
8143                          * segment is PF0, PF1, PF2, PF3): 128-131 U prods,
8144                          * 132-135 C prods, 136-139 X prods, 140-143 T prods,
8145                          * 144-147 attn prods.
8146                          */
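                        /* Worked example (hypothetical values): in normal
                         * (non-BC) mode with num_segs == 2 and
                         * igu_base_sb == 16, the SB at sb_idx == 3 owns
                         * producer lines (16 + 3) * 2 = 38 and 39; the loop
                         * below zeroes each of them through
                         * IGU_REG_PROD_CONS_MEMORY + line * 4.
                         */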
8147                         /* non-default-status-blocks */
8148                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8149                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8150                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8151                                 prod_offset = (bp->igu_base_sb + sb_idx) *
8152                                         num_segs;
8153
8154                                 for (i = 0; i < num_segs; i++) {
8155                                         addr = IGU_REG_PROD_CONS_MEMORY +
8156                                                         (prod_offset + i) * 4;
8157                                         REG_WR(bp, addr, 0);
8158                                 }
8159                                 /* send consumer update with value 0 */
8160                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8161                                              USTORM_ID, 0, IGU_INT_NOP, 1);
8162                                 bnx2x_igu_clear_sb(bp,
8163                                                    bp->igu_base_sb + sb_idx);
8164                         }
8165
8166                         /* default-status-blocks */
8167                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8168                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8169
8170                         if (CHIP_MODE_IS_4_PORT(bp))
8171                                 dsb_idx = BP_FUNC(bp);
8172                         else
8173                                 dsb_idx = BP_VN(bp);
8174
8175                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8176                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
8177                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
8178
8179                         /*
8180                          * igu prods come in chunks of E1HVN_MAX (4),
8181                          * regardless of the current chip mode
8182                          */
8183                         for (i = 0; i < (num_segs * E1HVN_MAX);
8184                              i += E1HVN_MAX) {
8185                                 addr = IGU_REG_PROD_CONS_MEMORY +
8186                                                         (prod_offset + i)*4;
8187                                 REG_WR(bp, addr, 0);
8188                         }
8189                         /* send consumer update with 0 */
8190                         if (CHIP_INT_MODE_IS_BC(bp)) {
8191                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8192                                              USTORM_ID, 0, IGU_INT_NOP, 1);
8193                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8194                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
8195                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8196                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
8197                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8198                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
8199                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8200                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
8201                         } else {
8202                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8203                                              USTORM_ID, 0, IGU_INT_NOP, 1);
8204                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8205                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
8206                         }
8207                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8208
8209                         /* !!! These should become driver const once
8210                            rf-tool supports split-68 const */
8211                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8212                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8213                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8214                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8215                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8216                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8217                 }
8218         }
8219
8220         /* Reset PCIE errors for debug */
8221         REG_WR(bp, 0x2114, 0xffffffff);
8222         REG_WR(bp, 0x2120, 0xffffffff);
8223
8224         if (CHIP_IS_E1x(bp)) {
8225                 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
8226                 main_mem_base = HC_REG_MAIN_MEMORY +
8227                                 BP_PORT(bp) * (main_mem_size * 4);
8228                 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
8229                 main_mem_width = 8;
8230
8231                 val = REG_RD(bp, main_mem_prty_clr);
8232                 if (val)
8233                         DP(NETIF_MSG_HW,
8234                            "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
8235                            val);
8236
8237                 /* Clear "false" parity errors in MSI-X table */
8238                 for (i = main_mem_base;
8239                      i < main_mem_base + main_mem_size * 4;
8240                      i += main_mem_width) {
8241                         bnx2x_read_dmae(bp, i, main_mem_width / 4);
8242                         bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8243                                          i, main_mem_width / 4);
8244                 }
8245                 /* Clear HC parity attention */
8246                 REG_RD(bp, main_mem_prty_clr);
8247         }
8248
8249 #ifdef BNX2X_STOP_ON_ERROR
8250         /* Enable STORMs SP logging */
8251         REG_WR8(bp, BAR_USTRORM_INTMEM +
8252                USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8253         REG_WR8(bp, BAR_TSTRORM_INTMEM +
8254                TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8255         REG_WR8(bp, BAR_CSTRORM_INTMEM +
8256                CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8257         REG_WR8(bp, BAR_XSTRORM_INTMEM +
8258                XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8259 #endif
8260
8261         bnx2x_phy_probe(&bp->link_params);
8262
8263         return 0;
8264 }
8265
8266 void bnx2x_free_mem_cnic(struct bnx2x *bp)
8267 {
8268         bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8269
8270         if (!CHIP_IS_E1x(bp))
8271                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8272                                sizeof(struct host_hc_status_block_e2));
8273         else
8274                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8275                                sizeof(struct host_hc_status_block_e1x));
8276
8277         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8278 }
8279
8280 void bnx2x_free_mem(struct bnx2x *bp)
8281 {
8282         int i;
8283
8284         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8285                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8286
8287         if (IS_VF(bp))
8288                 return;
8289
8290         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8291                        sizeof(struct host_sp_status_block));
8292
8293         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8294                        sizeof(struct bnx2x_slowpath));
8295
8296         for (i = 0; i < L2_ILT_LINES(bp); i++)
8297                 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8298                                bp->context[i].size);
8299         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8300
8301         BNX2X_FREE(bp->ilt->lines);
8302
8303         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8304
8305         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8306                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
8307
8308         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8309
8310         bnx2x_iov_free_mem(bp);
8311 }
8312
8313 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8314 {
8315         if (!CHIP_IS_E1x(bp)) {
8316                 /* size = the status block + ramrod buffers */
8317                 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8318                                                     sizeof(struct host_hc_status_block_e2));
8319                 if (!bp->cnic_sb.e2_sb)
8320                         goto alloc_mem_err;
8321         } else {
8322                 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8323                                                      sizeof(struct host_hc_status_block_e1x));
8324                 if (!bp->cnic_sb.e1x_sb)
8325                         goto alloc_mem_err;
8326         }
8327
8328         if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8329                 /* allocate searcher T2 table, as it wasn't allocated before */
8330                 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8331                 if (!bp->t2)
8332                         goto alloc_mem_err;
8333         }
8334
8335         /* write address to which L5 should insert its values */
8336         bp->cnic_eth_dev.addr_drv_info_to_mcp =
8337                 &bp->slowpath->drv_info_to_mcp;
8338
8339         if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8340                 goto alloc_mem_err;
8341
8342         return 0;
8343
8344 alloc_mem_err:
8345         bnx2x_free_mem_cnic(bp);
8346         BNX2X_ERR("Can't allocate memory\n");
8347         return -ENOMEM;
8348 }
8349
8350 int bnx2x_alloc_mem(struct bnx2x *bp)
8351 {
8352         int i, allocated, context_size;
8353
8354         if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8355                 /* allocate searcher T2 table */
8356                 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8357                 if (!bp->t2)
8358                         goto alloc_mem_err;
8359         }
8360
8361         bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8362                                              sizeof(struct host_sp_status_block));
8363         if (!bp->def_status_blk)
8364                 goto alloc_mem_err;
8365
8366         bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8367                                        sizeof(struct bnx2x_slowpath));
8368         if (!bp->slowpath)
8369                 goto alloc_mem_err;
8370
8371         /* Allocate memory for CDU context:
8372          * This memory is allocated separately and not in the generic ILT
8373          * functions because CDU differs in a few aspects:
8374          * 1. There are multiple entities allocating memory for context -
8375          * the 'regular' driver, CNIC and the SRIOV driver. Each separately
8376          * controls its own ILT lines.
8377          * 2. Since the CDU page-size is not a single 4KB page (which is the
8378          * case for the other ILT clients), to be efficient we want to support
8379          * allocation of sub-page-size in the last entry.
8380          * 3. Context pointers are used by the driver to pass to FW / update
8381          * the context (for the other ILT clients the pointers are used just
8382          * to free the memory during unload).
8383          */
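        /* Illustrative sketch (hypothetical sizes): if context_size were
         * 100KB and CDU_ILT_PAGE_SZ 32KB, the loop below would allocate
         * three 32KB pages plus one final 4KB sub-page entry, so only what
         * the last chunk actually needs is allocated.
         */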
8384         context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8385
8386         for (i = 0, allocated = 0; allocated < context_size; i++) {
8387                 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8388                                           (context_size - allocated));
8389                 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8390                                                       bp->context[i].size);
8391                 if (!bp->context[i].vcxt)
8392                         goto alloc_mem_err;
8393                 allocated += bp->context[i].size;
8394         }
8395         bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8396                                  GFP_KERNEL);
8397         if (!bp->ilt->lines)
8398                 goto alloc_mem_err;
8399
8400         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8401                 goto alloc_mem_err;
8402
8403         if (bnx2x_iov_alloc_mem(bp))
8404                 goto alloc_mem_err;
8405
8406         /* Slow path ring */
8407         bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8408         if (!bp->spq)
8409                 goto alloc_mem_err;
8410
8411         /* EQ */
8412         bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8413                                       BCM_PAGE_SIZE * NUM_EQ_PAGES);
8414         if (!bp->eq_ring)
8415                 goto alloc_mem_err;
8416
8417         return 0;
8418
8419 alloc_mem_err:
8420         bnx2x_free_mem(bp);
8421         BNX2X_ERR("Can't allocate memory\n");
8422         return -ENOMEM;
8423 }
8424
8425 /*
8426  * Init service functions
8427  */
8428
8429 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8430                       struct bnx2x_vlan_mac_obj *obj, bool set,
8431                       int mac_type, unsigned long *ramrod_flags)
8432 {
8433         int rc;
8434         struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8435
8436         memset(&ramrod_param, 0, sizeof(ramrod_param));
8437
8438         /* Fill general parameters */
8439         ramrod_param.vlan_mac_obj = obj;
8440         ramrod_param.ramrod_flags = *ramrod_flags;
8441
8442         /* Fill a user request section if needed */
8443         if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8444                 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8445
8446                 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8447
8448                 /* Set the command: ADD or DEL */
8449                 if (set)
8450                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8451                 else
8452                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8453         }
8454
8455         rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8456
8457         if (rc == -EEXIST) {
8458                 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8459                 /* do not treat adding the same MAC as an error */
8460                 rc = 0;
8461         } else if (rc < 0)
8462                 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8463
8464         return rc;
8465 }
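
/* Usage sketch (mirrors bnx2x_set_eth_mac() below): add the primary ETH MAC
 * and wait for the ramrod to complete:
 *
 *	unsigned long ramrod_flags = 0;
 *
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
 *			  true, BNX2X_ETH_MAC, &ramrod_flags);
 */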
8466
8467 int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8468                        struct bnx2x_vlan_mac_obj *obj, bool set,
8469                        unsigned long *ramrod_flags)
8470 {
8471         int rc;
8472         struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8473
8474         memset(&ramrod_param, 0, sizeof(ramrod_param));
8475
8476         /* Fill general parameters */
8477         ramrod_param.vlan_mac_obj = obj;
8478         ramrod_param.ramrod_flags = *ramrod_flags;
8479
8480         /* Fill a user request section if needed */
8481         if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8482                 ramrod_param.user_req.u.vlan.vlan = vlan;
8483                 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
8484                 /* Set the command: ADD or DEL */
8485                 if (set)
8486                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8487                 else
8488                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8489         }
8490
8491         rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8492
8493         if (rc == -EEXIST) {
8494                 /* Do not treat adding the same vlan as an error. */
8495                 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8496                 rc = 0;
8497         } else if (rc < 0) {
8498                 BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
8499         }
8500
8501         return rc;
8502 }
8503
8504 static int bnx2x_del_all_vlans(struct bnx2x *bp)
8505 {
8506         struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8507         unsigned long ramrod_flags = 0, vlan_flags = 0;
8508         struct bnx2x_vlan_entry *vlan;
8509         int rc;
8510
8511         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8512         __set_bit(BNX2X_VLAN, &vlan_flags);
8513         rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
8514         if (rc)
8515                 return rc;
8516
8517         /* Mark that the hw no longer holds any of the entries */
8518         list_for_each_entry(vlan, &bp->vlan_reg, link)
8519                 vlan->hw = false;
8520         bp->vlan_cnt = 0;
8521
8522         return 0;
8523 }
8524
8525 int bnx2x_del_all_macs(struct bnx2x *bp,
8526                        struct bnx2x_vlan_mac_obj *mac_obj,
8527                        int mac_type, bool wait_for_comp)
8528 {
8529         int rc;
8530         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8531
8532         /* Wait for completion of the request, if asked to */
8533         if (wait_for_comp)
8534                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8535
8536         /* Set the mac type of addresses we want to clear */
8537         __set_bit(mac_type, &vlan_mac_flags);
8538
8539         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8540         if (rc < 0)
8541                 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8542
8543         return rc;
8544 }
8545
8546 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8547 {
8548         if (IS_PF(bp)) {
8549                 unsigned long ramrod_flags = 0;
8550
8551                 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8552                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8553                 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8554                                          &bp->sp_objs->mac_obj, set,
8555                                          BNX2X_ETH_MAC, &ramrod_flags);
8556         } else { /* vf */
8557                 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8558                                              bp->fp->index, set);
8559         }
8560 }
8561
8562 int bnx2x_setup_leading(struct bnx2x *bp)
8563 {
8564         if (IS_PF(bp))
8565                 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8566         else /* VF */
8567                 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8568 }
8569
8570 /**
8571  * bnx2x_set_int_mode - configure interrupt mode
8572  *
8573  * @bp:         driver handle
8574  *
8575  * In case of MSI-X it will also try to enable MSI-X.
8576  */
8577 int bnx2x_set_int_mode(struct bnx2x *bp)
8578 {
8579         int rc = 0;
8580
8581         if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8582                 BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
8583                 return -EINVAL;
8584         }
8585
8586         switch (int_mode) {
8587         case BNX2X_INT_MODE_MSIX:
8588                 /* attempt to enable msix */
8589                 rc = bnx2x_enable_msix(bp);
8590
8591                 /* msix attained */
8592                 if (!rc)
8593                         return 0;
8594
8595                 /* vfs use only msix */
8596                 if (rc && IS_VF(bp))
8597                         return rc;
8598
8599                 /* failed to enable multiple MSI-X */
8600                 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8601                                bp->num_queues,
8602                                1 + bp->num_cnic_queues);
8603
8604                 /* fall through */
8605         case BNX2X_INT_MODE_MSI:
8606                 bnx2x_enable_msi(bp);
8607
8608                 /* fall through */
8609         case BNX2X_INT_MODE_INTX:
8610                 bp->num_ethernet_queues = 1;
8611                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8612                 BNX2X_DEV_INFO("set number of queues to 1\n");
8613                 break;
8614         default:
8615                 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8616                 return -EINVAL;
8617         }
8618         return 0;
8619 }
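
/* Summary of the fallback chain above: int_mode == MSIX first tries
 * bnx2x_enable_msix(); if that fails on a PF it falls through to MSI and
 * then to INTx, where the driver shrinks to a single ethernet queue. VFs
 * never fall back - anything but MSI-X is rejected with -EINVAL.
 */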
8620
8621 /* must be called prior to any HW initializations */
8622 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8623 {
8624         if (IS_SRIOV(bp))
8625                 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8626         return L2_ILT_LINES(bp);
8627 }
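
/* Illustrative arithmetic (hypothetical values): with BNX2X_FIRST_VF_CID
 * == 256, BNX2X_VF_CIDS == 512 and ILT_PAGE_CIDS == 64, an SRIOV-capable
 * function would reserve (256 + 512) / 64 = 12 CDU lines here.
 */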
8628
8629 void bnx2x_ilt_set_info(struct bnx2x *bp)
8630 {
8631         struct ilt_client_info *ilt_client;
8632         struct bnx2x_ilt *ilt = BP_ILT(bp);
8633         u16 line = 0;
8634
8635         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8636         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8637
8638         /* CDU */
8639         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8640         ilt_client->client_num = ILT_CLIENT_CDU;
8641         ilt_client->page_size = CDU_ILT_PAGE_SZ;
8642         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8643         ilt_client->start = line;
8644         line += bnx2x_cid_ilt_lines(bp);
8645
8646         if (CNIC_SUPPORT(bp))
8647                 line += CNIC_ILT_LINES;
8648         ilt_client->end = line - 1;
8649
8650         DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8651            ilt_client->start,
8652            ilt_client->end,
8653            ilt_client->page_size,
8654            ilt_client->flags,
8655            ilog2(ilt_client->page_size >> 12));
8656
8657         /* QM */
8658         if (QM_INIT(bp->qm_cid_count)) {
8659                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8660                 ilt_client->client_num = ILT_CLIENT_QM;
8661                 ilt_client->page_size = QM_ILT_PAGE_SZ;
8662                 ilt_client->flags = 0;
8663                 ilt_client->start = line;
8664
8665                 /* 4 bytes for each cid */
8666                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8667                                                          QM_ILT_PAGE_SZ);
8668
8669                 ilt_client->end = line - 1;
8670
8671                 DP(NETIF_MSG_IFUP,
8672                    "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8673                    ilt_client->start,
8674                    ilt_client->end,
8675                    ilt_client->page_size,
8676                    ilt_client->flags,
8677                    ilog2(ilt_client->page_size >> 12));
8678         }
8679
8680         if (CNIC_SUPPORT(bp)) {
8681                 /* SRC */
8682                 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8683                 ilt_client->client_num = ILT_CLIENT_SRC;
8684                 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8685                 ilt_client->flags = 0;
8686                 ilt_client->start = line;
8687                 line += SRC_ILT_LINES;
8688                 ilt_client->end = line - 1;
8689
8690                 DP(NETIF_MSG_IFUP,
8691                    "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8692                    ilt_client->start,
8693                    ilt_client->end,
8694                    ilt_client->page_size,
8695                    ilt_client->flags,
8696                    ilog2(ilt_client->page_size >> 12));
8697
8698                 /* TM */
8699                 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8700                 ilt_client->client_num = ILT_CLIENT_TM;
8701                 ilt_client->page_size = TM_ILT_PAGE_SZ;
8702                 ilt_client->flags = 0;
8703                 ilt_client->start = line;
8704                 line += TM_ILT_LINES;
8705                 ilt_client->end = line - 1;
8706
8707                 DP(NETIF_MSG_IFUP,
8708                    "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8709                    ilt_client->start,
8710                    ilt_client->end,
8711                    ilt_client->page_size,
8712                    ilt_client->flags,
8713                    ilog2(ilt_client->page_size >> 12));
8714         }
8715
8716         BUG_ON(line > ILT_MAX_LINES);
8717 }
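
/* Resulting layout (summary of the function above): the clients occupy
 * consecutive line ranges starting at FUNC_ILT_BASE(func) - CDU first,
 * then QM (when initialized), then SRC and TM when CNIC is supported;
 * the BUG_ON() guards the ILT_MAX_LINES capacity.
 */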
8718
8719 /**
8720  * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8721  *
8722  * @bp:                 driver handle
8723  * @fp:                 pointer to fastpath
8724  * @init_params:        pointer to parameters structure
8725  *
8726  * parameters configured:
8727  *      - HC configuration
8728  *      - Queue's CDU context
8729  */
8730 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8731         struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8732 {
8733         u8 cos;
8734         int cxt_index, cxt_offset;
8735
8736         /* FCoE Queue uses Default SB, thus has no HC capabilities */
8737         if (!IS_FCOE_FP(fp)) {
8738                 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8739                 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8740
8741                 /* If HC is supported, enable host coalescing in the transition
8742                  * to INIT state.
8743                  */
8744                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8745                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8746
8747                 /* HC rate */
8748                 init_params->rx.hc_rate = bp->rx_ticks ?
8749                         (1000000 / bp->rx_ticks) : 0;
8750                 init_params->tx.hc_rate = bp->tx_ticks ?
8751                         (1000000 / bp->tx_ticks) : 0;
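                /* Illustrative: the ticks appear to be a coalescing interval
                 * in usec (note the 1000000 divisor), so e.g. rx_ticks == 50
                 * would yield hc_rate = 1000000 / 50 = 20000 updates/sec.
                 */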
8752
8753                 /* FW SB ID */
8754                 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8755                         fp->fw_sb_id;
8756
8757                 /*
8758                  * CQ index among the SB indices: the FCoE client uses
8759                  * the default SB, therefore its index is different.
8760                  */
8761                 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8762                 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8763         }
8764
8765         /* set maximum number of COSs supported by this queue */
8766         init_params->max_cos = fp->max_cos;
8767
8768         DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8769             fp->index, init_params->max_cos);
8770
8771         /* set the context pointers in the queue object's init params */
8772         for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8773                 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8774                 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8775                                 ILT_PAGE_CIDS);
8776                 init_params->cxts[cos] =
8777                         &bp->context[cxt_index].vcxt[cxt_offset].eth;
8778         }
8779 }
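
/* Illustrative cid-to-context mapping (hypothetical ILT_PAGE_CIDS == 64):
 * cid == 130 resolves to cxt_index = 130 / 64 = 2 and
 * cxt_offset = 130 - 2 * 64 = 2, i.e. &bp->context[2].vcxt[2].eth.
 */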
8780
8781 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8782                         struct bnx2x_queue_state_params *q_params,
8783                         struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8784                         int tx_index, bool leading)
8785 {
8786         memset(tx_only_params, 0, sizeof(*tx_only_params));
8787
8788         /* Set the command */
8789         q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8790
8791         /* Set tx-only QUEUE flags: don't zero statistics */
8792         tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8793
8794         /* choose the index of the cid to send the slow path on */
8795         tx_only_params->cid_index = tx_index;
8796
8797         /* Set general TX_ONLY_SETUP parameters */
8798         bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8799
8800         /* Set Tx TX_ONLY_SETUP parameters */
8801         bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8802
8803         DP(NETIF_MSG_IFUP,
8804            "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8805            tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8806            q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8807            tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8808
8809         /* send the ramrod */
8810         return bnx2x_queue_state_change(bp, q_params);
8811 }
8812
8813 /**
8814  * bnx2x_setup_queue - setup queue
8815  *
8816  * @bp:         driver handle
8817  * @fp:         pointer to fastpath
8818  * @leading:    is leading
8819  *
8820  * This function performs 2 steps in a Queue state machine:
8821  *      1) RESET->INIT 2) INIT->SETUP
8822  */
8823
8824 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8825                        bool leading)
8826 {
8827         struct bnx2x_queue_state_params q_params = {NULL};
8828         struct bnx2x_queue_setup_params *setup_params =
8829                                                 &q_params.params.setup;
8830         struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8831                                                 &q_params.params.tx_only;
8832         int rc;
8833         u8 tx_index;
8834
8835         DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8836
8837         /* reset IGU state (skipped for the FCoE L2 queue) */
8838         if (!IS_FCOE_FP(fp))
8839                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8840                              IGU_INT_ENABLE, 0);
8841
8842         q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8843         /* We want to wait for completion in this context */
8844         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8845
8846         /* Prepare the INIT parameters */
8847         bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8848
8849         /* Set the command */
8850         q_params.cmd = BNX2X_Q_CMD_INIT;
8851
8852         /* Change the state to INIT */
8853         rc = bnx2x_queue_state_change(bp, &q_params);
8854         if (rc) {
8855                 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8856                 return rc;
8857         }
8858
8859         DP(NETIF_MSG_IFUP, "init complete\n");
8860
8861         /* Now move the Queue to the SETUP state... */
8862         memset(setup_params, 0, sizeof(*setup_params));
8863
8864         /* Set QUEUE flags */
8865         setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8866
8867         /* Set general SETUP parameters */
8868         bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8869                                 FIRST_TX_COS_INDEX);
8870
8871         bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8872                             &setup_params->rxq_params);
8873
8874         bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8875                            FIRST_TX_COS_INDEX);
8876
8877         /* Set the command */
8878         q_params.cmd = BNX2X_Q_CMD_SETUP;
8879
8880         if (IS_FCOE_FP(fp))
8881                 bp->fcoe_init = true;
8882
8883         /* Change the state to SETUP */
8884         rc = bnx2x_queue_state_change(bp, &q_params);
8885         if (rc) {
8886                 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8887                 return rc;
8888         }
8889
8890         /* loop through the relevant tx-only indices */
8891         for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8892               tx_index < fp->max_cos;
8893               tx_index++) {
8894
8895                 /* prepare and send tx-only ramrod*/
8896                 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8897                                           tx_only_params, tx_index, leading);
8898                 if (rc) {
8899                         BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8900                                   fp->index, tx_index);
8901                         return rc;
8902                 }
8903         }
8904
8905         return rc;
8906 }
8907
8908 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8909 {
8910         struct bnx2x_fastpath *fp = &bp->fp[index];
8911         struct bnx2x_fp_txdata *txdata;
8912         struct bnx2x_queue_state_params q_params = {NULL};
8913         int rc, tx_index;
8914
8915         DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8916
8917         q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8918         /* We want to wait for completion in this context */
8919         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8920
8921         /* close tx-only connections */
8922         for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8923              tx_index < fp->max_cos;
8924              tx_index++) {
8925
8926                 /* ascertain this is a normal queue */
8927                 txdata = fp->txdata_ptr[tx_index];
8928
8929                 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8930                                                         txdata->txq_index);
8931
8932                 /* send halt terminate on tx-only connection */
8933                 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8934                 memset(&q_params.params.terminate, 0,
8935                        sizeof(q_params.params.terminate));
8936                 q_params.params.terminate.cid_index = tx_index;
8937
8938                 rc = bnx2x_queue_state_change(bp, &q_params);
8939                 if (rc)
8940                         return rc;
8941
8942                 /* delete the cfc entry of the tx-only connection */
8943                 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8944                 memset(&q_params.params.cfc_del, 0,
8945                        sizeof(q_params.params.cfc_del));
8946                 q_params.params.cfc_del.cid_index = tx_index;
8947                 rc = bnx2x_queue_state_change(bp, &q_params);
8948                 if (rc)
8949                         return rc;
8950         }
8951         /* Stop the primary connection: */
8952         /* ...halt the connection */
8953         q_params.cmd = BNX2X_Q_CMD_HALT;
8954         rc = bnx2x_queue_state_change(bp, &q_params);
8955         if (rc)
8956                 return rc;
8957
8958         /* ...terminate the connection */
8959         q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8960         memset(&q_params.params.terminate, 0,
8961                sizeof(q_params.params.terminate));
8962         q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8963         rc = bnx2x_queue_state_change(bp, &q_params);
8964         if (rc)
8965                 return rc;
8966         /* ...delete cfc entry */
8967         q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8968         memset(&q_params.params.cfc_del, 0,
8969                sizeof(q_params.params.cfc_del));
8970         q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8971         return bnx2x_queue_state_change(bp, &q_params);
8972 }
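
/* Teardown order used above, in summary: every tx-only cid first gets
 * TERMINATE followed by CFC_DEL; the primary connection is then taken
 * down with HALT -> TERMINATE -> CFC_DEL, waiting for each ramrod to
 * complete before issuing the next one.
 */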
8973
8974 static void bnx2x_reset_func(struct bnx2x *bp)
8975 {
8976         int port = BP_PORT(bp);
8977         int func = BP_FUNC(bp);
8978         int i;
8979
8980         /* Disable the function in the FW */
8981         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8982         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8983         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8984         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8985
8986         /* FP SBs */
8987         for_each_eth_queue(bp, i) {
8988                 struct bnx2x_fastpath *fp = &bp->fp[i];
8989                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8990                            CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8991                            SB_DISABLED);
8992         }
8993
8994         if (CNIC_LOADED(bp))
8995                 /* CNIC SB */
8996                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8997                         CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8998                         (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8999
9000         /* SP SB */
9001         REG_WR8(bp, BAR_CSTRORM_INTMEM +
9002                 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
9003                 SB_DISABLED);
9004
9005         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
9006                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
9007                        0);
9008
9009         /* Configure IGU */
9010         if (bp->common.int_block == INT_BLOCK_HC) {
9011                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
9012                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
9013         } else {
9014                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
9015                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
9016         }
9017
9018         if (CNIC_LOADED(bp)) {
9019                 /* Disable Timer scan */
9020                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9021                 /*
9022                  * Wait for at least 10ms and up to 2 seconds for the
9023                  * timers scan to complete
9024                  */
9025                 for (i = 0; i < 200; i++) {
9026                         usleep_range(10000, 20000);
9027                         if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
9028                                 break;
9029                 }
9030         }
9031         /* Clear ILT */
9032         bnx2x_clear_func_ilt(bp, func);
9033
9034         /* Timers bug workaround for E2: if this is vnic-3,
9035          * we need to set the entire ILT range for these timers.
9036          */
9037         if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
9038                 struct ilt_client_info ilt_cli;
9039                 /* use dummy TM client */
9040                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
9041                 ilt_cli.start = 0;
9042                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
9043                 ilt_cli.client_num = ILT_CLIENT_TM;
9044
9045                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9046         }
9047
9048         /* this assumes that reset_port() was called before reset_func() */
9049         if (!CHIP_IS_E1x(bp))
9050                 bnx2x_pf_disable(bp);
9051
9052         bp->dmae_ready = 0;
9053 }
9054
9055 static void bnx2x_reset_port(struct bnx2x *bp)
9056 {
9057         int port = BP_PORT(bp);
9058         u32 val;
9059
9060         /* Reset physical Link */
9061         bnx2x__link_reset(bp);
9062
9063         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9064
9065         /* Do not rcv packets to BRB */
9066         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
9067         /* Do not direct rcv packets that are not for MCP to the BRB */
9068         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9069                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9070
9071         /* Configure AEU */
9072         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9073
9074         msleep(100);
9075         /* Check for BRB port occupancy */
9076         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9077         if (val)
9078                 DP(NETIF_MSG_IFDOWN,
9079                    "BRB1 is not empty  %d blocks are occupied\n", val);
9080
9081         /* TODO: Close Doorbell port? */
9082 }
9083
9084 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9085 {
9086         struct bnx2x_func_state_params func_params = {NULL};
9087
9088         /* Prepare parameters for function state transitions */
9089         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9090
9091         func_params.f_obj = &bp->func_obj;
9092         func_params.cmd = BNX2X_F_CMD_HW_RESET;
9093
9094         func_params.params.hw_init.load_phase = load_code;
9095
9096         return bnx2x_func_state_change(bp, &func_params);
9097 }
9098
9099 static int bnx2x_func_stop(struct bnx2x *bp)
9100 {
9101         struct bnx2x_func_state_params func_params = {NULL};
9102         int rc;
9103
9104         /* Prepare parameters for function state transitions */
9105         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9106         func_params.f_obj = &bp->func_obj;
9107         func_params.cmd = BNX2X_F_CMD_STOP;
9108
9109         /*
9110          * Try to stop the function the 'good way'. If this fails (e.g.
9111          * because of a parity error during bnx2x_chip_cleanup()) and we
9112          * are not in a debug mode, perform a state transaction in order
9113          * to enable a further HW_RESET transaction.
9114          */
9115         rc = bnx2x_func_state_change(bp, &func_params);
9116         if (rc) {
9117 #ifdef BNX2X_STOP_ON_ERROR
9118                 return rc;
9119 #else
9120                 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9121                 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9122                 return bnx2x_func_state_change(bp, &func_params);
9123 #endif
9124         }
9125
9126         return 0;
9127 }
9128
9129 /**
9130  * bnx2x_send_unload_req - request unload mode from the MCP.
9131  *
9132  * @bp:                 driver handle
9133  * @unload_mode:        requested function's unload mode
9134  *
9135  * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
9136  */
9137 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9138 {
9139         u32 reset_code = 0;
9140         int port = BP_PORT(bp);
9141
9142         /* Select the UNLOAD request mode */
9143         if (unload_mode == UNLOAD_NORMAL)
9144                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9145
9146         else if (bp->flags & NO_WOL_FLAG)
9147                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9148
9149         else if (bp->wol) {
9150                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9151                 u8 *mac_addr = bp->dev->dev_addr;
9152                 struct pci_dev *pdev = bp->pdev;
9153                 u32 val;
9154                 u16 pmc;
9155
9156                 /* The mac address is written to entries 1-4 to
9157                  * preserve entry 0 which is used by the PMF
9158                  */
9159                 u8 entry = (BP_VN(bp) + 1)*8;
9160
9161                 val = (mac_addr[0] << 8) | mac_addr[1];
9162                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9163
9164                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9165                       (mac_addr[4] << 8) | mac_addr[5];
9166                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
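                /* Worked example: for the MAC 00:11:22:33:44:55 the two
                 * writes above program 0x0011 into the first match word and
                 * 0x22334455 into the second.
                 */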
9167
9168                 /* Enable the PME and clear the status */
9169                 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9170                 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9171                 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9172
9173                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9174
9175         } else
9176                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9177
9178         /* Send the request to the MCP */
9179         if (!BP_NOMCP(bp))
9180                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9181         else {
9182                 int path = BP_PATH(bp);
9183
9184                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      %d, %d, %d\n",
9185                    path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9186                    bnx2x_load_count[path][2]);
9187                 bnx2x_load_count[path][0]--;
9188                 bnx2x_load_count[path][1 + port]--;
9189                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  %d, %d, %d\n",
9190                    path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9191                    bnx2x_load_count[path][2]);
9192                 if (bnx2x_load_count[path][0] == 0)
9193                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9194                 else if (bnx2x_load_count[path][1 + port] == 0)
9195                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9196                 else
9197                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
9198         }
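
        /* Note on the no-MCP path above: bnx2x_load_count[path][0] is the
         * path-wide count and [1 + port] the per-port count, so the last
         * function on the path reports UNLOAD_COMMON, the last one on a
         * port UNLOAD_PORT, and any other function UNLOAD_FUNCTION.
         */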
9199
9200         return reset_code;
9201 }
9202
9203 /**
9204  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
9205  *
9206  * @bp:         driver handle
9207  * @keep_link:          true iff link should be kept up
9208  */
9209 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9210 {
9211         u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9212
9213         /* Report UNLOAD_DONE to MCP */
9214         if (!BP_NOMCP(bp))
9215                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9216 }
9217
9218 static int bnx2x_func_wait_started(struct bnx2x *bp)
9219 {
9220         int tout = 50;
9221         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9222
9223         if (!bp->port.pmf)
9224                 return 0;
9225
9226         /*
9227          * (assumption: no attention from the MCP at this stage)
9228          * The PMF is probably in the middle of a TX disable/enable
9229          * transaction, so:
9230          * 1. Sync the IRQ for the default SB
9231          * 2. Sync the SP queue - guarantees attention handling has started
9232          * 3. Wait until the TX disable/enable transaction completes
9233          *
9234          * 1+2 guarantee that if a DCBx attention was scheduled it already
9235          * moved the transaction's pending bit from STARTED to TX_STOPPED;
9236          * if we already received completion for the transaction the state
9237          * is TX_STOPPED. The state returns to STARTED after the
9238          * TX_STOPPED-->STARTED transaction completes.
9239          */
9239
9240         /* make sure default SB ISR is done */
9241         if (msix)
9242                 synchronize_irq(bp->msix_table[0].vector);
9243         else
9244                 synchronize_irq(bp->pdev->irq);
9245
9246         flush_workqueue(bnx2x_wq);
9247         flush_workqueue(bnx2x_iov_wq);
9248
9249         while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9250                                 BNX2X_F_STATE_STARTED && tout--)
9251                 msleep(20);
9252
9253         if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9254                                                 BNX2X_F_STATE_STARTED) {
9255 #ifdef BNX2X_STOP_ON_ERROR
9256                 BNX2X_ERR("Wrong function state\n");
9257                 return -EBUSY;
9258 #else
9259                 /*
9260                  * Failed to complete the transaction in a "good way"
9261                  * Force both transactions with CLR bit
9262                  */
9263                 struct bnx2x_func_state_params func_params = {NULL};
9264
9265                 DP(NETIF_MSG_IFDOWN,
9266                    "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9267
9268                 func_params.f_obj = &bp->func_obj;
9269                 __set_bit(RAMROD_DRV_CLR_ONLY,
9270                                         &func_params.ramrod_flags);
9271
9272                 /* STARTED-->TX_STOPPED */
9273                 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9274                 bnx2x_func_state_change(bp, &func_params);
9275
9276                 /* TX_STOPPED-->STARTED */
9277                 func_params.cmd = BNX2X_F_CMD_TX_START;
9278                 return bnx2x_func_state_change(bp, &func_params);
9279 #endif
9280         }
9281
9282         return 0;
9283 }
9284
9285 static void bnx2x_disable_ptp(struct bnx2x *bp)
9286 {
9287         int port = BP_PORT(bp);
9288
9289         /* Disable sending PTP packets to host */
9290         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9291                NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9292
9293         /* Reset PTP event detection rules */
9294         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9295                NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9296         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9297                NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9298         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9299                NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9300         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9301                NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9302
9303         /* Disable the PTP feature */
9304         REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9305                NIG_REG_P0_PTP_EN, 0x0);
9306 }
9307
9308 /* Called during unload, to stop PTP-related stuff */
9309 static void bnx2x_stop_ptp(struct bnx2x *bp)
9310 {
9311         /* Cancel PTP work queue. Should be done after the Tx queues are
9312          * drained to prevent additional scheduling.
9313          */
9314         cancel_work_sync(&bp->ptp_task);
9315
9316         if (bp->ptp_tx_skb) {
9317                 dev_kfree_skb_any(bp->ptp_tx_skb);
9318                 bp->ptp_tx_skb = NULL;
9319         }
9320
9321         /* Disable PTP in HW */
9322         bnx2x_disable_ptp(bp);
9323
9324         DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9325 }
9326
9327 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9328 {
9329         int port = BP_PORT(bp);
9330         int i, rc = 0;
9331         u8 cos;
9332         struct bnx2x_mcast_ramrod_params rparam = {NULL};
9333         u32 reset_code;
9334
9335         /* Wait until tx fastpath tasks complete */
9336         for_each_tx_queue(bp, i) {
9337                 struct bnx2x_fastpath *fp = &bp->fp[i];
9338
9339                 for_each_cos_in_tx_queue(fp, cos)
9340                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9341 #ifdef BNX2X_STOP_ON_ERROR
9342                 if (rc)
9343                         return;
9344 #endif
9345         }
9346
9347         /* Give HW time to discard old tx messages */
9348         usleep_range(1000, 2000);
9349
9350         /* Clean all ETH MACs */
9351         rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9352                                 false);
9353         if (rc < 0)
9354                 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9355
9356         /* Clean up UC list  */
9357         rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9358                                 true);
9359         if (rc < 0)
9360                 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9361                           rc);
9362
9363         /* The whole *vlan_obj structure may not be initialized if VLAN
9364          * filtering offload is not supported by the hardware. Currently
9365          * this is true for all hardware covered by CHIP_IS_E1x().
9366          */
9367         if (!CHIP_IS_E1x(bp)) {
9368                 /* Remove all currently configured VLANs */
9369                 rc = bnx2x_del_all_vlans(bp);
9370                 if (rc < 0)
9371                         BNX2X_ERR("Failed to delete all VLANs\n");
9372         }
9373
9374         /* Disable LLH */
9375         if (!CHIP_IS_E1(bp))
9376                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9377
9378         /* Set "drop all" (stop Rx).
9379          * We need to take a netif_addr_lock() here in order to prevent
9380          * a race between the completion code and this code.
9381          */
9382         netif_addr_lock_bh(bp->dev);
9383         /* Schedule the rx_mode command */
9384         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9385                 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9386         else if (bp->slowpath)
9387                 bnx2x_set_storm_rx_mode(bp);
9388
9389         /* Cleanup multicast configuration */
9390         rparam.mcast_obj = &bp->mcast_obj;
9391         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9392         if (rc < 0)
9393                 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9394
9395         netif_addr_unlock_bh(bp->dev);
9396
9397         bnx2x_iov_chip_cleanup(bp);
9398
9399         /*
9400          * Send the UNLOAD_REQUEST to the MCP. This will return whether
9401          * this function should perform a FUNC, PORT or COMMON HW
9402          * reset.
9403          */
9404         reset_code = bnx2x_send_unload_req(bp, unload_mode);
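        /* (reset_code is typically one of the FW_MSG_CODE_DRV_UNLOAD_* values;
         * bnx2x_reset_hw() acts on it further below.)
         */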
9405
9406         /*
9407          * (Assumption: no attention from the MCP at this stage.)
9408          * The PMF is probably in the middle of a TX disable/enable transaction.
9409          */
9410         rc = bnx2x_func_wait_started(bp);
9411         if (rc) {
9412                 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9413 #ifdef BNX2X_STOP_ON_ERROR
9414                 return;
9415 #endif
9416         }
9417
9418         /* Close multi and leading connections
9419          * Completions for ramrods are collected in a synchronous way
9420          */
9421         for_each_eth_queue(bp, i)
9422                 if (bnx2x_stop_queue(bp, i))
9423 #ifdef BNX2X_STOP_ON_ERROR
9424                         return;
9425 #else
9426                         goto unload_error;
9427 #endif
9428
9429         if (CNIC_LOADED(bp)) {
9430                 for_each_cnic_queue(bp, i)
9431                         if (bnx2x_stop_queue(bp, i))
9432 #ifdef BNX2X_STOP_ON_ERROR
9433                                 return;
9434 #else
9435                                 goto unload_error;
9436 #endif
9437         }
9438
9439         /* If SP settings didn't get completed so far - something has
9440          * gone very wrong.
9441          */
9442         if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9443                 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9444
9445 #ifndef BNX2X_STOP_ON_ERROR
9446 unload_error:
9447 #endif
9448         rc = bnx2x_func_stop(bp);
9449         if (rc) {
9450                 BNX2X_ERR("Function stop failed!\n");
9451 #ifdef BNX2X_STOP_ON_ERROR
9452                 return;
9453 #endif
9454         }
9455
9456         /* stop_ptp should come after the Tx queues are drained, to prevent
9457          * scheduling work on the cancelled PTP work queue. It should also come
9458          * after the function stop ramrod is sent, since the FW accesses the
9459          * PTP registers as part of that ramrod.
9460          */
9461         if (bp->flags & PTP_SUPPORTED) {
9462                 bnx2x_stop_ptp(bp);
9463                 if (bp->ptp_clock) {
9464                         ptp_clock_unregister(bp->ptp_clock);
9465                         bp->ptp_clock = NULL;
9466                 }
9467         }
9468
9469         /* Disable HW interrupts, NAPI */
9470         bnx2x_netif_stop(bp, 1);
9471         /* Delete all NAPI objects */
9472         bnx2x_del_all_napi(bp);
9473         if (CNIC_LOADED(bp))
9474                 bnx2x_del_all_napi_cnic(bp);
9475
9476         /* Release IRQs */
9477         bnx2x_free_irq(bp);
9478
9479         /* Reset the chip, unless the PCI function is offline. If we reach
9480          * this point following PCI error handling, the device is really in
9481          * a bad state and we're about to remove it, so resetting the chip
9482          * is not a good idea.
9483          */
9484         if (!pci_channel_offline(bp->pdev)) {
9485                 rc = bnx2x_reset_hw(bp, reset_code);
9486                 if (rc)
9487                         BNX2X_ERR("HW_RESET failed\n");
9488         }
9489
9490         /* Report UNLOAD_DONE to MCP */
9491         bnx2x_send_unload_done(bp, keep_link);
9492 }
9493
9494 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9495 {
9496         u32 val;
9497
9498         DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9499
9500         if (CHIP_IS_E1(bp)) {
9501                 int port = BP_PORT(bp);
9502                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9503                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
9504
9505                 val = REG_RD(bp, addr);
9506                 val &= ~(0x300);
9507                 REG_WR(bp, addr, val);
9508         } else {
9509                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9510                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9511                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9512                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9513         }
9514 }
9515
9516 /* Close gates #2, #3 and #4: */
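/* (As the register writes below show: gate #2 discards PXP internal writes,
 * gate #3 blocks HC/IGU interrupts and gate #4 discards doorbells.)
 */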
9517 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9518 {
9519         u32 val;
9520
9521         /* Gates #2 and #4a are closed/opened for "not E1" only */
9522         if (!CHIP_IS_E1(bp)) {
9523                 /* #4 */
9524                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9525                 /* #2 */
9526                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9527         }
9528
9529         /* #3 */
9530         if (CHIP_IS_E1x(bp)) {
9531                 /* Prevent interrupts from HC on both ports */
9532                 val = REG_RD(bp, HC_REG_CONFIG_1);
9533                 REG_WR(bp, HC_REG_CONFIG_1,
9534                        (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9535                        (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9536
9537                 val = REG_RD(bp, HC_REG_CONFIG_0);
9538                 REG_WR(bp, HC_REG_CONFIG_0,
9539                        (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9540                        (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9541         } else {
9542                 /* Prevent incoming interrupts in IGU */
9543                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9544
9545                 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9546                        (!close) ?
9547                        (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9548                        (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9549         }
9550
9551         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9552                 close ? "closing" : "opening");
9553         mmiowb();
9554 }
9555
9556 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
9557
9558 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9559 {
9560         /* Do some magic... */
9561         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9562         *magic_val = val & SHARED_MF_CLP_MAGIC;
9563         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9564 }
9565
9566 /**
9567  * bnx2x_clp_reset_done - restore the value of the `magic' bit.
9568  *
9569  * @bp:         driver handle
9570  * @magic_val:  old value of the `magic' bit.
9571  */
9572 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9573 {
9574         /* Restore the `magic' bit value... */
9575         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9576         MF_CFG_WR(bp, shared_mf_config.clp_mb,
9577                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9578 }
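/* A sketch of the intended prep/done pairing, as used by
 * bnx2x_reset_mcp_prep() and bnx2x_reset_mcp_comp() below:
 *
 *      u32 magic;
 *
 *      bnx2x_clp_reset_prep(bp, &magic);  (set the magic bit, save old value)
 *      ... reset the MCP ...
 *      bnx2x_clp_reset_done(bp, magic);   (restore the saved value)
 */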
9579
9580 /**
9581  * bnx2x_reset_mcp_prep - prepare for MCP reset.
9582  *
9583  * @bp:         driver handle
9584  * @magic_val:  old value of 'magic' bit.
9585  *
9586  * Takes care of CLP configurations.
9587  */
9588 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9589 {
9590         u32 shmem;
9591         u32 validity_offset;
9592
9593         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9594
9595         /* Set `magic' bit in order to save MF config */
9596         if (!CHIP_IS_E1(bp))
9597                 bnx2x_clp_reset_prep(bp, magic_val);
9598
9599         /* Get shmem offset */
9600         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9601         validity_offset =
9602                 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9603
9604         /* Clear validity map flags */
9605         if (shmem > 0)
9606                 REG_WR(bp, shmem + validity_offset, 0);
9607 }
9608
9609 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
9610 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
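/* bnx2x_init_shmem() below polls up to MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50
 * times, sleeping MCP_ONE_TIMEOUT per iteration (10x longer on
 * emulation/FPGA), i.e. roughly 5 seconds total on real silicon.
 */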
9611
9612 /**
9613  * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
9614  *
9615  * @bp: driver handle
9616  */
9617 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9618 {
9619         /* special handling for emulation and FPGA,
9620            wait 10 times longer */
9621         if (CHIP_REV_IS_SLOW(bp))
9622                 msleep(MCP_ONE_TIMEOUT*10);
9623         else
9624                 msleep(MCP_ONE_TIMEOUT);
9625 }
9626
9627 /*
9628  * initializes bp->common.shmem_base and waits for validity signature to appear
9629  */
9630 static int bnx2x_init_shmem(struct bnx2x *bp)
9631 {
9632         int cnt = 0;
9633         u32 val = 0;
9634
9635         do {
9636                 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9637
9638                 /* If we read all 0xFFs, it means we are in a PCI error state
9639                  * and should bail out to avoid crashes on adapter FW reads.
9640                  */
9641                 if (bp->common.shmem_base == 0xFFFFFFFF) {
9642                         bp->flags |= NO_MCP_FLAG;
9643                         return -ENODEV;
9644                 }
9645
9646                 if (bp->common.shmem_base) {
9647                         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9648                         if (val & SHR_MEM_VALIDITY_MB)
9649                                 return 0;
9650                 }
9651
9652                 bnx2x_mcp_wait_one(bp);
9653
9654         } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9655
9656         BNX2X_ERR("BAD MCP validity signature\n");
9657
9658         return -ENODEV;
9659 }
9660
9661 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9662 {
9663         int rc = bnx2x_init_shmem(bp);
9664
9665         /* Restore the `magic' bit value */
9666         if (!CHIP_IS_E1(bp))
9667                 bnx2x_clp_reset_done(bp, magic_val);
9668
9669         return rc;
9670 }
9671
9672 static void bnx2x_pxp_prep(struct bnx2x *bp)
9673 {
9674         if (!CHIP_IS_E1(bp)) {
9675                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9676                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9677                 mmiowb();
9678         }
9679 }
9680
9681 /*
9682  * Reset the whole chip except for:
9683  *      - PCIE core
9684  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9685  *              one reset bit)
9686  *      - IGU
9687  *      - MISC (including AEU)
9688  *      - GRC
9689  *      - RBCN, RBCP
9690  */
9691 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9692 {
9693         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9694         u32 global_bits2, stay_reset2;
9695
9696         /*
9697          * Bits that have to be set in reset_mask2 if we want to reset 'global'
9698          * (per chip) blocks.
9699          */
9700         global_bits2 =
9701                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9702                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9703
9704         /* Don't reset the following blocks.
9705          * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
9706          *            reset, as in a 4-port device they might still be owned
9707          *            by the MCP (there is only one leader per path).
9708          */
9709         not_reset_mask1 =
9710                 MISC_REGISTERS_RESET_REG_1_RST_HC |
9711                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9712                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9713
9714         not_reset_mask2 =
9715                 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9716                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9717                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9718                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9719                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9720                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
9721                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9722                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9723                 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9724                 MISC_REGISTERS_RESET_REG_2_PGLC |
9725                 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9726                 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9727                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9728                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9729                 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9730                 MISC_REGISTERS_RESET_REG_2_UMAC1;
9731
9732         /*
9733          * Keep the following blocks in reset:
9734          *  - all xxMACs are handled by the bnx2x_link code.
9735          */
9736         stay_reset2 =
9737                 MISC_REGISTERS_RESET_REG_2_XMAC |
9738                 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9739
9740         /* Full reset masks according to the chip */
9741         reset_mask1 = 0xffffffff;
9742
9743         if (CHIP_IS_E1(bp))
9744                 reset_mask2 = 0xffff;
9745         else if (CHIP_IS_E1H(bp))
9746                 reset_mask2 = 0x1ffff;
9747         else if (CHIP_IS_E2(bp))
9748                 reset_mask2 = 0xfffff;
9749         else /* CHIP_IS_E3 */
9750                 reset_mask2 = 0x3ffffff;
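        /* (The number of valid bits in reset register 2 grows with the chip
         * generation: 16 on E1, 17 on E1H, 20 on E2 and 26 on E3.)
         */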
9751
9752         /* Don't reset global blocks unless we need to */
9753         if (!global)
9754                 reset_mask2 &= ~global_bits2;
9755
9756         /*
9757          * In case of attention in the QM, we need to reset PXP
9758          * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
9759          * because otherwise QM reset would release 'close the gates' shortly
9760          * before resetting the PXP, then the PSWRQ would send a write
9761          * request to PGLUE. Then when PXP is reset, PGLUE would try to
9762          * read the payload data from PSWWR, but PSWWR would not
9763          * respond. The write queue in PGLUE would get stuck, and DMAE
9764          * commands would not return. Therefore it's important to reset
9765          * the second reset register (containing the
9766          * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
9767          * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
9768          * bit).
9769          */
9770         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9771                reset_mask2 & (~not_reset_mask2));
9772
9773         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9774                reset_mask1 & (~not_reset_mask1));
9775
9776         barrier();
9777         mmiowb();
9778
9779         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9780                reset_mask2 & (~stay_reset2));
9781
9782         barrier();
9783         mmiowb();
9784
9785         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9786         mmiowb();
9787 }
9788
9789 /**
9790  * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
9792  *
9793  * @bp: driver handle
9794  *
9795  * The bit should get cleared in no more than 1s. Returns 0 if the
9796  * pending writes bit gets cleared.
9797  */
9798 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9799 {
9800         int cnt = 1000;    /* signed: the timeout test below relies on -1 */
9801         u32 pend_bits = 0;
9802
9803         do {
9804                 pend_bits  = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9805
9806                 if (pend_bits == 0)
9807                         break;
9808
9809                 usleep_range(1000, 2000);
9810         } while (cnt-- > 0);
9811
9812         if (cnt <= 0) {
9813                 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9814                           pend_bits);
9815                 return -EBUSY;
9816         }
9817
9818         return 0;
9819 }
9820
9821 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9822 {
9823         int cnt = 1000;
9824         u32 val = 0;
9825         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9826         u32 tags_63_32 = 0;
9827
9828         /* Empty the Tetris buffer, wait for 1s */
9829         do {
9830                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9831                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9832                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9833                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9834                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9835                 if (CHIP_IS_E3(bp))
9836                         tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9837
9838                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9839                     ((port_is_idle_0 & 0x1) == 0x1) &&
9840                     ((port_is_idle_1 & 0x1) == 0x1) &&
9841                     (pgl_exp_rom2 == 0xffffffff) &&
9842                     (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9843                         break;
9844                 usleep_range(1000, 2000);
9845         } while (cnt-- > 0);
9846
9847         if (cnt <= 0) {
9848                 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9849                 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9850                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9851                           pgl_exp_rom2);
9852                 return -EAGAIN;
9853         }
9854
9855         barrier();
9856
9857         /* Close gates #2, #3 and #4 */
9858         bnx2x_set_234_gates(bp, true);
9859
9860         /* Poll for IGU VQs for 57712 and newer chips */
9861         if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9862                 return -EAGAIN;
9863
9864         /* TBD: Indicate that "process kill" is in progress to MCP */
9865
9866         /* Clear "unprepared" bit */
9867         REG_WR(bp, MISC_REG_UNPREPARED, 0);
9868         barrier();
9869
9870         /* Make sure all is written to the chip before the reset */
9871         mmiowb();
9872
9873         /* Wait for 1ms to empty GLUE and PCI-E core queues,
9874          * PSWHST, GRC and PSWRD Tetris buffer.
9875          */
9876         usleep_range(1000, 2000);
9877
9878         /* Prepare for chip reset: */
9879         /* MCP */
9880         if (global)
9881                 bnx2x_reset_mcp_prep(bp, &val);
9882
9883         /* PXP */
9884         bnx2x_pxp_prep(bp);
9885         barrier();
9886
9887         /* reset the chip */
9888         bnx2x_process_kill_chip_reset(bp, global);
9889         barrier();
9890
9891         /* clear errors in PGB */
9892         if (!CHIP_IS_E1x(bp))
9893                 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9894
9895         /* Recover after reset: */
9896         /* MCP */
9897         if (global && bnx2x_reset_mcp_comp(bp, val))
9898                 return -EAGAIN;
9899
9900         /* TBD: Add resetting the NO_MCP mode DB here */
9901
9902         /* Open the gates #2, #3 and #4 */
9903         bnx2x_set_234_gates(bp, false);
9904
9905         /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
9906          * reset state, re-enable attentions. */
9907
9908         return 0;
9909 }
9910
9911 static int bnx2x_leader_reset(struct bnx2x *bp)
9912 {
9913         int rc = 0;
9914         bool global = bnx2x_reset_is_global(bp);
9915         u32 load_code;
9916
9917         /* If not going to reset the MCP, load a "fake" driver to reset the
9918          * HW while this driver is the owner of the HW.
9919          */
9920         if (!global && !BP_NOMCP(bp)) {
9921                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9922                                              DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9923                 if (!load_code) {
9924                         BNX2X_ERR("MCP response failure, aborting\n");
9925                         rc = -EAGAIN;
9926                         goto exit_leader_reset;
9927                 }
9928                 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9929                     (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9930                         BNX2X_ERR("MCP unexpected resp, aborting\n");
9931                         rc = -EAGAIN;
9932                         goto exit_leader_reset2;
9933                 }
9934                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9935                 if (!load_code) {
9936                         BNX2X_ERR("MCP response failure, aborting\n");
9937                         rc = -EAGAIN;
9938                         goto exit_leader_reset2;
9939                 }
9940         }
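        /* From this point the MCP regards this function as a loaded driver,
         * so the recovery below runs under that "fake" ownership; it is
         * released again in exit_leader_reset2 via the UNLOAD sequence.
         */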
9941
9942         /* Try to recover after the failure */
9943         if (bnx2x_process_kill(bp, global)) {
9944                 BNX2X_ERR("Something bad happened on engine %d! Aii!\n",
9945                           BP_PATH(bp));
9946                 rc = -EAGAIN;
9947                 goto exit_leader_reset2;
9948         }
9949
9950         /*
9951          * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
9952          * state.
9953          */
9954         bnx2x_set_reset_done(bp);
9955         if (global)
9956                 bnx2x_clear_reset_global(bp);
9957
9958 exit_leader_reset2:
9959         /* unload "fake driver" if it was loaded */
9960         if (!global && !BP_NOMCP(bp)) {
9961                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9962                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9963         }
9964 exit_leader_reset:
9965         bp->is_leader = 0;
9966         bnx2x_release_leader_lock(bp);
9967         smp_mb();
9968         return rc;
9969 }
9970
9971 static void bnx2x_recovery_failed(struct bnx2x *bp)
9972 {
9973         netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9974
9975         /* Disconnect this device */
9976         netif_device_detach(bp->dev);
9977
9978         /*
9979          * Block ifup for all functions on this engine until "process kill"
9980          * or a power cycle.
9981          */
9982         bnx2x_set_reset_in_progress(bp);
9983
9984         /* Shut down the power */
9985         bnx2x_set_power_state(bp, PCI_D3hot);
9986
9987         bp->recovery_state = BNX2X_RECOVERY_FAILED;
9988
9989         smp_mb();
9990 }
9991
9992 /*
9993  * Assumption: runs under rtnl lock. This, together with the fact
9994  * that it's called only from bnx2x_sp_rtnl(), ensures that it
9995  * will never be called when netif_running(bp->dev) is false.
9996  */
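/* Recovery state machine implemented below (sketch):
 *
 *   BNX2X_RECOVERY_INIT: unload the NIC, try to become the leader and
 *                        move to BNX2X_RECOVERY_WAIT;
 *   BNX2X_RECOVERY_WAIT: leader     - wait until all other functions are
 *                                     down, then bnx2x_leader_reset();
 *                        non-leader - wait for the reset to complete (or
 *                                     grab leadership if it was released),
 *                                     then reload via bnx2x_nic_load() and
 *                                     move to BNX2X_RECOVERY_DONE.
 */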
9997 static void bnx2x_parity_recover(struct bnx2x *bp)
9998 {
9999         bool global = false;
10000         u32 error_recovered, error_unrecovered;
10001         bool is_parity;
10002
10003         DP(NETIF_MSG_HW, "Handling parity\n");
10004         while (1) {
10005                 switch (bp->recovery_state) {
10006                 case BNX2X_RECOVERY_INIT:
10007                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
10008                         is_parity = bnx2x_chk_parity_attn(bp, &global, false);
10009                         WARN_ON(!is_parity);
10010
10011                         /* Try to get a LEADER_LOCK HW lock */
10012                         if (bnx2x_trylock_leader_lock(bp)) {
10013                                 bnx2x_set_reset_in_progress(bp);
10014                                 /*
10015                                  * If there was a global attention, set
10016                                  * the global reset bit.
10017                                  */
10019
10020                                 if (global)
10021                                         bnx2x_set_reset_global(bp);
10022
10023                                 bp->is_leader = 1;
10024                         }
10025
10026                         /* Stop the driver */
10027                         /* If the interface has been removed - return */
10028                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
10029                                 return;
10030
10031                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
10032
10033                         /* Ensure "is_leader", MCP command sequence and
10034                          * "recovery_state" update values are seen on other
10035                          * CPUs.
10036                          */
10037                         smp_mb();
10038                         break;
10039
10040                 case BNX2X_RECOVERY_WAIT:
10041                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
10042                         if (bp->is_leader) {
10043                                 int other_engine = BP_PATH(bp) ? 0 : 1;
10044                                 bool other_load_status =
10045                                         bnx2x_get_load_status(bp, other_engine);
10046                                 bool load_status =
10047                                         bnx2x_get_load_status(bp, BP_PATH(bp));
10048                                 global = bnx2x_reset_is_global(bp);
10049
10050                                 /*
10051                                  * In case of a parity in a global block, let
10052                                  * the first leader that performs a
10053                                  * leader_reset() reset the global blocks in
10054                                  * order to clear global attentions. Otherwise
10055                                  * the gates will remain closed for that
10056                                  * engine.
10057                                  */
10058                                 if (load_status ||
10059                                     (global && other_load_status)) {
10060                                         /* Wait until all other functions get
10061                                          * down.
10062                                          */
10063                                         schedule_delayed_work(&bp->sp_rtnl_task,
10064                                                                 HZ/10);
10065                                         return;
10066                                 } else {
10067                                         /* If all other functions got down -
10068                                          * try to bring the chip back to
10069                                          * normal. In any case it's an exit
10070                                          * point for a leader.
10071                                          */
10072                                         if (bnx2x_leader_reset(bp)) {
10073                                                 bnx2x_recovery_failed(bp);
10074                                                 return;
10075                                         }
10076
10077                                         /* If we are here, it means the
10078                                          * leader has succeeded and doesn't
10079                                          * want to be the leader any more.
10080                                          * Try to continue as a non-leader.
10081                                          */
10082                                         break;
10083                                 }
10084                         } else { /* non-leader */
10085                                 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10086                                         /* Try to get the LEADER_LOCK HW lock,
10087                                          * since a former leader may have
10088                                          * been unloaded by the user or have
10089                                          * released leadership for another
10090                                          * reason.
10091                                          */
10092                                         if (bnx2x_trylock_leader_lock(bp)) {
10093                                                 /* I'm a leader now! Restart a
10094                                                  * switch case.
10095                                                  */
10096                                                 bp->is_leader = 1;
10097                                                 break;
10098                                         }
10099
10100                                         schedule_delayed_work(&bp->sp_rtnl_task,
10101                                                                 HZ/10);
10102                                         return;
10103
10104                                 } else {
10105                                         /*
10106                                          * If there was a global attention, wait
10107                                          * for it to be cleared.
10108                                          */
10109                                         if (bnx2x_reset_is_global(bp)) {
10110                                                 schedule_delayed_work(
10111                                                         &bp->sp_rtnl_task,
10112                                                         HZ/10);
10113                                                 return;
10114                                         }
10115
10116                                         error_recovered =
10117                                           bp->eth_stats.recoverable_error;
10118                                         error_unrecovered =
10119                                           bp->eth_stats.unrecoverable_error;
10120                                         bp->recovery_state =
10121                                                 BNX2X_RECOVERY_NIC_LOADING;
10122                                         if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10123                                                 error_unrecovered++;
10124                                                 netdev_err(bp->dev,
10125                                                            "Recovery failed. Power cycle needed\n");
10126                                                 /* Disconnect this device */
10127                                                 netif_device_detach(bp->dev);
10128                                                 /* Shut down the power */
10129                                                 bnx2x_set_power_state(
10130                                                         bp, PCI_D3hot);
10131                                                 smp_mb();
10132                                         } else {
10133                                                 bp->recovery_state =
10134                                                         BNX2X_RECOVERY_DONE;
10135                                                 error_recovered++;
10136                                                 smp_mb();
10137                                         }
10138                                         bp->eth_stats.recoverable_error =
10139                                                 error_recovered;
10140                                         bp->eth_stats.unrecoverable_error =
10141                                                 error_unrecovered;
10142
10143                                         return;
10144                                 }
10145                         }
10146                 default:
10147                         return;
10148                 }
10149         }
10150 }
10151
10152 static int bnx2x_udp_port_update(struct bnx2x *bp)
10153 {
10154         struct bnx2x_func_switch_update_params *switch_update_params;
10155         struct bnx2x_func_state_params func_params = {NULL};
10156         struct bnx2x_udp_tunnel *udp_tunnel;
10157         u16 vxlan_port = 0, geneve_port = 0;
10158         int rc;
10159
10160         switch_update_params = &func_params.params.switch_update;
10161
10162         /* Prepare parameters for function state transitions */
10163         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10164         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10165
10166         func_params.f_obj = &bp->func_obj;
10167         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10168
10169         /* Function parameters */
10170         __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10171                   &switch_update_params->changes);
10172
10173         if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
10174                 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10175                 geneve_port = udp_tunnel->dst_port;
10176                 switch_update_params->geneve_dst_port = geneve_port;
10177         }
10178
10179         if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
10180                 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10181                 vxlan_port = udp_tunnel->dst_port;
10182                 switch_update_params->vxlan_dst_port = vxlan_port;
10183         }
10184
10185         /* Re-enable inner-rss for the offloaded UDP tunnels */
10186         __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
10187                   &switch_update_params->changes);
10188
10189         rc = bnx2x_func_state_change(bp, &func_params);
10190         if (rc)
10191                 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
10192                           vxlan_port, geneve_port, rc);
10193         else
10194                 DP(BNX2X_MSG_SP,
10195                    "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
10196                    vxlan_port, geneve_port);
10197
10198         return rc;
10199 }
10200
10201 static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
10202                                  enum bnx2x_udp_port_type type)
10203 {
10204         struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10205
10206         if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp))
10207                 return;
10208
10209         if (udp_port->count && udp_port->dst_port == port) {
10210                 udp_port->count++;
10211                 return;
10212         }
10213
10214         if (udp_port->count) {
10215                 DP(BNX2X_MSG_SP,
10216                    "UDP tunnel [%d] - destination port limit reached\n",
10217                    type);
10218                 return;
10219         }
10220
10221         udp_port->dst_port = port;
10222         udp_port->count = 1;
10223         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10224 }
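/* Only a single offloaded destination port per tunnel type is supported:
 * the count field reference-counts identical requests, while a request for
 * a second, different port is ignored (the "limit reached" message above).
 */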
10225
10226 static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
10227                                  enum bnx2x_udp_port_type type)
10228 {
10229         struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10230
10231         if (!IS_PF(bp) || CHIP_IS_E1x(bp))
10232                 return;
10233
10234         if (!udp_port->count || udp_port->dst_port != port) {
10235                 DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
10236                    type);
10237                 return;
10238         }
10239
10240         /* Remove reference, and make certain it's no longer in use */
10241         udp_port->count--;
10242         if (udp_port->count)
10243                 return;
10244         udp_port->dst_port = 0;
10245
10246         if (netif_running(bp->dev))
10247                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10248         else
10249                 DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
10250                    type, port);
10251 }
10252
10253 static void bnx2x_udp_tunnel_add(struct net_device *netdev,
10254                                  struct udp_tunnel_info *ti)
10255 {
10256         struct bnx2x *bp = netdev_priv(netdev);
10257         u16 t_port = ntohs(ti->port);
10258
10259         switch (ti->type) {
10260         case UDP_TUNNEL_TYPE_VXLAN:
10261                 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10262                 break;
10263         case UDP_TUNNEL_TYPE_GENEVE:
10264                 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10265                 break;
10266         default:
10267                 break;
10268         }
10269 }
10270
10271 static void bnx2x_udp_tunnel_del(struct net_device *netdev,
10272                                  struct udp_tunnel_info *ti)
10273 {
10274         struct bnx2x *bp = netdev_priv(netdev);
10275         u16 t_port = ntohs(ti->port);
10276
10277         switch (ti->type) {
10278         case UDP_TUNNEL_TYPE_VXLAN:
10279                 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10280                 break;
10281         case UDP_TUNNEL_TYPE_GENEVE:
10282                 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10283                 break;
10284         default:
10285                 break;
10286         }
10287 }
10288
10289 static int bnx2x_close(struct net_device *dev);
10290
10291 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
10292  * scheduled on a general queue in order to prevent a deadlock.
10293  */
10294 static void bnx2x_sp_rtnl_task(struct work_struct *work)
10295 {
10296         struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10297
10298         rtnl_lock();
10299
10300         if (!netif_running(bp->dev)) {
10301                 rtnl_unlock();
10302                 return;
10303         }
10304
10305         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10306 #ifdef BNX2X_STOP_ON_ERROR
10307                 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10308                           "you will need to reboot when done\n");
10309                 goto sp_rtnl_not_reset;
10310 #endif
10311                 /*
10312                  * Clear all pending SP commands as we are going to reset the
10313                  * function anyway.
10314                  */
10315                 bp->sp_rtnl_state = 0;
10316                 smp_mb();
10317
10318                 bnx2x_parity_recover(bp);
10319
10320                 rtnl_unlock();
10321                 return;
10322         }
10323
10324         if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10325 #ifdef BNX2X_STOP_ON_ERROR
10326                 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10327                           "you will need to reboot when done\n");
10328                 goto sp_rtnl_not_reset;
10329 #endif
10330
10331                 /*
10332                  * Clear all pending SP commands as we are going to reset the
10333                  * function anyway.
10334                  */
10335                 bp->sp_rtnl_state = 0;
10336                 smp_mb();
10337
10338                 /* Immediately indicate link as down */
10339                 bp->link_vars.link_up = 0;
10340                 bp->force_link_down = true;
10341                 netif_carrier_off(bp->dev);
10342                 BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
10343
10344                 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10345                 /* If the return value indicates an allocation failure,
10346                  * the NIC is rebooted again. If open still fails, print
10347                  * an error message to notify the user.
10348                  */
10349                 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
10350                         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10351                         if (bnx2x_nic_load(bp, LOAD_NORMAL))
10352                                 BNX2X_ERR("Opening the NIC failed again!\n");
10353                 }
10354                 rtnl_unlock();
10355                 return;
10356         }
10357 #ifdef BNX2X_STOP_ON_ERROR
10358 sp_rtnl_not_reset:
10359 #endif
10360         if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10361                 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10362         if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10363                 bnx2x_after_function_update(bp);
10364         /*
10365          * in case of fan failure we need to reset it even if the "stop on
10366          * error" debug flag is set, since we are trying to prevent permanent
10367          * overheating damage
10368          */
10369         if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10370                 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10371                 netif_device_detach(bp->dev);
10372                 bnx2x_close(bp->dev);
10373                 rtnl_unlock();
10374                 return;
10375         }
10376
10377         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10378                 DP(BNX2X_MSG_SP,
10379                    "sending set mcast vf pf channel message from rtnl sp-task\n");
10380                 bnx2x_vfpf_set_mcast(bp->dev);
10381         }
10382         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10383                                &bp->sp_rtnl_state)){
10384                 if (netif_carrier_ok(bp->dev)) {
10385                         bnx2x_tx_disable(bp);
10386                         BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
10387                 }
10388         }
10389
10390         if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10391                 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10392                 bnx2x_set_rx_mode_inner(bp);
10393         }
10394
10395         if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10396                                &bp->sp_rtnl_state))
10397                 bnx2x_pf_set_vfs_vlan(bp);
10398
10399         if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10400                 bnx2x_dcbx_stop_hw_tx(bp);
10401                 bnx2x_dcbx_resume_hw_tx(bp);
10402         }
10403
10404         if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10405                                &bp->sp_rtnl_state))
10406                 bnx2x_update_mng_version(bp);
10407
10408         if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
10409                 bnx2x_handle_update_svid_cmd(bp);
10410
10411         if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
10412                                &bp->sp_rtnl_state)) {
10413                 if (bnx2x_udp_port_update(bp)) {
10414                         /* On error, forget configuration */
10415                         memset(bp->udp_tunnel_ports, 0,
10416                                sizeof(struct bnx2x_udp_tunnel) *
10417                                BNX2X_UDP_PORT_MAX);
10418                 } else {
10419                         /* Since we don't store additional port information,
10420                          * if no ports remain configured for any feature, ask
10421                          * the stack for the currently configured ports.
10422                          */
10423                         if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
10424                             !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
10425                                 udp_tunnel_get_rx_info(bp->dev);
10426                 }
10427         }
10428
10429         /* Work which needs the rtnl lock not taken (it takes the lock itself
10430          * and can be called from other contexts as well).
10431          */
10432         rtnl_unlock();
10433
10434         /* enable SR-IOV if applicable */
10435         if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10436                                                &bp->sp_rtnl_state)) {
10437                 bnx2x_disable_sriov(bp);
10438                 bnx2x_enable_sriov(bp);
10439         }
10440 }
10441
10442 static void bnx2x_period_task(struct work_struct *work)
10443 {
10444         struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10445
10446         if (!netif_running(bp->dev))
10447                 goto period_task_exit;
10448
10449         if (CHIP_REV_IS_SLOW(bp)) {
10450                 BNX2X_ERR("period task called on emulation, ignoring\n");
10451                 goto period_task_exit;
10452         }
10453
10454         bnx2x_acquire_phy_lock(bp);
10455         /*
10456          * The barrier is needed to ensure the ordering between the writing to
10457          * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
10458          * the reading here.
10459          */
10460         smp_mb();
10461         if (bp->port.pmf) {
10462                 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10463
10464                 /* Re-queue task in 1 sec */
10465                 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10466         }
10467
10468         bnx2x_release_phy_lock(bp);
10469 period_task_exit:
10470         return;
10471 }
10472
10473 /*
10474  * Init service functions
10475  */
10476
10477 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10478 {
10479         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10480         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10481         return base + (BP_ABS_FUNC(bp)) * stride;
10482 }
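/* For example, for absolute function 2 this returns
 * PXP2_REG_PGL_PRETEND_FUNC_F0 + 2 * stride, i.e. the F2 pretend register,
 * relying on the per-function pretend registers being evenly spaced.
 */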
10483
10484 static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10485                                          u8 port, u32 reset_reg,
10486                                          struct bnx2x_mac_vals *vals)
10487 {
10488         u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10489         u32 base_addr;
10490
10491         if (!(mask & reset_reg))
10492                 return false;
10493
10494         BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10495         base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10496         vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10497         vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10498         REG_WR(bp, vals->umac_addr[port], 0);
10499
10500         return true;
10501 }
10502
10503 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10504                                         struct bnx2x_mac_vals *vals)
10505 {
10506         u32 val, base_addr, offset, mask, reset_reg;
10507         bool mac_stopped = false;
10508         u8 port = BP_PORT(bp);
10509
10510         /* reset addresses as they also mark which values were changed */
10511         memset(vals, 0, sizeof(*vals));
10512
10513         reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10514
10515         if (!CHIP_IS_E3(bp)) {
10516                 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10517                 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10518                 if ((mask & reset_reg) && val) {
10519                         u32 wb_data[2];
10520                         BNX2X_DEV_INFO("Disable bmac Rx\n");
10521                         base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10522                                                 : NIG_REG_INGRESS_BMAC0_MEM;
10523                         offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10524                                                 : BIGMAC_REGISTER_BMAC_CONTROL;
10525
10526                         /*
10527                          * use rd/wr since we cannot use dmae. This is safe
10528                          * since MCP won't access the bus due to the request
10529                          * to unload, and no function on the path can be
10530                          * loaded at this time.
10531                          */
10532                         wb_data[0] = REG_RD(bp, base_addr + offset);
10533                         wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10534                         vals->bmac_addr = base_addr + offset;
10535                         vals->bmac_val[0] = wb_data[0];
10536                         vals->bmac_val[1] = wb_data[1];
10537                         wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10538                         REG_WR(bp, vals->bmac_addr, wb_data[0]);
10539                         REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10540                 }
10541                 BNX2X_DEV_INFO("Disable emac Rx\n");
10542                 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10543                 vals->emac_val = REG_RD(bp, vals->emac_addr);
10544                 REG_WR(bp, vals->emac_addr, 0);
10545                 mac_stopped = true;
10546         } else {
10547                 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10548                         BNX2X_DEV_INFO("Disable xmac Rx\n");
10549                         base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10550                         val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10551                         REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10552                                val & ~(1 << 1));
10553                         REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10554                                val | (1 << 1));
10555                         vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10556                         vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10557                         REG_WR(bp, vals->xmac_addr, 0);
10558                         mac_stopped = true;
10559                 }
10560
10561                 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10562                                                             reset_reg, vals);
10563                 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10564                                                             reset_reg, vals);
10565         }
10566
10567         if (mac_stopped)
10568                 msleep(20);
10569 }
10570
10571 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10572 #define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10573                                         0x1848 + ((f) << 4))
10574 #define BNX2X_PREV_UNDI_RCQ(val)        ((val) & 0xffff)
10575 #define BNX2X_PREV_UNDI_BD(val)         ((val) >> 16 & 0xffff)
10576 #define BNX2X_PREV_UNDI_PROD(rcq, bd)   ((bd) << 16 | (rcq))
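/* Example: a producer word of 0x00050003 unpacks to rcq = 0x0003 and
 * bd = 0x0005; BNX2X_PREV_UNDI_PROD(0x0003, 0x0005) packs it back into
 * 0x00050003.
 */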
10577
10578 #define BCM_5710_UNDI_FW_MF_MAJOR       (0x07)
10579 #define BCM_5710_UNDI_FW_MF_MINOR       (0x08)
10580 #define BCM_5710_UNDI_FW_MF_VERS        (0x05)
10581
10582 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10583 {
10584         /* UNDI marks its presence in DORQ -
10585          * it initializes the CID offset for the normal doorbell to 0x7.
10586          */
10587         if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10588             MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10589                 return false;
10590
10591         if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10592                 BNX2X_DEV_INFO("UNDI previously loaded\n");
10593                 return true;
10594         }
10595
10596         return false;
10597 }
10598
10599 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10600 {
10601         u16 rcq, bd;
10602         u32 addr, tmp_reg;
10603
10604         if (BP_FUNC(bp) < 2)
10605                 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10606         else
10607                 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10608
10609         tmp_reg = REG_RD(bp, addr);
10610         rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10611         bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10612
10613         tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10614         REG_WR(bp, addr, tmp_reg);
10615
10616         BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10617                        BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10618 }
10619
10620 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10621 {
10622         u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10623                                   DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10624         if (!rc) {
10625                 BNX2X_ERR("MCP response failure, aborting\n");
10626                 return -EBUSY;
10627         }
10628
10629         return 0;
10630 }
10631
10632 static struct bnx2x_prev_path_list *
10633                 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10634 {
10635         struct bnx2x_prev_path_list *tmp_list;
10636
10637         list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10638                 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10639                     bp->pdev->bus->number == tmp_list->bus &&
10640                     BP_PATH(bp) == tmp_list->path)
10641                         return tmp_list;
10642
10643         return NULL;
10644 }
10645
10646 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10647 {
10648         struct bnx2x_prev_path_list *tmp_list;
10649         int rc;
10650
10651         rc = down_interruptible(&bnx2x_prev_sem);
10652         if (rc) {
10653                 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10654                 return rc;
10655         }
10656
10657         tmp_list = bnx2x_prev_path_get_entry(bp);
10658         if (tmp_list) {
10659                 tmp_list->aer = 1;
10660                 rc = 0;
10661         } else {
10662                 BNX2X_ERR("path %d: Entry does not exist for EEH; does the flow occur before initial insmod is over?\n",
10663                           BP_PATH(bp));
10664         }
10665
10666         up(&bnx2x_prev_sem);
10667
10668         return rc;
10669 }
10670
10671 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10672 {
10673         struct bnx2x_prev_path_list *tmp_list;
10674         bool rc = false;
10675
10676         if (down_trylock(&bnx2x_prev_sem))
10677                 return false;
10678
10679         tmp_list = bnx2x_prev_path_get_entry(bp);
10680         if (tmp_list) {
10681                 if (tmp_list->aer) {
10682                         DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10683                            BP_PATH(bp));
10684                 } else {
10685                         rc = true;
10686                         BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10687                                        BP_PATH(bp));
10688                 }
10689         }
10690
10691         up(&bnx2x_prev_sem);
10692
10693         return rc;
10694 }
10695
10696 bool bnx2x_port_after_undi(struct bnx2x *bp)
10697 {
10698         struct bnx2x_prev_path_list *entry;
10699         bool val;
10700
10701         down(&bnx2x_prev_sem);
10702
10703         entry = bnx2x_prev_path_get_entry(bp);
10704         val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10705
10706         up(&bnx2x_prev_sem);
10707
10708         return val;
10709 }
10710
10711 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10712 {
10713         struct bnx2x_prev_path_list *tmp_list;
10714         int rc;
10715
10716         rc = down_interruptible(&bnx2x_prev_sem);
10717         if (rc) {
10718                 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10719                 return rc;
10720         }
10721
10722         /* Check whether the entry for this path already exists */
10723         tmp_list = bnx2x_prev_path_get_entry(bp);
10724         if (tmp_list) {
10725                 if (!tmp_list->aer) {
10726                         BNX2X_ERR("Re-marking the path.\n");
10727                 } else {
10728                         DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10729                            BP_PATH(bp));
10730                         tmp_list->aer = 0;
10731                 }
10732                 up(&bnx2x_prev_sem);
10733                 return 0;
10734         }
10735         up(&bnx2x_prev_sem);
10736
10737         /* Create an entry for this path and add it */
10738         tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10739         if (!tmp_list) {
10740                 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10741                 return -ENOMEM;
10742         }
10743
10744         tmp_list->bus = bp->pdev->bus->number;
10745         tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10746         tmp_list->path = BP_PATH(bp);
10747         tmp_list->aer = 0;
10748         tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10749
10750         rc = down_interruptible(&bnx2x_prev_sem);
10751         if (rc) {
10752                 BNX2X_ERR("Received %d when trying to take lock\n", rc);
10753                 kfree(tmp_list);
10754         } else {
10755                 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10756                    BP_PATH(bp));
10757                 list_add(&tmp_list->list, &bnx2x_prev_list);
10758                 up(&bnx2x_prev_sem);
10759         }
10760
10761         return rc;
10762 }
10763
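/* Ask the MCP to perform a Function Level Reset. FLR is only available
 * on non-E1x chips and requires bootcode REQ_BC_VER_4_INITIATE_FLR or
 * later.
 */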
10764 static int bnx2x_do_flr(struct bnx2x *bp)
10765 {
10766         struct pci_dev *dev = bp->pdev;
10767
10768         if (CHIP_IS_E1x(bp)) {
10769                 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10770                 return -EINVAL;
10771         }
10772
10773         /* Only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards supports FLR */
10774         if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10775                 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10776                           bp->common.bc_ver);
10777                 return -EINVAL;
10778         }
10779
10780         if (!pci_wait_for_pending_transaction(dev))
10781                 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10782
10783         BNX2X_DEV_INFO("Initiating FLR\n");
10784         bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10785
10786         return 0;
10787 }
10788
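/* "Uncommon" unload flow: another function owns the path-wide cleanup.
 * If the path is already clean, just acknowledge the MCP; otherwise try
 * an FLR, and when that is not possible return BNX2X_PREV_WAIT_NEEDED so
 * the caller retries later.
 */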
10789 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10790 {
10791         int rc;
10792
10793         BNX2X_DEV_INFO("Uncommon unload Flow\n");
10794
10795         /* Test if previous unload process was already finished for this path */
10796         if (bnx2x_prev_is_path_marked(bp))
10797                 return bnx2x_prev_mcp_done(bp);
10798
10799         BNX2X_DEV_INFO("Path is unmarked\n");
10800
10801         /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
10802         if (bnx2x_prev_is_after_undi(bp))
10803                 goto out;
10804
10805         /* If function has FLR capabilities, and existing FW version matches
10806          * the one required, then FLR will be sufficient to clean any residue
10807          * left by previous driver
10808          */
10809         rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10810
10811         if (!rc) {
10812                 /* fw version is good */
10813                 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10814                 rc = bnx2x_do_flr(bp);
10815         }
10816
10817         if (!rc) {
10818                 /* FLR was performed */
10819                 BNX2X_DEV_INFO("FLR successful\n");
10820                 return 0;
10821         }
10822
10823         BNX2X_DEV_INFO("Could not FLR\n");
10824
10825 out:
10826         /* Close the MCP request, return failure */
10827         rc = bnx2x_prev_mcp_done(bp);
10828         if (!rc)
10829                 rc = BNX2X_PREV_WAIT_NEEDED;
10830
10831         return rc;
10832 }
10833
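/* "Common" unload flow: this function cleans the whole path. It closes
 * the MACs and LLH filters, drains the BRB (advancing the UNDI producers
 * when a boot driver was loaded), resets the common blocks, restores the
 * saved MAC registers and marks the path as handled.
 */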
10834 static int bnx2x_prev_unload_common(struct bnx2x *bp)
10835 {
10836         u32 reset_reg, tmp_reg = 0, rc;
10837         bool prev_undi = false;
10838         struct bnx2x_mac_vals mac_vals;
10839
10840         /* It is possible that a previous function received the 'common'
10841          * answer but has not loaded yet, creating a scenario in which
10842          * multiple functions receive 'common' on the same path.
10843          */
10844         BNX2X_DEV_INFO("Common unload Flow\n");
10845
10846         memset(&mac_vals, 0, sizeof(mac_vals));
10847
10848         if (bnx2x_prev_is_path_marked(bp))
10849                 return bnx2x_prev_mcp_done(bp);
10850
10851         reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10852
10853         /* Reset should be performed after BRB is emptied */
10854         if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10855                 u32 timer_count = 1000;
10856
10857                 /* Close the MAC Rx to prevent BRB from filling up */
10858                 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10859
10860                 /* close LLH filters for both ports towards the BRB */
10861                 bnx2x_set_rx_filter(&bp->link_params, 0);
10862                 bp->link_params.port ^= 1;
10863                 bnx2x_set_rx_filter(&bp->link_params, 0);
10864                 bp->link_params.port ^= 1;
10865
10866                 /* Check if the UNDI driver was previously loaded */
10867                 if (bnx2x_prev_is_after_undi(bp)) {
10868                         prev_undi = true;
10869                         /* clear the UNDI indication */
10870                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10871                         /* clear possible idle check errors */
10872                         REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10873                 }
10874                 if (!CHIP_IS_E1x(bp))
10875                         /* block FW from writing to host */
10876                         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10877
10878                 /* wait until BRB is empty */
10879                 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10880                 while (timer_count) {
10881                         u32 prev_brb = tmp_reg;
10882
10883                         tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10884                         if (!tmp_reg)
10885                                 break;
10886
10887                         BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10888
10889                         /* reset timer as long as BRB actually gets emptied */
10890                         if (prev_brb > tmp_reg)
10891                                 timer_count = 1000;
10892                         else
10893                                 timer_count--;
10894
10895                         /* If UNDI resides in memory, manually increment it */
10896                         if (prev_undi)
10897                                 bnx2x_prev_unload_undi_inc(bp, 1);
10898
10899                         udelay(10);
10900                 }
10901
10902                 if (!timer_count)
10903                         BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10904         }
10905
10906         /* No packets are in the pipeline, path is ready for reset */
10907         bnx2x_reset_common(bp);
10908
10909         if (mac_vals.xmac_addr)
10910                 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10911         if (mac_vals.umac_addr[0])
10912                 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10913         if (mac_vals.umac_addr[1])
10914                 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10915         if (mac_vals.emac_addr)
10916                 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10917         if (mac_vals.bmac_addr) {
10918                 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10919                 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10920         }
10921
10922         rc = bnx2x_prev_mark_path(bp, prev_undi);
10923         if (rc) {
10924                 bnx2x_prev_mcp_done(bp);
10925                 return rc;
10926         }
10927
10928         return bnx2x_prev_mcp_done(bp);
10929 }
10930
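/* Entry point of the previous-unload flow: clear PGLUE errors, release
 * locks a previous driver may have left behind, then issue an unload
 * request to the MCP and run the common or uncommon flow depending on
 * the reply (or on a pending AER mark), retrying up to 10 times.
 */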
10931 static int bnx2x_prev_unload(struct bnx2x *bp)
10932 {
10933         int time_counter = 10;
10934         u32 rc, fw, hw_lock_reg, hw_lock_val;
10935         BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10936
10937         /* clear hw from errors which may have resulted from an interrupted
10938          * dmae transaction.
10939          */
10940         bnx2x_clean_pglue_errors(bp);
10941
10942         /* Release previously held locks */
10943         hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10944                       (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10945                       (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10946
10947         hw_lock_val = REG_RD(bp, hw_lock_reg);
10948         if (hw_lock_val) {
10949                 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10950                         BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10951                         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10952                                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10953                 }
10954
10955                 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10956                 REG_WR(bp, hw_lock_reg, 0xffffffff);
10957         } else
10958                 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10959
10960         if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10961                 BNX2X_DEV_INFO("Release previously held alr\n");
10962                 bnx2x_release_alr(bp);
10963         }
10964
10965         do {
10966                 int aer = 0;
10967                 /* Lock MCP using an unload request */
10968                 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10969                 if (!fw) {
10970                         BNX2X_ERR("MCP response failure, aborting\n");
10971                         rc = -EBUSY;
10972                         break;
10973                 }
10974
10975                 rc = down_interruptible(&bnx2x_prev_sem);
10976                 if (rc) {
10977                         BNX2X_ERR("Cannot check for AER; received %d when trying to take lock\n",
10978                                   rc);
10979                 } else {
10980                         /* If Path is marked by EEH, ignore unload status */
10981                         aer = !!(bnx2x_prev_path_get_entry(bp) &&
10982                                  bnx2x_prev_path_get_entry(bp)->aer);
10983                         up(&bnx2x_prev_sem);
10984                 }
10985
10986                 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10987                         rc = bnx2x_prev_unload_common(bp);
10988                         break;
10989                 }
10990
10991                 /* non-common reply from MCP might require looping */
10992                 rc = bnx2x_prev_unload_uncommon(bp);
10993                 if (rc != BNX2X_PREV_WAIT_NEEDED)
10994                         break;
10995
10996                 msleep(20);
10997         } while (--time_counter);
10998
10999         if (!time_counter || rc) {
11000                 BNX2X_DEV_INFO("Unloading previous driver did not occur, possibly due to MF UNDI\n");
11001                 rc = -EPROBE_DEFER;
11002         }
11003
11004         /* Mark function if its port was used to boot from SAN */
11005         if (bnx2x_port_after_undi(bp))
11006                 bp->link_params.feature_config_flags |=
11007                         FEATURE_CONFIG_BOOT_FROM_SAN;
11008
11009         BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
11010
11011         return rc;
11012 }
11013
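/* Read chip-wide identification and configuration: chip id, port mode,
 * flash size, shmem bases, bootcode version (and the feature flags it
 * implies), boot mode and WoL capability.
 */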
11014 static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
11015 {
11016         u32 val, val2, val3, val4, id, boot_mode;
11017         u16 pmc;
11018
11019         /* Get the chip revision id and number. */
11020         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
11021         val = REG_RD(bp, MISC_REG_CHIP_NUM);
11022         id = ((val & 0xffff) << 16);
11023         val = REG_RD(bp, MISC_REG_CHIP_REV);
11024         id |= ((val & 0xf) << 12);
11025
11026         /* Metal is read from PCI regs, but we can't access offsets >= 0x400
11027          * through the configuration space, so we use REG_RD instead.
11028          */
11029         val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
11030         id |= (((val >> 24) & 0xf) << 4);
11031         val = REG_RD(bp, MISC_REG_BOND_ID);
11032         id |= (val & 0xf);
11033         bp->common.chip_id = id;
11034
11035         /* force 57811 according to MISC register */
11036         if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
11037                 if (CHIP_IS_57810(bp))
11038                         bp->common.chip_id = (CHIP_NUM_57811 << 16) |
11039                                 (bp->common.chip_id & 0x0000FFFF);
11040                 else if (CHIP_IS_57810_MF(bp))
11041                         bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
11042                                 (bp->common.chip_id & 0x0000FFFF);
11043                 bp->common.chip_id |= 0x1;
11044         }
11045
11046         /* Set doorbell size */
11047         bp->db_size = (1 << BNX2X_DB_SHIFT);
11048
11049         if (!CHIP_IS_E1x(bp)) {
11050                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
11051                 if ((val & 1) == 0)
11052                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
11053                 else
11054                         val = (val >> 1) & 1;
11055                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
11056                                                        "2_PORT_MODE");
11057                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
11058                                                  CHIP_2_PORT_MODE;
11059
11060                 if (CHIP_MODE_IS_4_PORT(bp))
11061                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
11062                 else
11063                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
11064         } else {
11065                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
11066                 bp->pfid = bp->pf_num;                  /* 0..7 */
11067         }
11068
11069         BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
11070
11071         bp->link_params.chip_id = bp->common.chip_id;
11072         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
11073
11074         val = (REG_RD(bp, 0x2874) & 0x55);
11075         if ((bp->common.chip_id & 0x1) ||
11076             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
11077                 bp->flags |= ONE_PORT_FLAG;
11078                 BNX2X_DEV_INFO("single port device\n");
11079         }
11080
11081         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
11082         bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
11083                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
11084         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
11085                        bp->common.flash_size, bp->common.flash_size);
11086
11087         bnx2x_init_shmem(bp);
11088
11089         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
11090                                         MISC_REG_GENERIC_CR_1 :
11091                                         MISC_REG_GENERIC_CR_0));
11092
11093         bp->link_params.shmem_base = bp->common.shmem_base;
11094         bp->link_params.shmem2_base = bp->common.shmem2_base;
11095         if (SHMEM2_RD(bp, size) >
11096             (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
11097                 bp->link_params.lfa_base =
11098                 REG_RD(bp, bp->common.shmem2_base +
11099                        (u32)offsetof(struct shmem2_region,
11100                                      lfa_host_addr[BP_PORT(bp)]));
11101         else
11102                 bp->link_params.lfa_base = 0;
11103         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
11104                        bp->common.shmem_base, bp->common.shmem2_base);
11105
11106         if (!bp->common.shmem_base) {
11107                 BNX2X_DEV_INFO("MCP not active\n");
11108                 bp->flags |= NO_MCP_FLAG;
11109                 return;
11110         }
11111
11112         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
11113         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
11114
11115         bp->link_params.hw_led_mode = ((bp->common.hw_config &
11116                                         SHARED_HW_CFG_LED_MODE_MASK) >>
11117                                        SHARED_HW_CFG_LED_MODE_SHIFT);
11118
11119         bp->link_params.feature_config_flags = 0;
11120         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
11121         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
11122                 bp->link_params.feature_config_flags |=
11123                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11124         else
11125                 bp->link_params.feature_config_flags &=
11126                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11127
11128         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
11129         bp->common.bc_ver = val;
11130         BNX2X_DEV_INFO("bc_ver %X\n", val);
11131         if (val < BNX2X_BC_VER) {
11132                 /* For now we only warn;
11133                  * later we might need to enforce this. */
11134                 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
11135                           BNX2X_BC_VER, val);
11136         }
11137         bp->link_params.feature_config_flags |=
11138                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
11139                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
11140
11141         bp->link_params.feature_config_flags |=
11142                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
11143                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
11144         bp->link_params.feature_config_flags |=
11145                 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
11146                 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
11147         bp->link_params.feature_config_flags |=
11148                 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
11149                 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
11150
11151         bp->link_params.feature_config_flags |=
11152                 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
11153                 FEATURE_CONFIG_MT_SUPPORT : 0;
11154
11155         bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11156                         BC_SUPPORTS_PFC_STATS : 0;
11157
11158         bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11159                         BC_SUPPORTS_FCOE_FEATURES : 0;
11160
11161         bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11162                         BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
11163
11164         bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11165                         BC_SUPPORTS_RMMOD_CMD : 0;
11166
11167         boot_mode = SHMEM_RD(bp,
11168                         dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11169                         PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
11170         switch (boot_mode) {
11171         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
11172                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11173                 break;
11174         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
11175                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11176                 break;
11177         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
11178                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11179                 break;
11180         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
11181                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11182                 break;
11183         }
11184
11185         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11186         bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11187
11188         BNX2X_DEV_INFO("%sWoL capable\n",
11189                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
11190
11191         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11192         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11193         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11194         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11195
11196         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
11197                  val, val2, val3, val4);
11198 }
11199
11200 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
11201 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
11202
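/* Derive this function's IGU status-block layout. In backward-compatible
 * interrupt mode the layout follows from the function/VN number;
 * otherwise the IGU CAM is scanned for entries that belong to this PF.
 */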
11203 static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11204 {
11205         int pfid = BP_FUNC(bp);
11206         int igu_sb_id;
11207         u32 val;
11208         u8 fid, igu_sb_cnt = 0;
11209
11210         bp->igu_base_sb = 0xff;
11211         if (CHIP_INT_MODE_IS_BC(bp)) {
11212                 int vn = BP_VN(bp);
11213                 igu_sb_cnt = bp->igu_sb_cnt;
11214                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11215                         FP_SB_MAX_E1x;
11216
11217                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
11218                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11219
11220                 return 0;
11221         }
11222
11223         /* IGU in normal mode - read CAM */
11224         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
11225              igu_sb_id++) {
11226                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11227                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
11228                         continue;
11229                 fid = IGU_FID(val);
11230                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
11231                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
11232                                 continue;
11233                         if (IGU_VEC(val) == 0)
11234                                 /* default status block */
11235                                 bp->igu_dsb_id = igu_sb_id;
11236                         else {
11237                                 if (bp->igu_base_sb == 0xff)
11238                                         bp->igu_base_sb = igu_sb_id;
11239                                 igu_sb_cnt++;
11240                         }
11241                 }
11242         }
11243
11244 #ifdef CONFIG_PCI_MSI
11245         /* Due to new PF resource allocation by MFW T7.4 and above, the
11246          * number of CAM entries may not equal the value advertised in
11247          * PCI.
11248          * The driver should use the minimum of the two as the actual
11249          * status block count.
11250          */
11251         bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
11252 #endif
11253
11254         if (igu_sb_cnt == 0) {
11255                 BNX2X_ERR("CAM configuration error\n");
11256                 return -EINVAL;
11257         }
11258
11259         return 0;
11260 }
11261
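/* Build bp->port.supported[] from the capabilities of the configured
 * PHYs (two configurations when two external PHYs are present), resolve
 * the PHY address, and mask out speeds the NVRAM speed_cap_mask does not
 * allow.
 */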
11262 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11263 {
11264         int cfg_size = 0, idx, port = BP_PORT(bp);
11265
11266         /* Aggregation of supported attributes of all external phys */
11267         bp->port.supported[0] = 0;
11268         bp->port.supported[1] = 0;
11269         switch (bp->link_params.num_phys) {
11270         case 1:
11271                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11272                 cfg_size = 1;
11273                 break;
11274         case 2:
11275                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11276                 cfg_size = 1;
11277                 break;
11278         case 3:
11279                 if (bp->link_params.multi_phy_config &
11280                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11281                         bp->port.supported[1] =
11282                                 bp->link_params.phy[EXT_PHY1].supported;
11283                         bp->port.supported[0] =
11284                                 bp->link_params.phy[EXT_PHY2].supported;
11285                 } else {
11286                         bp->port.supported[0] =
11287                                 bp->link_params.phy[EXT_PHY1].supported;
11288                         bp->port.supported[1] =
11289                                 bp->link_params.phy[EXT_PHY2].supported;
11290                 }
11291                 cfg_size = 2;
11292                 break;
11293         }
11294
11295         if (!(bp->port.supported[0] || bp->port.supported[1])) {
11296                 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
11297                            SHMEM_RD(bp,
11298                            dev_info.port_hw_config[port].external_phy_config),
11299                            SHMEM_RD(bp,
11300                            dev_info.port_hw_config[port].external_phy_config2));
11301                 return;
11302         }
11303
11304         if (CHIP_IS_E3(bp))
11305                 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11306         else {
11307                 switch (switch_cfg) {
11308                 case SWITCH_CFG_1G:
11309                         bp->port.phy_addr = REG_RD(
11310                                 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11311                         break;
11312                 case SWITCH_CFG_10G:
11313                         bp->port.phy_addr = REG_RD(
11314                                 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11315                         break;
11316                 default:
11317                         BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11318                                   bp->port.link_config[0]);
11319                         return;
11320                 }
11321         }
11322         BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11323         /* mask what we support according to speed_cap_mask per configuration */
11324         for (idx = 0; idx < cfg_size; idx++) {
11325                 if (!(bp->link_params.speed_cap_mask[idx] &
11326                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11327                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11328
11329                 if (!(bp->link_params.speed_cap_mask[idx] &
11330                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11331                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11332
11333                 if (!(bp->link_params.speed_cap_mask[idx] &
11334                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11335                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11336
11337                 if (!(bp->link_params.speed_cap_mask[idx] &
11338                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11339                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11340
11341                 if (!(bp->link_params.speed_cap_mask[idx] &
11342                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11343                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11344                                                      SUPPORTED_1000baseT_Full);
11345
11346                 if (!(bp->link_params.speed_cap_mask[idx] &
11347                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11348                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11349
11350                 if (!(bp->link_params.speed_cap_mask[idx] &
11351                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11352                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11353
11354                 if (!(bp->link_params.speed_cap_mask[idx] &
11355                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11356                         bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11357         }
11358
11359         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11360                        bp->port.supported[1]);
11361 }
11362
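/* Translate the NVRAM link_config words into the requested line speed,
 * duplex, advertised modes and flow control for each PHY configuration.
 */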
11363 static void bnx2x_link_settings_requested(struct bnx2x *bp)
11364 {
11365         u32 link_config, idx, cfg_size = 0;
11366         bp->port.advertising[0] = 0;
11367         bp->port.advertising[1] = 0;
11368         switch (bp->link_params.num_phys) {
11369         case 1:
11370         case 2:
11371                 cfg_size = 1;
11372                 break;
11373         case 3:
11374                 cfg_size = 2;
11375                 break;
11376         }
11377         for (idx = 0; idx < cfg_size; idx++) {
11378                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11379                 link_config = bp->port.link_config[idx];
11380                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11381                 case PORT_FEATURE_LINK_SPEED_AUTO:
11382                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11383                                 bp->link_params.req_line_speed[idx] =
11384                                         SPEED_AUTO_NEG;
11385                                 bp->port.advertising[idx] |=
11386                                         bp->port.supported[idx];
11387                                 if (bp->link_params.phy[EXT_PHY1].type ==
11388                                     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11389                                         bp->port.advertising[idx] |=
11390                                         (SUPPORTED_100baseT_Half |
11391                                          SUPPORTED_100baseT_Full);
11392                         } else {
11393                                 /* force 10G, no AN */
11394                                 bp->link_params.req_line_speed[idx] =
11395                                         SPEED_10000;
11396                                 bp->port.advertising[idx] |=
11397                                         (ADVERTISED_10000baseT_Full |
11398                                          ADVERTISED_FIBRE);
11399                                 continue;
11400                         }
11401                         break;
11402
11403                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11404                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11405                                 bp->link_params.req_line_speed[idx] =
11406                                         SPEED_10;
11407                                 bp->port.advertising[idx] |=
11408                                         (ADVERTISED_10baseT_Full |
11409                                          ADVERTISED_TP);
11410                         } else {
11411                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11412                                             link_config,
11413                                     bp->link_params.speed_cap_mask[idx]);
11414                                 return;
11415                         }
11416                         break;
11417
11418                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11419                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11420                                 bp->link_params.req_line_speed[idx] =
11421                                         SPEED_10;
11422                                 bp->link_params.req_duplex[idx] =
11423                                         DUPLEX_HALF;
11424                                 bp->port.advertising[idx] |=
11425                                         (ADVERTISED_10baseT_Half |
11426                                          ADVERTISED_TP);
11427                         } else {
11428                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11429                                             link_config,
11430                                           bp->link_params.speed_cap_mask[idx]);
11431                                 return;
11432                         }
11433                         break;
11434
11435                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11436                         if (bp->port.supported[idx] &
11437                             SUPPORTED_100baseT_Full) {
11438                                 bp->link_params.req_line_speed[idx] =
11439                                         SPEED_100;
11440                                 bp->port.advertising[idx] |=
11441                                         (ADVERTISED_100baseT_Full |
11442                                          ADVERTISED_TP);
11443                         } else {
11444                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11445                                             link_config,
11446                                           bp->link_params.speed_cap_mask[idx]);
11447                                 return;
11448                         }
11449                         break;
11450
11451                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11452                         if (bp->port.supported[idx] &
11453                             SUPPORTED_100baseT_Half) {
11454                                 bp->link_params.req_line_speed[idx] =
11455                                                                 SPEED_100;
11456                                 bp->link_params.req_duplex[idx] =
11457                                                                 DUPLEX_HALF;
11458                                 bp->port.advertising[idx] |=
11459                                         (ADVERTISED_100baseT_Half |
11460                                          ADVERTISED_TP);
11461                         } else {
11462                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11463                                     link_config,
11464                                     bp->link_params.speed_cap_mask[idx]);
11465                                 return;
11466                         }
11467                         break;
11468
11469                 case PORT_FEATURE_LINK_SPEED_1G:
11470                         if (bp->port.supported[idx] &
11471                             SUPPORTED_1000baseT_Full) {
11472                                 bp->link_params.req_line_speed[idx] =
11473                                         SPEED_1000;
11474                                 bp->port.advertising[idx] |=
11475                                         (ADVERTISED_1000baseT_Full |
11476                                          ADVERTISED_TP);
11477                         } else if (bp->port.supported[idx] &
11478                                    SUPPORTED_1000baseKX_Full) {
11479                                 bp->link_params.req_line_speed[idx] =
11480                                         SPEED_1000;
11481                                 bp->port.advertising[idx] |=
11482                                         ADVERTISED_1000baseKX_Full;
11483                         } else {
11484                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11485                                     link_config,
11486                                     bp->link_params.speed_cap_mask[idx]);
11487                                 return;
11488                         }
11489                         break;
11490
11491                 case PORT_FEATURE_LINK_SPEED_2_5G:
11492                         if (bp->port.supported[idx] &
11493                             SUPPORTED_2500baseX_Full) {
11494                                 bp->link_params.req_line_speed[idx] =
11495                                         SPEED_2500;
11496                                 bp->port.advertising[idx] |=
11497                                         (ADVERTISED_2500baseX_Full |
11498                                                 ADVERTISED_TP);
11499                         } else {
11500                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11501                                     link_config,
11502                                     bp->link_params.speed_cap_mask[idx]);
11503                                 return;
11504                         }
11505                         break;
11506
11507                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11508                         if (bp->port.supported[idx] &
11509                             SUPPORTED_10000baseT_Full) {
11510                                 bp->link_params.req_line_speed[idx] =
11511                                         SPEED_10000;
11512                                 bp->port.advertising[idx] |=
11513                                         (ADVERTISED_10000baseT_Full |
11514                                                 ADVERTISED_FIBRE);
11515                         } else if (bp->port.supported[idx] &
11516                                    SUPPORTED_10000baseKR_Full) {
11517                                 bp->link_params.req_line_speed[idx] =
11518                                         SPEED_10000;
11519                                 bp->port.advertising[idx] |=
11520                                         (ADVERTISED_10000baseKR_Full |
11521                                                 ADVERTISED_FIBRE);
11522                         } else {
11523                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11524                                     link_config,
11525                                     bp->link_params.speed_cap_mask[idx]);
11526                                 return;
11527                         }
11528                         break;
11529                 case PORT_FEATURE_LINK_SPEED_20G:
11530                         bp->link_params.req_line_speed[idx] = SPEED_20000;
11531
11532                         break;
11533                 default:
11534                         BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11535                                   link_config);
11536                 bp->link_params.req_line_speed[idx] =
11537                                                 SPEED_AUTO_NEG;
11538                 bp->port.advertising[idx] =
11539                                 bp->port.supported[idx];
11540                         break;
11541                 }
11542
11543                 bp->link_params.req_flow_ctrl[idx] = (link_config &
11544                                          PORT_FEATURE_FLOW_CONTROL_MASK);
11545                 if (bp->link_params.req_flow_ctrl[idx] ==
11546                     BNX2X_FLOW_CTRL_AUTO) {
11547                         if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11548                                 bp->link_params.req_flow_ctrl[idx] =
11549                                                         BNX2X_FLOW_CTRL_NONE;
11550                         else
11551                                 bnx2x_set_requested_fc(bp);
11552                 }
11553
11554                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11555                                bp->link_params.req_line_speed[idx],
11556                                bp->link_params.req_duplex[idx],
11557                                bp->link_params.req_flow_ctrl[idx],
11558                                bp->port.advertising[idx]);
11559         }
11560 }
11561
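/* Assemble a 6-byte MAC address from the two shmem words: the upper 16
 * bits land in mac_buf[0..1] and the lower 32 bits in mac_buf[2..5],
 * both big-endian. For example, mac_hi 0x0010 and mac_lo 0x18421a2b
 * yield 00:10:18:42:1a:2b.
 */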
11562 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11563 {
11564         __be16 mac_hi_be = cpu_to_be16(mac_hi);
11565         __be32 mac_lo_be = cpu_to_be32(mac_lo);
11566         memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11567         memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11568 }
11569
11570 static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11571 {
11572         int port = BP_PORT(bp);
11573         u32 config;
11574         u32 ext_phy_type, ext_phy_config, eee_mode;
11575
11576         bp->link_params.bp = bp;
11577         bp->link_params.port = port;
11578
11579         bp->link_params.lane_config =
11580                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11581
11582         bp->link_params.speed_cap_mask[0] =
11583                 SHMEM_RD(bp,
11584                          dev_info.port_hw_config[port].speed_capability_mask) &
11585                 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11586         bp->link_params.speed_cap_mask[1] =
11587                 SHMEM_RD(bp,
11588                          dev_info.port_hw_config[port].speed_capability_mask2) &
11589                 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11590         bp->port.link_config[0] =
11591                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11592
11593         bp->port.link_config[1] =
11594                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11595
11596         bp->link_params.multi_phy_config =
11597                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11598         /* If the device is capable of WoL, set the default state according
11599          * to the HW
11600          */
11601         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11602         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11603                    (config & PORT_FEATURE_WOL_ENABLED));
11604
11605         if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11606             PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11607                 bp->flags |= NO_ISCSI_FLAG;
11608         if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11609             PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11610                 bp->flags |= NO_FCOE_FLAG;
11611
11612         BNX2X_DEV_INFO("lane_config 0x%08x  speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
11613                        bp->link_params.lane_config,
11614                        bp->link_params.speed_cap_mask[0],
11615                        bp->port.link_config[0]);
11616
11617         bp->link_params.switch_cfg = (bp->port.link_config[0] &
11618                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
11619         bnx2x_phy_probe(&bp->link_params);
11620         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11621
11622         bnx2x_link_settings_requested(bp);
11623
11624         /*
11625          * If connected directly, work with the internal PHY; otherwise, work
11626          * with the external PHY.
11627          */
11628         ext_phy_config =
11629                 SHMEM_RD(bp,
11630                          dev_info.port_hw_config[port].external_phy_config);
11631         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11632         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11633                 bp->mdio.prtad = bp->port.phy_addr;
11634
11635         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11636                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11637                 bp->mdio.prtad =
11638                         XGXS_EXT_PHY_ADDR(ext_phy_config);
11639
11640         /* Configure link feature according to nvram value */
11641         eee_mode = (((SHMEM_RD(bp, dev_info.
11642                       port_feature_config[port].eee_power_mode)) &
11643                      PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11644                     PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11645         if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11646                 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11647                                            EEE_MODE_ENABLE_LPI |
11648                                            EEE_MODE_OUTPUT_TIME;
11649         } else {
11650                 bp->link_params.eee_mode = 0;
11651         }
11652 }
11653
11654 void bnx2x_get_iscsi_info(struct bnx2x *bp)
11655 {
11656         u32 no_flags = NO_ISCSI_FLAG;
11657         int port = BP_PORT(bp);
11658         u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11659                                 drv_lic_key[port].max_iscsi_conn);
11660
11661         if (!CNIC_SUPPORT(bp)) {
11662                 bp->flags |= no_flags;
11663                 return;
11664         }
11665
11666         /* Get the number of maximum allowed iSCSI connections */
11667         bp->cnic_eth_dev.max_iscsi_conn =
11668                 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11669                 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11670
11671         BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11672                        bp->cnic_eth_dev.max_iscsi_conn);
11673
11674         /*
11675          * If the maximum allowed number of connections is zero,
11676          * disable the feature.
11677          */
11678         if (!bp->cnic_eth_dev.max_iscsi_conn)
11679                 bp->flags |= no_flags;
11680 }
11681
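/* Read this function's FCoE WWPN and WWNN from the MF configuration. */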
11682 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11683 {
11684         /* Port info */
11685         bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11686                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11687         bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11688                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11689
11690         /* Node info */
11691         bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11692                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11693         bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11694                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11695 }
11696
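/* Count the functions (MF mode) or ports (SF mode) on this path that
 * have FCoE enabled, so the FCoE exchange resources can be divided
 * between them.
 */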
11697 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11698 {
11699         u8 count = 0;
11700
11701         if (IS_MF(bp)) {
11702                 u8 fid;
11703
11704                 /* iterate over absolute function ids for this path: */
11705                 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11706                         if (IS_MF_SD(bp)) {
11707                                 u32 cfg = MF_CFG_RD(bp,
11708                                                     func_mf_config[fid].config);
11709
11710                                 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11711                                     ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11712                                             FUNC_MF_CFG_PROTOCOL_FCOE))
11713                                         count++;
11714                         } else {
11715                                 u32 cfg = MF_CFG_RD(bp,
11716                                                     func_ext_config[fid].
11717                                                                       func_cfg);
11718
11719                                 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11720                                     (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11721                                         count++;
11722                         }
11723                 }
11724         } else { /* SF */
11725                 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11726
11727                 for (port = 0; port < port_cnt; port++) {
11728                         u32 lic = SHMEM_RD(bp,
11729                                            drv_lic_key[port].max_fcoe_conn) ^
11730                                   FW_ENCODE_32BIT_PATTERN;
11731                         if (lic)
11732                                 count++;
11733                 }
11734         }
11735
11736         return count;
11737 }
11738
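/* Read the FCoE limits and WWNs: the connection count comes from the
 * license key, and the per-engine exchanges are split across the
 * FCoE-capable functions counted above.
 */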
11739 static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11740 {
11741         int port = BP_PORT(bp);
11742         int func = BP_ABS_FUNC(bp);
11743         u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11744                                 drv_lic_key[port].max_fcoe_conn);
11745         u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11746
11747         if (!CNIC_SUPPORT(bp)) {
11748                 bp->flags |= NO_FCOE_FLAG;
11749                 return;
11750         }
11751
11752         /* Get the number of maximum allowed FCoE connections */
11753         bp->cnic_eth_dev.max_fcoe_conn =
11754                 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11755                 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11756
11757         /* Calculate the number of maximum allowed FCoE tasks */
11758         bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11759
11760         /* check if FCoE resources must be shared between different functions */
11761         if (num_fcoe_func)
11762                 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11763
11764         /* Read the WWN: */
11765         if (!IS_MF(bp)) {
11766                 /* Port info */
11767                 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11768                         SHMEM_RD(bp,
11769                                  dev_info.port_hw_config[port].
11770                                  fcoe_wwn_port_name_upper);
11771                 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11772                         SHMEM_RD(bp,
11773                                  dev_info.port_hw_config[port].
11774                                  fcoe_wwn_port_name_lower);
11775
11776                 /* Node info */
11777                 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11778                         SHMEM_RD(bp,
11779                                  dev_info.port_hw_config[port].
11780                                  fcoe_wwn_node_name_upper);
11781                 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11782                         SHMEM_RD(bp,
11783                                  dev_info.port_hw_config[port].
11784                                  fcoe_wwn_node_name_lower);
11785         } else if (!IS_MF_SD(bp)) {
11786                 /* Read the WWN info only if the FCoE feature is enabled for
11787                  * this function.
11788                  */
11789                 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11790                         bnx2x_get_ext_wwn_info(bp, func);
11791         } else {
11792                 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11793                         bnx2x_get_ext_wwn_info(bp, func);
11794         }
11795
11796         BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11797
11798         /*
11799          * If the maximum allowed number of connections is zero,
11800          * disable the feature.
11801          */
11802         if (!bp->cnic_eth_dev.max_fcoe_conn) {
11803                 bp->flags |= NO_FCOE_FLAG;
11804                 eth_zero_addr(bp->fip_mac);
11805         }
11806 }
11807
11808 static void bnx2x_get_cnic_info(struct bnx2x *bp)
11809 {
11810         /*
11811          * iSCSI may be dynamically disabled, but reading the
11812          * info here lets the driver decrease its memory usage
11813          * if the feature is disabled for good.
11814          */
11815         bnx2x_get_iscsi_info(bp);
11816         bnx2x_get_fcoe_info(bp);
11817 }
11818
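/* Read the iSCSI and FCoE (FIP) MAC addresses - from the MF
 * configuration in multi-function mode, or from the port hw config
 * otherwise - and disable the matching feature when a MAC is invalid.
 */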
11819 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11820 {
11821         u32 val, val2;
11822         int func = BP_ABS_FUNC(bp);
11823         int port = BP_PORT(bp);
11824         u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11825         u8 *fip_mac = bp->fip_mac;
11826
11827         if (IS_MF(bp)) {
11828                 /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
11829                  * FCoE MAC is missing, the corresponding feature should be
11830                  * disabled. In non-SD mode, feature configuration comes
11831                  * from struct func_ext_config.
11832                  */
11833                 if (!IS_MF_SD(bp)) {
11834                         u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11835                         if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11836                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
11837                                                  iscsi_mac_addr_upper);
11838                                 val = MF_CFG_RD(bp, func_ext_config[func].
11839                                                 iscsi_mac_addr_lower);
11840                                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11841                                 BNX2X_DEV_INFO
11842                                         ("Read iSCSI MAC: %pM\n", iscsi_mac);
11843                         } else {
11844                                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11845                         }
11846
11847                         if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11848                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
11849                                                  fcoe_mac_addr_upper);
11850                                 val = MF_CFG_RD(bp, func_ext_config[func].
11851                                                 fcoe_mac_addr_lower);
11852                                 bnx2x_set_mac_buf(fip_mac, val, val2);
11853                                 BNX2X_DEV_INFO
11854                                         ("Read FCoE L2 MAC: %pM\n", fip_mac);
11855                         } else {
11856                                 bp->flags |= NO_FCOE_FLAG;
11857                         }
11858
11859                         bp->mf_ext_config = cfg;
11860
11861                 } else { /* SD MODE */
11862                         if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11863                                 /* use primary mac as iscsi mac */
11864                                 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11865
11866                                 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11867                                 BNX2X_DEV_INFO
11868                                         ("Read iSCSI MAC: %pM\n", iscsi_mac);
11869                         } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11870                                 /* use primary mac as fip mac */
11871                                 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11872                                 BNX2X_DEV_INFO("SD FCoE MODE\n");
11873                                 BNX2X_DEV_INFO
11874                                         ("Read FIP MAC: %pM\n", fip_mac);
11875                         }
11876                 }
11877
11878                 /* If this is a storage-only interface, use SAN mac as
11879                  * primary MAC. Notice that for SD this is already the case,
11880                  * as the SAN mac was copied from the primary MAC.
11881                  */
11882                 if (IS_MF_FCOE_AFEX(bp))
11883                         memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11884         } else {
11885                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11886                                 iscsi_mac_upper);
11887                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11888                                iscsi_mac_lower);
11889                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11890
11891                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11892                                 fcoe_fip_mac_upper);
11893                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11894                                fcoe_fip_mac_lower);
11895                 bnx2x_set_mac_buf(fip_mac, val, val2);
11896         }
11897
11898         /* Disable iSCSI OOO if MAC configuration is invalid. */
11899         if (!is_valid_ether_addr(iscsi_mac)) {
11900                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11901                 eth_zero_addr(iscsi_mac);
11902         }
11903
11904         /* Disable FCoE if MAC configuration is invalid. */
11905         if (!is_valid_ether_addr(fip_mac)) {
11906                 bp->flags |= NO_FCOE_FLAG;
11907                 eth_zero_addr(bp->fip_mac);
11908         }
11909 }
11910
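/* bnx2x_get_mac_hwinfo() - derive the primary MAC address for the
 * device. The source depends on configuration: a random MAC when the
 * MCP is unavailable, the per-function MF configuration in
 * multi-function mode, or the port configuration in single-function
 * mode. CNIC (iSCSI/FCoE) MACs are filled in by
 * bnx2x_get_cnic_mac_hwinfo() where CNIC is supported.
 */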
11911 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11912 {
11913         u32 val, val2;
11914         int func = BP_ABS_FUNC(bp);
11915         int port = BP_PORT(bp);
11916
11917         /* Zero primary MAC configuration */
11918         eth_zero_addr(bp->dev->dev_addr);
11919
11920         if (BP_NOMCP(bp)) {
11921                 BNX2X_ERROR("warning: random MAC workaround active\n");
11922                 eth_hw_addr_random(bp->dev);
11923         } else if (IS_MF(bp)) {
11924                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11925                 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11926                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11927                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11928                         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11929
11930                 if (CNIC_SUPPORT(bp))
11931                         bnx2x_get_cnic_mac_hwinfo(bp);
11932         } else {
11933                 /* in SF read MACs from port configuration */
11934                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11935                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11936                 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11937
11938                 if (CNIC_SUPPORT(bp))
11939                         bnx2x_get_cnic_mac_hwinfo(bp);
11940         }
11941
11942         if (!BP_NOMCP(bp)) {
11943                 /* Read physical port identifier from shmem */
11944                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11945                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11946                 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11947                 bp->flags |= HAS_PHYS_PORT_ID;
11948         }
11949
11950         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11951
11952         if (!is_valid_ether_addr(bp->dev->dev_addr))
11953                 dev_err(&bp->pdev->dev,
11954                         "bad Ethernet MAC address configuration: %pM\n"
11955                         "change it manually before bringing up the appropriate network interface\n",
11956                         bp->dev->dev_addr);
11957 }
11958
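/* bnx2x_get_dropless_info() - report whether "dropless" flow control
 * (pause on the host ring) is enabled in the NVRAM configuration. The
 * setting is per-function on non-E1x MF devices and per-port
 * otherwise; VFs never use it.
 */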
11959 static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11960 {
11961         int tmp;
11962         u32 cfg;
11963
11964         if (IS_VF(bp))
11965                 return false;
11966
11967         if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11968                 /* Take function: tmp = func */
11969                 tmp = BP_ABS_FUNC(bp);
11970                 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11971                 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11972         } else {
11973                 /* Take port: tmp = port */
11974                 tmp = BP_PORT(bp);
11975                 cfg = SHMEM_RD(bp,
11976                                dev_info.port_hw_config[tmp].generic_features);
11977                 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11978         }
11979         return cfg;
11980 }
11981
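/* validate_set_si_mode() - enter Switch-Independent MF mode only if
 * the upper MAC bytes in the MF configuration are valid (not 0xffff).
 */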
11982 static void validate_set_si_mode(struct bnx2x *bp)
11983 {
11984         u8 func = BP_ABS_FUNC(bp);
11985         u32 val;
11986
11987         val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11988
11989         /* check for legal mac (upper bytes) */
11990         if (val != 0xffff) {
11991                 bp->mf_mode = MULTI_FUNCTION_SI;
11992                 bp->mf_config[BP_VN(bp)] =
11993                         MF_CFG_RD(bp, func_mf_config[func].config);
11994         } else
11995                 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11996 }
11997
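/* bnx2x_get_hwinfo() - gather the HW configuration during probe:
 * validate that the chip is accessible, initialize IGU parameters
 * (forcing normal mode on non-E1x chips if needed), parse the
 * multi-function configuration from shmem, and read port, MAC and
 * CNIC information.
 */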
11998 static int bnx2x_get_hwinfo(struct bnx2x *bp)
11999 {
12000         int func = BP_ABS_FUNC(bp);     /* absolute function number */
12001         int vn;
12002         u32 val = 0, val2 = 0;
12003         int rc = 0;
12004
12005         /* Validate that chip access is feasible */
12006         if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
12007                 dev_err(&bp->pdev->dev,
12008                         "Chip read returns all Fs. Preventing probe from continuing\n");
12009                 return -EINVAL;
12010         }
12011
12012         bnx2x_get_common_hwinfo(bp);
12013
12014         /*
12015          * initialize IGU parameters
12016          */
12017         if (CHIP_IS_E1x(bp)) {
12018                 bp->common.int_block = INT_BLOCK_HC;
12019
12020                 bp->igu_dsb_id = DEF_SB_IGU_ID;
12021                 bp->igu_base_sb = 0;
12022         } else {
12023                 bp->common.int_block = INT_BLOCK_IGU;
12024
12025                 /* do not allow device reset during IGU info processing */
12026                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
12027
12028                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
12029
12030                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
12031                         int tout = 5000;
12032
12033                         BNX2X_DEV_INFO("FORCING Normal Mode\n");
12034
12035                         val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
12036                         REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
12037                         REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
12038
12039                         while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
12040                                 tout--;
12041                                 usleep_range(1000, 2000);
12042                         }
12043
12044                         if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
12045                                 dev_err(&bp->pdev->dev,
12046                                         "FORCING Normal Mode failed!!!\n");
12047                                 bnx2x_release_hw_lock(bp,
12048                                                       HW_LOCK_RESOURCE_RESET);
12049                                 return -EPERM;
12050                         }
12051                 }
12052
12053                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
12054                         BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
12055                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
12056                 } else
12057                         BNX2X_DEV_INFO("IGU Normal Mode\n");
12058
12059                 rc = bnx2x_get_igu_cam_info(bp);
12060                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
12061                 if (rc)
12062                         return rc;
12063         }
12064
12065         /*
12066          * Set the base FW non-default (fast path) status block ID. This
12067          * value initializes the fw_sb_id saved in the fp/queue structure,
12068          * which determines the ID used by the FW.
12069          */
12070         if (CHIP_IS_E1x(bp))
12071                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
12072         else /*
12073               * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
12074               * the same queue are indicated on the same IGU SB). So we prefer
12075               * FW and IGU SBs to be the same value.
12076               */
12077                 bp->base_fw_ndsb = bp->igu_base_sb;
12078
12079         BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
12080                        "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
12081                        bp->igu_sb_cnt, bp->base_fw_ndsb);
12082
12083         /*
12084          * Initialize MF configuration
12085          */
12086         bp->mf_ov = 0;
12087         bp->mf_mode = 0;
12088         bp->mf_sub_mode = 0;
12089         vn = BP_VN(bp);
12090
12091         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
12092                 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
12093                                bp->common.shmem2_base, SHMEM2_RD(bp, size),
12094                               (u32)offsetof(struct shmem2_region, mf_cfg_addr));
12095
12096                 if (SHMEM2_HAS(bp, mf_cfg_addr))
12097                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
12098                 else
12099                         bp->common.mf_cfg_base = bp->common.shmem_base +
12100                                 offsetof(struct shmem_region, func_mb) +
12101                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
12102                 /*
12103                  * Get the MF configuration:
12104                  * 1. Existence of MF configuration
12105                  * 2. MAC address must be legal (check only upper bytes)
12106                  *    for Switch-Independent mode;
12107                  *    OVLAN must be legal for Switch-Dependent mode
12108                  * 3. SF_MODE configures the specific MF mode
12109                  */
12110                 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12111                         /* get mf configuration */
12112                         val = SHMEM_RD(bp,
12113                                        dev_info.shared_feature_config.config);
12114                         val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
12115
12116                         switch (val) {
12117                         case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
12118                                 validate_set_si_mode(bp);
12119                                 break;
12120                         case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
12121                                 if ((!CHIP_IS_E1x(bp)) &&
12122                                     (MF_CFG_RD(bp, func_mf_config[func].
12123                                                mac_upper) != 0xffff) &&
12124                                     (SHMEM2_HAS(bp,
12125                                                 afex_driver_support))) {
12126                                         bp->mf_mode = MULTI_FUNCTION_AFEX;
12127                                         bp->mf_config[vn] = MF_CFG_RD(bp,
12128                                                 func_mf_config[func].config);
12129                                 } else {
12130                                         BNX2X_DEV_INFO("can not configure afex mode\n");
12131                                 }
12132                                 break;
12133                         case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
12134                                 /* get OV configuration */
12135                                 val = MF_CFG_RD(bp,
12136                                         func_mf_config[FUNC_0].e1hov_tag);
12137                                 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
12138
12139                                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12140                                         bp->mf_mode = MULTI_FUNCTION_SD;
12141                                         bp->mf_config[vn] = MF_CFG_RD(bp,
12142                                                 func_mf_config[func].config);
12143                                 } else
12144                                         BNX2X_DEV_INFO("illegal OV for SD\n");
12145                                 break;
12146                         case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
12147                                 bp->mf_mode = MULTI_FUNCTION_SD;
12148                                 bp->mf_sub_mode = SUB_MF_MODE_BD;
12149                                 bp->mf_config[vn] =
12150                                         MF_CFG_RD(bp,
12151                                                   func_mf_config[func].config);
12152
12153                                 if (SHMEM2_HAS(bp, mtu_size)) {
12154                                         int mtu_idx = BP_FW_MB_IDX(bp);
12155                                         u16 mtu_size;
12156                                         u32 mtu;
12157
12158                                         mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12159                                         mtu_size = (u16)mtu;
12160                                         DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
12161                                            mtu_size, mtu);
12162
12163                                         /* if valid: update device mtu */
12164                                         if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
12165                                             (mtu_size <=
12166                                              ETH_MAX_JUMBO_PACKET_SIZE))
12167                                                 bp->dev->mtu = mtu_size;
12168                                 }
12169                                 break;
12170                         case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
12171                                 bp->mf_mode = MULTI_FUNCTION_SD;
12172                                 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12173                                 bp->mf_config[vn] =
12174                                         MF_CFG_RD(bp,
12175                                                   func_mf_config[func].config);
12176                                 break;
12177                         case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
12178                                 bp->mf_config[vn] = 0;
12179                                 break;
12180                         case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
12181                                 val2 = SHMEM_RD(bp,
12182                                         dev_info.shared_hw_config.config_3);
12183                                 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
12184                                 switch (val2) {
12185                                 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
12186                                         validate_set_si_mode(bp);
12187                                         bp->mf_sub_mode =
12188                                                         SUB_MF_MODE_NPAR1_DOT_5;
12189                                         break;
12190                                 default:
12191                                         /* Unknown configuration */
12192                                         bp->mf_config[vn] = 0;
12193                                         BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
12194                                                        val2);
12195                                 }
12196                                 break;
12197                         default:
12198                                 /* Unknown configuration: reset mf_config */
12199                                 bp->mf_config[vn] = 0;
12200                                 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
12201                         }
12202                 }
12203
12204                 BNX2X_DEV_INFO("%s function mode\n",
12205                                IS_MF(bp) ? "multi" : "single");
12206
12207                 switch (bp->mf_mode) {
12208                 case MULTI_FUNCTION_SD:
12209                         val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12210                               FUNC_MF_CFG_E1HOV_TAG_MASK;
12211                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12212                                 bp->mf_ov = val;
12213                                 bp->path_has_ovlan = true;
12214
12215                                 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
12216                                                func, bp->mf_ov, bp->mf_ov);
12217                         } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12218                                    (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12219                                 dev_err(&bp->pdev->dev,
12220                                         "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
12221                                         func);
12222                                 bp->path_has_ovlan = true;
12223                         } else {
12224                                 dev_err(&bp->pdev->dev,
12225                                         "No valid MF OV for func %d, aborting\n",
12226                                         func);
12227                                 return -EPERM;
12228                         }
12229                         break;
12230                 case MULTI_FUNCTION_AFEX:
12231                         BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
12232                         break;
12233                 case MULTI_FUNCTION_SI:
12234                         BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
12235                                        func);
12236                         break;
12237                 default:
12238                         if (vn) {
12239                                 dev_err(&bp->pdev->dev,
12240                                         "VN %d is in a single function mode, aborting\n",
12241                                         vn);
12242                                 return -EPERM;
12243                         }
12244                         break;
12245                 }
12246
12247                 /* Check whether the other port on the path needs ovlan:
12248                  * since the MF configuration is shared between ports, the
12249                  * only possible mixed modes are
12250                  * {SF, SI}, {SF, SD}, {SD, SF} and {SI, SF}.
12251                  */
12252                 if (CHIP_MODE_IS_4_PORT(bp) &&
12253                     !bp->path_has_ovlan &&
12254                     !IS_MF(bp) &&
12255                     bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12256                         u8 other_port = !BP_PORT(bp);
12257                         u8 other_func = BP_PATH(bp) + 2*other_port;
12258                         val = MF_CFG_RD(bp,
12259                                         func_mf_config[other_func].e1hov_tag);
12260                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
12261                                 bp->path_has_ovlan = true;
12262                 }
12263         }
12264
12265         /* adjust igu_sb_cnt to MF for E1H */
12266         if (CHIP_IS_E1H(bp) && IS_MF(bp))
12267                 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
12268
12269         /* port info */
12270         bnx2x_get_port_hwinfo(bp);
12271
12272         /* Get MAC addresses */
12273         bnx2x_get_mac_hwinfo(bp);
12274
12275         bnx2x_get_cnic_info(bp);
12276
12277         return rc;
12278 }
12279
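/* bnx2x_read_fwinfo() - extract a vendor-specific version string from
 * the PCI VPD into bp->fw_ver. The VPD read-only block is located
 * first; if it extends past the initial BNX2X_VPD_LEN bytes, the rest
 * of the image is read into a temporary buffer. The string is only
 * filled in when the MFR_ID keyword matches PCI_VENDOR_ID_DELL.
 */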
12280 static void bnx2x_read_fwinfo(struct bnx2x *bp)
12281 {
12282         int cnt, i, block_end, rodi;
12283         char vpd_start[BNX2X_VPD_LEN+1];
12284         char str_id_reg[VENDOR_ID_LEN+1];
12285         char str_id_cap[VENDOR_ID_LEN+1];
12286         char *vpd_data;
12287         char *vpd_extended_data = NULL;
12288         u8 len;
12289
12290         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
12291         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12292
12293         if (cnt < BNX2X_VPD_LEN)
12294                 goto out_not_found;
12295
12296         /* The VPD RO tag should be the first tag after the identifier
12297          * string, so it should be found within the first BNX2X_VPD_LEN chars
12298          */
12299         i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
12300                              PCI_VPD_LRDT_RO_DATA);
12301         if (i < 0)
12302                 goto out_not_found;
12303
12304         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
12305                     pci_vpd_lrdt_size(&vpd_start[i]);
12306
12307         i += PCI_VPD_LRDT_TAG_SIZE;
12308
12309         if (block_end > BNX2X_VPD_LEN) {
12310                 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
12311                 if (!vpd_extended_data)
12312                         goto out_not_found;
12313
12314                 /* read rest of vpd image into vpd_extended_data */
12315                 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
12316                 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
12317                                    block_end - BNX2X_VPD_LEN,
12318                                    vpd_extended_data + BNX2X_VPD_LEN);
12319                 if (cnt < (block_end - BNX2X_VPD_LEN))
12320                         goto out_not_found;
12321                 vpd_data = vpd_extended_data;
12322         } else
12323                 vpd_data = vpd_start;
12324
12325         /* now vpd_data holds full vpd content in both cases */
12326
12327         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12328                                    PCI_VPD_RO_KEYWORD_MFR_ID);
12329         if (rodi < 0)
12330                 goto out_not_found;
12331
12332         len = pci_vpd_info_field_size(&vpd_data[rodi]);
12333
12334         if (len != VENDOR_ID_LEN)
12335                 goto out_not_found;
12336
12337         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12338
12339         /* vendor specific info */
12340         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
12341         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
12342         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
12343             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
12344
12345                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12346                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
12347                 if (rodi >= 0) {
12348                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
12349
12350                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12351
12352                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
12353                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
12354                                 bp->fw_ver[len] = ' ';
12355                         }
12356                 }
12357                 kfree(vpd_extended_data);
12358                 return;
12359         }
12360 out_not_found:
12361         kfree(vpd_extended_data);
12362         return;
12363 }
12364
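/* bnx2x_set_modes_bitmap() - collapse the chip revision, port count,
 * MF mode and host endianness into the MODE_* flags consumed by the
 * init code through INIT_MODE_FLAGS(bp).
 */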
12365 static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12366 {
12367         u32 flags = 0;
12368
12369         if (CHIP_REV_IS_FPGA(bp))
12370                 SET_FLAGS(flags, MODE_FPGA);
12371         else if (CHIP_REV_IS_EMUL(bp))
12372                 SET_FLAGS(flags, MODE_EMUL);
12373         else
12374                 SET_FLAGS(flags, MODE_ASIC);
12375
12376         if (CHIP_MODE_IS_4_PORT(bp))
12377                 SET_FLAGS(flags, MODE_PORT4);
12378         else
12379                 SET_FLAGS(flags, MODE_PORT2);
12380
12381         if (CHIP_IS_E2(bp))
12382                 SET_FLAGS(flags, MODE_E2);
12383         else if (CHIP_IS_E3(bp)) {
12384                 SET_FLAGS(flags, MODE_E3);
12385                 if (CHIP_REV(bp) == CHIP_REV_Ax)
12386                         SET_FLAGS(flags, MODE_E3_A0);
12387                 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
12388                         SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12389         }
12390
12391         if (IS_MF(bp)) {
12392                 SET_FLAGS(flags, MODE_MF);
12393                 switch (bp->mf_mode) {
12394                 case MULTI_FUNCTION_SD:
12395                         SET_FLAGS(flags, MODE_MF_SD);
12396                         break;
12397                 case MULTI_FUNCTION_SI:
12398                         SET_FLAGS(flags, MODE_MF_SI);
12399                         break;
12400                 case MULTI_FUNCTION_AFEX:
12401                         SET_FLAGS(flags, MODE_MF_AFEX);
12402                         break;
12403                 }
12404         } else
12405                 SET_FLAGS(flags, MODE_SF);
12406
12407 #if defined(__LITTLE_ENDIAN)
12408         SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12409 #else /*(__BIG_ENDIAN)*/
12410         SET_FLAGS(flags, MODE_BIG_ENDIAN);
12411 #endif
12412         INIT_MODE_FLAGS(bp) = flags;
12413 }
12414
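/* bnx2x_init_bp() - one-time driver-private initialization at probe
 * time: locks and slow-path work items, HW info (PF only), bp memory
 * allocation, previous-driver unload handling, and TPA, coalescing,
 * DCBX and timer defaults.
 */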
12415 static int bnx2x_init_bp(struct bnx2x *bp)
12416 {
12417         int func;
12418         int rc;
12419
12420         mutex_init(&bp->port.phy_mutex);
12421         mutex_init(&bp->fw_mb_mutex);
12422         mutex_init(&bp->drv_info_mutex);
12423         sema_init(&bp->stats_lock, 1);
12424         bp->drv_info_mng_owner = false;
12425         INIT_LIST_HEAD(&bp->vlan_reg);
12426
12427         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12428         INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12429         INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12430         INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12431         if (IS_PF(bp)) {
12432                 rc = bnx2x_get_hwinfo(bp);
12433                 if (rc)
12434                         return rc;
12435         } else {
12436                 eth_zero_addr(bp->dev->dev_addr);
12437         }
12438
12439         bnx2x_set_modes_bitmap(bp);
12440
12441         rc = bnx2x_alloc_mem_bp(bp);
12442         if (rc)
12443                 return rc;
12444
12445         bnx2x_read_fwinfo(bp);
12446
12447         func = BP_FUNC(bp);
12448
12449         /* need to reset chip if undi was active */
12450         if (IS_PF(bp) && !BP_NOMCP(bp)) {
12451                 /* init fw_seq */
12452                 bp->fw_seq =
12453                         SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12454                                                         DRV_MSG_SEQ_NUMBER_MASK;
12455                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12456
12457                 rc = bnx2x_prev_unload(bp);
12458                 if (rc) {
12459                         bnx2x_free_mem_bp(bp);
12460                         return rc;
12461                 }
12462         }
12463
12464         if (CHIP_REV_IS_FPGA(bp))
12465                 dev_err(&bp->pdev->dev, "FPGA detected\n");
12466
12467         if (BP_NOMCP(bp) && (func == 0))
12468                 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12469
12470         bp->disable_tpa = disable_tpa;
12471         bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
12472         /* Reduce memory usage in kdump environment by disabling TPA */
12473         bp->disable_tpa |= is_kdump_kernel();
12474
12475         /* Set TPA flags */
12476         if (bp->disable_tpa) {
12477                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12478                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12479         }
12480
12481         if (CHIP_IS_E1(bp))
12482                 bp->dropless_fc = 0;
12483         else
12484                 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12485
12486         bp->mrrs = mrrs;
12487
12488         bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12489         if (IS_VF(bp))
12490                 bp->rx_ring_size = MAX_RX_AVAIL;
12491
12492         /* make sure that the numbers are in the right granularity */
12493         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12494         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12495
12496         bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12497
12498         timer_setup(&bp->timer, bnx2x_timer, 0);
12499         bp->timer.expires = jiffies + bp->current_interval;
12500
12501         if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12502             SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12503             SHMEM2_HAS(bp, dcbx_en) &&
12504             SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12505             SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
12506             SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
12507                 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12508                 bnx2x_dcbx_init_params(bp);
12509         } else {
12510                 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12511         }
12512
12513         if (CHIP_IS_E1x(bp))
12514                 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12515         else
12516                 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12517
12518         /* multiple tx priority */
12519         if (IS_VF(bp))
12520                 bp->max_cos = 1;
12521         else if (CHIP_IS_E1x(bp))
12522                 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12523         else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12524                 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12525         else if (CHIP_IS_E3B0(bp))
12526                 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12527         else
12528                 BNX2X_ERR("unknown chip %x revision %x\n",
12529                           CHIP_NUM(bp), CHIP_REV(bp));
12530         BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12531
12532         /* We need at least one default status block for slow-path events,
12533          * second status block for the L2 queue, and a third status block for
12534          * CNIC if supported.
12535          */
12536         if (IS_VF(bp))
12537                 bp->min_msix_vec_cnt = 1;
12538         else if (CNIC_SUPPORT(bp))
12539                 bp->min_msix_vec_cnt = 3;
12540         else /* PF w/o cnic */
12541                 bp->min_msix_vec_cnt = 2;
12542         BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
12543
12544         bp->dump_preset_idx = 1;
12545
12546         return rc;
12547 }
12548
12549 /****************************************************************************
12550 * General service functions
12551 ****************************************************************************/
12552
12553 /*
12554  * net_device service functions
12555  */
12556
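/* bnx2x_open() - bring the device up: power it on, make sure any
 * pending parity recovery is completed (the first PF loaded on the
 * engine acts as the recovery leader), then load the NIC and replay
 * the UDP tunnel offload state.
 */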
12557 /* called with rtnl_lock */
12558 static int bnx2x_open(struct net_device *dev)
12559 {
12560         struct bnx2x *bp = netdev_priv(dev);
12561         int rc;
12562
12563         bp->stats_init = true;
12564
12565         netif_carrier_off(dev);
12566
12567         bnx2x_set_power_state(bp, PCI_D0);
12568
12569         /* If a parity error occurred during the unload, then attentions
12570          * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
12571          * want the first function loaded on the current engine to
12572          * complete the recovery.
12573          * Parity recovery is only relevant for the PF driver.
12574          */
12575         if (IS_PF(bp)) {
12576                 int other_engine = BP_PATH(bp) ? 0 : 1;
12577                 bool other_load_status, load_status;
12578                 bool global = false;
12579
12580                 other_load_status = bnx2x_get_load_status(bp, other_engine);
12581                 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12582                 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12583                     bnx2x_chk_parity_attn(bp, &global, true)) {
12584                         do {
12585                                 /* If there are attentions and they are in
12586                                  * global blocks, set the GLOBAL_RESET bit
12587                                  * regardless of whether this function will
12588                                  * be the one to complete the recovery.
12589                                  */
12590                                 if (global)
12591                                         bnx2x_set_reset_global(bp);
12592
12593                                 /* Only the first function on the current
12594                                  * engine should try to recover in open. In case
12595                                  * of attentions in global blocks, only the first
12596                                  * function in the chip should try to recover.
12597                                  */
12598                                 if ((!load_status &&
12599                                      (!global || !other_load_status)) &&
12600                                       bnx2x_trylock_leader_lock(bp) &&
12601                                       !bnx2x_leader_reset(bp)) {
12602                                         netdev_info(bp->dev,
12603                                                     "Recovered in open\n");
12604                                         break;
12605                                 }
12606
12607                                 /* recovery has failed... */
12608                                 bnx2x_set_power_state(bp, PCI_D3hot);
12609                                 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12610
12611                                 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12612                                           "If you still see this message after a few retries, then a power cycle is required.\n");
12613
12614                                 return -EAGAIN;
12615                         } while (0);
12616                 }
12617         }
12618
12619         bp->recovery_state = BNX2X_RECOVERY_DONE;
12620         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12621         if (rc)
12622                 return rc;
12623
12624         if (IS_PF(bp))
12625                 udp_tunnel_get_rx_info(dev);
12626
12627         return 0;
12628 }
12629
12630 /* called with rtnl_lock */
12631 static int bnx2x_close(struct net_device *dev)
12632 {
12633         struct bnx2x *bp = netdev_priv(dev);
12634
12635         /* Unload the driver, release IRQs */
12636         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12637
12638         return 0;
12639 }
12640
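/* Multicast list elements are carved out of whole pages: each page
 * holds one bnx2x_mcast_list_elem_group header followed by as many
 * bnx2x_mcast_list_elem entries as fit in the remainder
 * (MCAST_ELEMS_PER_PG below).
 */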
12641 struct bnx2x_mcast_list_elem_group {
12643         struct list_head mcast_group_link;
12644         struct bnx2x_mcast_list_elem mcast_elems[];
12645 };
12646
12647 #define MCAST_ELEMS_PER_PG \
12648         ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
12649         sizeof(struct bnx2x_mcast_list_elem))
12650
12651 static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
12652 {
12653         struct bnx2x_mcast_list_elem_group *current_mcast_group;
12654
12655         while (!list_empty(mcast_group_list)) {
12656                 current_mcast_group = list_first_entry(mcast_group_list,
12657                                       struct bnx2x_mcast_list_elem_group,
12658                                       mcast_group_link);
12659                 list_del(&current_mcast_group->mcast_group_link);
12660                 free_page((unsigned long)current_mcast_group);
12661         }
12662 }
12663
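/* Build p->mcast_list from the net_device multicast list, allocating a
 * fresh page-backed element group whenever the current one fills up.
 * On allocation failure, all groups acquired so far are released.
 */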
12664 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12665                                       struct bnx2x_mcast_ramrod_params *p,
12666                                       struct list_head *mcast_group_list)
12667 {
12668         struct bnx2x_mcast_list_elem *mc_mac;
12669         struct netdev_hw_addr *ha;
12670         struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
12671         int mc_count = netdev_mc_count(bp->dev);
12672         int offset = 0;
12673
12674         INIT_LIST_HEAD(&p->mcast_list);
12675         netdev_for_each_mc_addr(ha, bp->dev) {
12676                 if (!offset) {
12677                         current_mcast_group =
12678                                 (struct bnx2x_mcast_list_elem_group *)
12679                                 __get_free_page(GFP_ATOMIC);
12680                         if (!current_mcast_group) {
12681                                 bnx2x_free_mcast_macs_list(mcast_group_list);
12682                                 BNX2X_ERR("Failed to allocate mc MAC list\n");
12683                                 return -ENOMEM;
12684                         }
12685                         list_add(&current_mcast_group->mcast_group_link,
12686                                  mcast_group_list);
12687                 }
12688                 mc_mac = &current_mcast_group->mcast_elems[offset];
12689                 mc_mac->mac = bnx2x_mc_addr(ha);
12690                 list_add_tail(&mc_mac->link, &p->mcast_list);
12691                 offset++;
12692                 if (offset == MCAST_ELEMS_PER_PG)
12693                         offset = 0;
12694         }
12695         p->mcast_list_len = mc_count;
12696         return 0;
12697 }
12698
12699 /**
12700  * bnx2x_set_uc_list - configure a new unicast MAC list.
12701  *
12702  * @bp: driver handle
12703  *
12704  * We will use zero (0) as a MAC type for these MACs.
12705  */
12706 static int bnx2x_set_uc_list(struct bnx2x *bp)
12707 {
12708         int rc;
12709         struct net_device *dev = bp->dev;
12710         struct netdev_hw_addr *ha;
12711         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12712         unsigned long ramrod_flags = 0;
12713
12714         /* First schedule a clean-up of the old configuration */
12715         rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12716         if (rc < 0) {
12717                 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12718                 return rc;
12719         }
12720
12721         netdev_for_each_uc_addr(ha, dev) {
12722                 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12723                                        BNX2X_UC_LIST_MAC, &ramrod_flags);
12724                 if (rc == -EEXIST) {
12725                         DP(BNX2X_MSG_SP,
12726                            "Failed to schedule ADD operations: %d\n", rc);
12727                         /* do not treat adding same MAC as error */
12728                         rc = 0;
12729
12730                 } else if (rc < 0) {
12731
12732                         BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12733                                   rc);
12734                         return rc;
12735                 }
12736         }
12737
12738         /* Execute the pending commands */
12739         __set_bit(RAMROD_CONT, &ramrod_flags);
12740         return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12741                                  BNX2X_UC_LIST_MAC, &ramrod_flags);
12742 }
12743
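/* On E1x the multicast configuration cannot simply be replaced: the
 * old filters are deleted first and the current list is then added
 * (newer chips use BNX2X_MCAST_CMD_SET instead, see bnx2x_set_mc_list).
 */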
12744 static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
12745 {
12746         LIST_HEAD(mcast_group_list);
12747         struct net_device *dev = bp->dev;
12748         struct bnx2x_mcast_ramrod_params rparam = {NULL};
12749         int rc = 0;
12750
12751         rparam.mcast_obj = &bp->mcast_obj;
12752
12753         /* first, clear all configured multicast MACs */
12754         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12755         if (rc < 0) {
12756                 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12757                 return rc;
12758         }
12759
12760         /* then, configure the new MAC list */
12761         if (netdev_mc_count(dev)) {
12762                 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12763                 if (rc)
12764                         return rc;
12765
12766                 /* Now add the new MACs */
12767                 rc = bnx2x_config_mcast(bp, &rparam,
12768                                         BNX2X_MCAST_CMD_ADD);
12769                 if (rc < 0)
12770                         BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12771                                   rc);
12772
12773                 bnx2x_free_mcast_macs_list(&mcast_group_list);
12774         }
12775
12776         return rc;
12777 }
12778
12779 static int bnx2x_set_mc_list(struct bnx2x *bp)
12780 {
12781         LIST_HEAD(mcast_group_list);
12782         struct bnx2x_mcast_ramrod_params rparam = {NULL};
12783         struct net_device *dev = bp->dev;
12784         int rc = 0;
12785
12786         /* On older adapters, we need to flush and re-add filters */
12787         if (CHIP_IS_E1x(bp))
12788                 return bnx2x_set_mc_list_e1x(bp);
12789
12790         rparam.mcast_obj = &bp->mcast_obj;
12791
12792         if (netdev_mc_count(dev)) {
12793                 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12794                 if (rc)
12795                         return rc;
12796
12797                 /* Override the currently configured set of mc filters */
12798                 rc = bnx2x_config_mcast(bp, &rparam,
12799                                         BNX2X_MCAST_CMD_SET);
12800                 if (rc < 0)
12801                         BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12802                                   rc);
12803
12804                 bnx2x_free_mcast_macs_list(&mcast_group_list);
12805         } else {
12806                 /* If no mc addresses are required, flush the configuration */
12807                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12808                 if (rc < 0)
12809                         BNX2X_ERR("Failed to clear multicast configuration %d\n",
12810                                   rc);
12811         }
12812
12813         return rc;
12814 }
12815
12816 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12817 static void bnx2x_set_rx_mode(struct net_device *dev)
12818 {
12819         struct bnx2x *bp = netdev_priv(dev);
12820
12821         if (bp->state != BNX2X_STATE_OPEN) {
12822                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12823                 return;
12824         }
12825
12826         /* Schedule an SP task to handle the rest of the change */
12827         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, NETIF_MSG_IFUP);
12829 }
12830
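/* bnx2x_set_rx_mode_inner() - compute and apply the RX filtering mode
 * under netif_addr_lock_bh(). The lock is dropped around operations
 * that may sleep (unicast list programming on the PF, VF->PF requests)
 * and before deferring to the slow-path task.
 */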
12831 void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12832 {
12833         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12834
12835         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12836
12837         netif_addr_lock_bh(bp->dev);
12838
12839         if (bp->dev->flags & IFF_PROMISC) {
12840                 rx_mode = BNX2X_RX_MODE_PROMISC;
12841         } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12842                    ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12843                     CHIP_IS_E1(bp))) {
12844                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12845         } else {
12846                 if (IS_PF(bp)) {
12847                         /* some multicasts */
12848                         if (bnx2x_set_mc_list(bp) < 0)
12849                                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12850
12851                         /* release bh lock, as bnx2x_set_uc_list might sleep */
12852                         netif_addr_unlock_bh(bp->dev);
12853                         if (bnx2x_set_uc_list(bp) < 0)
12854                                 rx_mode = BNX2X_RX_MODE_PROMISC;
12855                         netif_addr_lock_bh(bp->dev);
12856                 } else {
12857                         /* configuring mcast to a vf involves sleeping (when we
12858                          * wait for the pf's response).
12859                          */
12860                         bnx2x_schedule_sp_rtnl(bp,
12861                                                BNX2X_SP_RTNL_VFPF_MCAST, 0);
12862                 }
12863         }
12864
12865         bp->rx_mode = rx_mode;
12866         /* handle ISCSI SD mode */
12867         if (IS_MF_ISCSI_ONLY(bp))
12868                 bp->rx_mode = BNX2X_RX_MODE_NONE;
12869
12870         /* Schedule the rx_mode command */
12871         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12872                 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12873                 netif_addr_unlock_bh(bp->dev);
12874                 return;
12875         }
12876
12877         if (IS_PF(bp)) {
12878                 bnx2x_set_storm_rx_mode(bp);
12879                 netif_addr_unlock_bh(bp->dev);
12880         } else {
12881                 /* VF will need to request the PF to make this change, and so
12882                  * the VF needs to release the bottom-half lock prior to the
12883                  * request (as it will likely require sleep on the VF side)
12884                  */
12885                 netif_addr_unlock_bh(bp->dev);
12886                 bnx2x_vfpf_storm_rx_mode(bp);
12887         }
12888 }
12889
12890 /* called with rtnl_lock */
12891 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12892                            int devad, u16 addr)
12893 {
12894         struct bnx2x *bp = netdev_priv(netdev);
12895         u16 value;
12896         int rc;
12897
12898         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12899            prtad, devad, addr);
12900
12901         /* The HW expects different devad if CL22 is used */
12902         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12903
12904         bnx2x_acquire_phy_lock(bp);
12905         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12906         bnx2x_release_phy_lock(bp);
12907         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12908
12909         if (!rc)
12910                 rc = value;
12911         return rc;
12912 }
12913
12914 /* called with rtnl_lock */
12915 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12916                             u16 addr, u16 value)
12917 {
12918         struct bnx2x *bp = netdev_priv(netdev);
12919         int rc;
12920
12921         DP(NETIF_MSG_LINK,
12922            "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12923            prtad, devad, addr, value);
12924
12925         /* The HW expects different devad if CL22 is used */
12926         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12927
12928         bnx2x_acquire_phy_lock(bp);
12929         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12930         bnx2x_release_phy_lock(bp);
12931         return rc;
12932 }
12933
12934 /* called with rtnl_lock */
12935 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12936 {
12937         struct bnx2x *bp = netdev_priv(dev);
12938         struct mii_ioctl_data *mdio = if_mii(ifr);
12939
12940         if (!netif_running(dev))
12941                 return -EAGAIN;
12942
12943         switch (cmd) {
12944         case SIOCSHWTSTAMP:
12945                 return bnx2x_hwtstamp_ioctl(bp, ifr);
12946         default:
12947                 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12948                    mdio->phy_id, mdio->reg_num, mdio->val_in);
12949                 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12950         }
12951 }
12952
12953 static int bnx2x_validate_addr(struct net_device *dev)
12954 {
12955         struct bnx2x *bp = netdev_priv(dev);
12956
12957         /* query the bulletin board for mac address configured by the PF */
12958         if (IS_VF(bp))
12959                 bnx2x_sample_bulletin(bp);
12960
12961         if (!is_valid_ether_addr(dev->dev_addr)) {
12962                 BNX2X_ERR("Non-valid Ethernet address\n");
12963                 return -EADDRNOTAVAIL;
12964         }
12965         return 0;
12966 }
12967
12968 static int bnx2x_get_phys_port_id(struct net_device *netdev,
12969                                   struct netdev_phys_item_id *ppid)
12970 {
12971         struct bnx2x *bp = netdev_priv(netdev);
12972
12973         if (!(bp->flags & HAS_PHYS_PORT_ID))
12974                 return -EOPNOTSUPP;
12975
12976         ppid->id_len = sizeof(bp->phys_port_id);
12977         memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12978
12979         return 0;
12980 }
12981
12982 static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12983                                               struct net_device *dev,
12984                                               netdev_features_t features)
12985 {
12986         /*
12987          * A skb with gso_size + header length > 9700 will cause a
12988          * firmware panic. Drop GSO support.
12989          *
12990          * Eventually the upper layer should not pass these packets down.
12991          *
12992          * For speed, if the gso_size is <= 9000, assume there will
12993          * not be 700 bytes of headers and pass it through. Only do a
12994          * full (slow) validation if the gso_size is > 9000.
12995          *
12996          * (Due to the way GSO_BY_FRAGS works this will also do a full
12997          * validation in that case.)
12998          */
12999         if (unlikely(skb_is_gso(skb) &&
13000                      (skb_shinfo(skb)->gso_size > 9000) &&
13001                      !skb_gso_validate_mac_len(skb, 9700)))
13002                 features &= ~NETIF_F_GSO_MASK;
13003
13004         features = vlan_features_check(skb, features);
13005         return vxlan_features_check(skb, features);
13006 }
13007
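/* Program a single VLAN filter: directly through the classification
 * object on the PF (waiting for completion), or via the VF->PF channel
 * on a VF.
 */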
13008 static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
13009 {
13010         int rc;
13011
13012         if (IS_PF(bp)) {
13013                 unsigned long ramrod_flags = 0;
13014
13015                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
13016                 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
13017                                         add, &ramrod_flags);
13018         } else {
13019                 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
13020         }
13021
13022         return rc;
13023 }
13024
13025 static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
13026 {
13027         struct bnx2x_vlan_entry *vlan;
13028         int rc = 0;
13029
13030         /* Configure all non-configured entries */
13031         list_for_each_entry(vlan, &bp->vlan_reg, link) {
13032                 if (vlan->hw)
13033                         continue;
13034
13035                 if (bp->vlan_cnt >= bp->vlan_credit)
13036                         return -ENOBUFS;
13037
13038                 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
13039                 if (rc) {
13040                         BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
13041                         return rc;
13042                 }
13043
13044                 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
13045                 vlan->hw = true;
13046                 bp->vlan_cnt++;
13047         }
13048
13049         return 0;
13050 }
13051
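/* Push the registered VLAN list to HW and raise or clear
 * accept-any-VLAN mode depending on whether the HW filter table could
 * hold all of it (bnx2x_vlan_configure_vid_list() fails with -ENOBUFS
 * once the VLAN credit is exhausted).
 */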
13052 static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
13053 {
13054         bool need_accept_any_vlan;
13055
13056         need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
13057
13058         if (bp->accept_any_vlan != need_accept_any_vlan) {
13059                 bp->accept_any_vlan = need_accept_any_vlan;
13060                 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
13061                    bp->accept_any_vlan ? "raised" : "cleared");
13062                 if (set_rx_mode) {
13063                         if (IS_PF(bp))
13064                                 bnx2x_set_rx_mode_inner(bp);
13065                         else
13066                                 bnx2x_vfpf_storm_rx_mode(bp);
13067                 }
13068         }
13069 }
13070
13071 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
13072 {
13073         /* Don't set rx mode here. Our caller will do it. */
13074         bnx2x_vlan_configure(bp, false);
13075
13076         return 0;
13077 }
13078
13079 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
13080 {
13081         struct bnx2x *bp = netdev_priv(dev);
13082         struct bnx2x_vlan_entry *vlan;
13083
13084         DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
13085
13086         vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
13087         if (!vlan)
13088                 return -ENOMEM;
13089
13090         vlan->vid = vid;
13091         vlan->hw = false;
13092         list_add_tail(&vlan->link, &bp->vlan_reg);
13093
13094         if (netif_running(dev))
13095                 bnx2x_vlan_configure(bp, true);
13096
13097         return 0;
13098 }
13099
13100 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
13101 {
13102         struct bnx2x *bp = netdev_priv(dev);
13103         struct bnx2x_vlan_entry *vlan;
13104         bool found = false;
13105         int rc = 0;
13106
13107         DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
13108
13109         list_for_each_entry(vlan, &bp->vlan_reg, link)
13110                 if (vlan->vid == vid) {
13111                         found = true;
13112                         break;
13113                 }
13114
13115         if (!found) {
13116                 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
13117                 return -EINVAL;
13118         }
13119
13120         if (netif_running(dev) && vlan->hw) {
13121                 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
13122                 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
13123                 bp->vlan_cnt--;
13124         }
13125
13126         list_del(&vlan->link);
13127         kfree(vlan);
13128
13129         if (netif_running(dev))
13130                 bnx2x_vlan_configure(bp, true);
13131
13132         DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
13133
13134         return rc;
13135 }
13136
13137 static const struct net_device_ops bnx2x_netdev_ops = {
13138         .ndo_open               = bnx2x_open,
13139         .ndo_stop               = bnx2x_close,
13140         .ndo_start_xmit         = bnx2x_start_xmit,
13141         .ndo_select_queue       = bnx2x_select_queue,
13142         .ndo_set_rx_mode        = bnx2x_set_rx_mode,
13143         .ndo_set_mac_address    = bnx2x_change_mac_addr,
13144         .ndo_validate_addr      = bnx2x_validate_addr,
13145         .ndo_do_ioctl           = bnx2x_ioctl,
13146         .ndo_change_mtu         = bnx2x_change_mtu,
13147         .ndo_fix_features       = bnx2x_fix_features,
13148         .ndo_set_features       = bnx2x_set_features,
13149         .ndo_tx_timeout         = bnx2x_tx_timeout,
13150         .ndo_vlan_rx_add_vid    = bnx2x_vlan_rx_add_vid,
13151         .ndo_vlan_rx_kill_vid   = bnx2x_vlan_rx_kill_vid,
13152         .ndo_setup_tc           = __bnx2x_setup_tc,
13153 #ifdef CONFIG_BNX2X_SRIOV
13154         .ndo_set_vf_mac         = bnx2x_set_vf_mac,
13155         .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
13156         .ndo_get_vf_config      = bnx2x_get_vf_config,
13157         .ndo_set_vf_spoofchk    = bnx2x_set_vf_spoofchk,
13158 #endif
13159 #ifdef NETDEV_FCOE_WWNN
13160         .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
13161 #endif
13162
13163         .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
13164         .ndo_set_vf_link_state  = bnx2x_set_vf_link_state,
13165         .ndo_features_check     = bnx2x_features_check,
13166         .ndo_udp_tunnel_add     = bnx2x_udp_tunnel_add,
13167         .ndo_udp_tunnel_del     = bnx2x_udp_tunnel_del,
13168 };
13169
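/* Prefer a 64-bit DMA mask and fall back to 32-bit if the platform
 * cannot provide it.
 */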
13170 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
13171 {
13172         struct device *dev = &bp->pdev->dev;
13173
13174         if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
13175             dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
13176                 dev_err(dev, "System does not support DMA, aborting\n");
13177                 return -EIO;
13178         }
13179
13180         return 0;
13181 }
13182
13183 static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
13184 {
13185         if (bp->flags & AER_ENABLED) {
13186                 pci_disable_pcie_error_reporting(bp->pdev);
13187                 bp->flags &= ~AER_ENABLED;
13188         }
13189 }
13190
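/* bnx2x_init_dev() - low-level PCI and netdev setup: enable the
 * device, sanity-check the BARs (a special revision-ID value indicates
 * a fan-failure condition), claim the regions, require PCIe and a
 * usable DMA mask, map BAR0 and determine the PF number.
 */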
13191 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13192                           struct net_device *dev, unsigned long board_type)
13193 {
13194         int rc;
13195         u32 pci_cfg_dword;
13196         bool chip_is_e1x = (board_type == BCM57710 ||
13197                             board_type == BCM57711 ||
13198                             board_type == BCM57711E);
13199
13200         SET_NETDEV_DEV(dev, &pdev->dev);
13201
13202         bp->dev = dev;
13203         bp->pdev = pdev;
13204
13205         rc = pci_enable_device(pdev);
13206         if (rc) {
13207                 dev_err(&bp->pdev->dev,
13208                         "Cannot enable PCI device, aborting\n");
13209                 goto err_out;
13210         }
13211
13212         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13213                 dev_err(&bp->pdev->dev,
13214                         "Cannot find PCI device base address, aborting\n");
13215                 rc = -ENODEV;
13216                 goto err_out_disable;
13217         }
13218
13219         if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13220                 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13221                 rc = -ENODEV;
13222                 goto err_out_disable;
13223         }
13224
13225         pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13226         if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13227             PCICFG_REVESION_ID_ERROR_VAL) {
13228                 pr_err("PCI device error, probably due to fan failure, aborting\n");
13229                 rc = -ENODEV;
13230                 goto err_out_disable;
13231         }
13232
13233         if (atomic_read(&pdev->enable_cnt) == 1) {
13234                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13235                 if (rc) {
13236                         dev_err(&bp->pdev->dev,
13237                                 "Cannot obtain PCI resources, aborting\n");
13238                         goto err_out_disable;
13239                 }
13240
13241                 pci_set_master(pdev);
13242                 pci_save_state(pdev);
13243         }
13244
13245         if (IS_PF(bp)) {
13246                 if (!pdev->pm_cap) {
13247                         dev_err(&bp->pdev->dev,
13248                                 "Cannot find power management capability, aborting\n");
13249                         rc = -EIO;
13250                         goto err_out_release;
13251                 }
13252         }
13253
13254         if (!pci_is_pcie(pdev)) {
13255                 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13256                 rc = -EIO;
13257                 goto err_out_release;
13258         }
13259
13260         rc = bnx2x_set_coherency_mask(bp);
13261         if (rc)
13262                 goto err_out_release;
13263
13264         dev->mem_start = pci_resource_start(pdev, 0);
13265         dev->base_addr = dev->mem_start;
13266         dev->mem_end = pci_resource_end(pdev, 0);
13267
13268         dev->irq = pdev->irq;
13269
13270         bp->regview = pci_ioremap_bar(pdev, 0);
13271         if (!bp->regview) {
13272                 dev_err(&bp->pdev->dev,
13273                         "Cannot map register space, aborting\n");
13274                 rc = -ENOMEM;
13275                 goto err_out_release;
13276         }
13277
	/* In E1/E1H use the PCI device function as given by the kernel.
	 * In E2/E3 read the physical function from the ME register, since
	 * these chips support Physical Device Assignment where the kernel
	 * BDF may be arbitrary (depending on the hypervisor).
	 */
	if (chip_is_e1x) {
		bp->pf_num = PCI_FUNC(pdev->devfn);
	} else {
		/* chip is E2/E3 */
		pci_read_config_dword(bp->pdev,
				      PCICFG_ME_REGISTER, &pci_cfg_dword);
		bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
				  ME_REG_ABS_PF_NUM_SHIFT);
	}
	BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	/* Set PCIe reset type to fundamental for EEH recovery */
	pdev->needs_freset = 1;

	/* AER (Advanced Error Reporting) configuration */
	rc = pci_enable_pcie_error_reporting(pdev);
	if (!rc)
		bp->flags |= AER_ENABLED;
	else
		BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);

	/*
	 * Clean the following indirect addresses for all functions since they
	 * are not used by the driver.
	 */
	if (IS_PF(bp)) {
		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);

		if (chip_is_e1x) {
			REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
		}

		/* Enable internal target-read (in case we are probed after PF
		 * FLR). Must be done prior to any BAR read access. Only for
		 * 57712 and up.
		 */
		if (!chip_is_e1x)
			REG_WR(bp,
			       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
	}

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(bp, dev);

	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
	if (!chip_is_e1x) {
		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
				    NETIF_F_GSO_IPXIP4 |
				    NETIF_F_GSO_UDP_TUNNEL |
				    NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_PARTIAL;

		dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
			NETIF_F_GSO_IPXIP4 |
			NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
			NETIF_F_GSO_PARTIAL;

		dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
					    NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

	if (IS_PF(bp)) {
		if (chip_is_e1x)
			bp->accept_any_vlan = true;
		else
			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	/* For VF we'll know whether to enable VLAN filtering after
	 * getting a response to CHANNEL_TLV_ACQUIRE from PF.
	 */

	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_LRO)
		dev->features &= ~NETIF_F_GRO_HW;

	/* Add Loopback capability to the device */
	dev->hw_features |= NETIF_F_LOOPBACK;

#ifdef BCM_DCBNL
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* MTU range, 46 - 9600 */
	dev->min_mtu = ETH_MIN_PACKET_SIZE;
	dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);

err_out:
	return rc;
}

static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	__be16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
		BNX2X_ERR("Wrong FW size\n");
		return -EINVAL;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			BNX2X_ERR("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (__force __be16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			BNX2X_ERR("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
		       BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

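/* Byte-swap n bytes worth of big-endian 32-bit firmware words into CPU
 * byte order.
 */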
static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/* Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
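/* For example, a (hypothetical) raw record 0x05 0x00 0x20 0x40,
 * 0x00 0x00 0x00 0x01 decodes to op = 0x05, offset = 0x002040,
 * raw_data = 0x00000001.
 */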
static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/* IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
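/* Each entry thus occupies three big-endian 32-bit words: base in the
 * first, m1/m2 packed high/low in the second, m3/size in the third.
 */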
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

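/* Allocate bp->arr with the length recorded in the firmware file header and
 * fill it via the given conversion routine; on allocation failure jump to
 * lbl, relying on the caller having set rc (e.g. to -ENOMEM) beforehand.
 */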
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr)							\
		goto lbl;						\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)

static int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (bp->firmware)
		return 0;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (!CHIP_IS_E1x(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}
	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n",
			  fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	rc = -ENOMEM;
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);
	bp->firmware = NULL;

	return rc;
}

static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
	bp->firmware = NULL;
}

static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
	.init_hw_cmn      = bnx2x_init_hw_common,
	.init_hw_port     = bnx2x_init_hw_port,
	.init_hw_func     = bnx2x_init_hw_func,

	.reset_hw_cmn     = bnx2x_reset_common,
	.reset_hw_port    = bnx2x_reset_port,
	.reset_hw_func    = bnx2x_reset_func,

	.gunzip_init      = bnx2x_gunzip_init,
	.gunzip_end       = bnx2x_gunzip_end,

	.init_fw          = bnx2x_init_firmware,
	.release_fw       = bnx2x_release_firmware,
};

void bnx2x__init_func_obj(struct bnx2x *bp)
{
	/* Prepare DMAE related driver resources */
	bnx2x_setup_dmae(bp);

	bnx2x_init_func_obj(bp, &bp->func_obj,
			    bnx2x_sp(bp, func_rdata),
			    bnx2x_sp_mapping(bp, func_rdata),
			    bnx2x_sp(bp, func_afex_rdata),
			    bnx2x_sp_mapping(bp, func_afex_rdata),
			    &bnx2x_func_sp_drv);
}

/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
	int cid_count = BNX2X_L2_MAX_CID(bp);

	if (IS_SRIOV(bp))
		cid_count += BNX2X_VF_CIDS;

	if (CNIC_SUPPORT(bp))
		cid_count += CNIC_CID_MAX;

	return roundup(cid_count, QM_CID_ROUND);
}

/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 *
 * @pdev:	pci device
 * @cnic_cnt:	number of SBs to reserve for CNIC
 *
 */
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
{
	int index;
	u16 control = 0;

	/*
	 * If MSI-X is not supported, return the number of SBs needed to
	 * support one fastpath queue: one FP queue + an SB for CNIC.
	 */
	if (!pdev->msix_cap) {
		dev_info(&pdev->dev, "no msix capability found\n");
		return 1 + cnic_cnt;
	}
	dev_info(&pdev->dev, "msix capability found\n");

	/*
	 * The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: the number of
	 * all SBs without the default SB.
	 * For VFs there is no default SB, so we return (index + 1).
	 */
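	/* For example, a (hypothetical) device exposing a 9-entry MSI-X table
	 * reports QSIZE = 8, i.e. the 8 non-default SBs we return here.
	 */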
	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);

	index = control & PCI_MSIX_FLAGS_QSIZE;

	return index;
}

static int set_max_cos_est(int chip_id)
{
	switch (chip_id) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		return BNX2X_MULTI_TX_COS_E1X;
	case BCM57712:
	case BCM57712_MF:
		return BNX2X_MULTI_TX_COS_E2_E3A0;
	case BCM57800:
	case BCM57800_MF:
	case BCM57810:
	case BCM57810_MF:
	case BCM57840_4_10:
	case BCM57840_2_20:
	case BCM57840_O:
	case BCM57840_MFO:
	case BCM57840_MF:
	case BCM57811:
	case BCM57811_MF:
		return BNX2X_MULTI_TX_COS_E3B0;
	case BCM57712_VF:
	case BCM57800_VF:
	case BCM57810_VF:
	case BCM57840_VF:
	case BCM57811_VF:
		return 1;
	default:
		pr_err("Unknown board_type (%d), aborting\n", chip_id);
		return -ENODEV;
	}
}

static int set_is_vf(int chip_id)
{
	switch (chip_id) {
	case BCM57712_VF:
	case BCM57800_VF:
	case BCM57810_VF:
	case BCM57840_VF:
	case BCM57811_VF:
		return true;
	default:
		return false;
	}
}

/* nig_tsgen registers' relative addresses */
#define tsgen_ctrl 0x0
#define tsgen_freecount 0x10
#define tsgen_synctime_t0 0x20
#define tsgen_offset_t0 0x28
#define tsgen_drift_t0 0x30
#define tsgen_synctime_t1 0x58
#define tsgen_offset_t1 0x60
#define tsgen_drift_t1 0x68

/* FW workaround for setting drift */
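/* The drift adjustment is handed to the FW through a SET_TIMESYNC ramrod
 * rather than written to the tsgen drift registers directly.
 */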
static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
					  int best_val, int best_period)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_set_timesync_params *set_timesync_params =
		&func_params.params.set_timesync;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;

	/* Function parameters */
	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
	set_timesync_params->add_sub_drift_adjust_value =
		drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
	set_timesync_params->drift_adjust_value = best_val;
	set_timesync_params->drift_adjust_period = best_period;

	return bnx2x_func_state_change(bp, &func_params);
}

static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
	int rc;
	int drift_dir = 1;
	int val, period, period1, period2, dif, dif1, dif2;
	int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;

	DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);

	if (!netif_running(bp->dev)) {
		DP(BNX2X_MSG_PTP,
		   "PTP adjfreq called while the interface is down\n");
		return -ENETDOWN;
	}

	if (ppb < 0) {
		ppb = -ppb;
		drift_dir = 0;
	}

	if (ppb == 0) {
		best_val = 1;
		best_period = 0x1FFFFFF;
	} else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
		best_val = 31;
		best_period = 1;
	} else {
		/* val = 8, 16 and 24 are not allowed, as these values
		 * are not supported by the workaround.
		 */
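		/* The achieved adjustment is roughly val * 10^6 / period ppb,
		 * so search for the (val, period) pair that comes closest to
		 * the requested ppb.
		 */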
		for (val = 0; val <= 31; val++) {
			if ((val & 0x7) == 0)
				continue;
			period1 = val * 1000000 / ppb;
			period2 = period1 + 1;
			if (period1 != 0)
				dif1 = ppb - (val * 1000000 / period1);
			else
				dif1 = BNX2X_MAX_PHC_DRIFT;
			if (dif1 < 0)
				dif1 = -dif1;
			dif2 = ppb - (val * 1000000 / period2);
			if (dif2 < 0)
				dif2 = -dif2;
			dif = (dif1 < dif2) ? dif1 : dif2;
			period = (dif1 < dif2) ? period1 : period2;
			if (dif < best_dif) {
				best_dif = dif;
				best_val = val;
				best_period = period;
			}
		}
	}

	rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
					    best_period);
	if (rc) {
		BNX2X_ERR("Failed to set drift\n");
		return -EFAULT;
	}

	DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
	   best_period);

	return 0;
}

static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);

	if (!netif_running(bp->dev)) {
		DP(BNX2X_MSG_PTP,
		   "PTP adjtime called while the interface is down\n");
		return -ENETDOWN;
	}

	DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);

	timecounter_adjtime(&bp->timecounter, delta);

	return 0;
}

static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
	u64 ns;

	if (!netif_running(bp->dev)) {
		DP(BNX2X_MSG_PTP,
		   "PTP gettime called while the interface is down\n");
		return -ENETDOWN;
	}

	ns = timecounter_read(&bp->timecounter);

	DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
			     const struct timespec64 *ts)
{
	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
	u64 ns;

	if (!netif_running(bp->dev)) {
		DP(BNX2X_MSG_PTP,
		   "PTP settime called while the interface is down\n");
		return -ENETDOWN;
	}

	ns = timespec64_to_ns(ts);

	DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);

	/* Re-init the timecounter */
	timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);

	return 0;
}

/* Enable (or disable) ancillary features of the phc subsystem */
static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
			    struct ptp_clock_request *rq, int on)
{
	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);

	BNX2X_ERR("PHC ancillary features are not supported\n");
	return -ENOTSUPP;
}

void bnx2x_register_phc(struct bnx2x *bp)
{
	/* Fill the ptp_clock_info struct and register PTP clock */
	bp->ptp_clock_info.owner = THIS_MODULE;
	snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
	bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
	bp->ptp_clock_info.n_alarm = 0;
	bp->ptp_clock_info.n_ext_ts = 0;
	bp->ptp_clock_info.n_per_out = 0;
	bp->ptp_clock_info.pps = 0;
	bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
	bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
	bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
	bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
	bp->ptp_clock_info.enable = bnx2x_ptp_enable;

	bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
	if (IS_ERR(bp->ptp_clock)) {
		bp->ptp_clock = NULL;
		BNX2X_ERR("PTP clock registration failed\n");
	}
}

static int bnx2x_init_one(struct pci_dev *pdev,
			  const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc, max_non_def_sbs;
	int rx_count, tx_count, rss_count, doorbell_size;
	int max_cos_est;
	bool is_vf;
	int cnic_cnt;

	/* Management FW 'remembers' living interfaces. Allow it some time
	 * to forget previously living interfaces, allowing a proper reload.
	 */
	if (is_kdump_kernel()) {
		ktime_t now = ktime_get_boottime();
		ktime_t fw_ready_time = ktime_set(5, 0);

		if (ktime_before(now, fw_ready_time))
			msleep(ktime_ms_delta(fw_ready_time, now));
	}

	/* Roughly estimate the maximum number of CoSes this chip may support
	 * according to the chip version, in order to minimize the memory
	 * allocated for Tx netdev_queue's. The accurate value is calculated
	 * during the initialization of bp->max_cos in bnx2x_init_bp(), based
	 * on the chip version AND chip revision.
	 */
	max_cos_est = set_max_cos_est(ent->driver_data);
	if (max_cos_est < 0)
		return max_cos_est;
	is_vf = set_is_vf(ent->driver_data);
	cnic_cnt = is_vf ? 0 : 1;

	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);

	/* add another SB for VF as it has no default SB */
	max_non_def_sbs += is_vf ? 1 : 0;

	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
	rss_count = max_non_def_sbs - cnic_cnt;

	if (rss_count < 1)
		return -EINVAL;

	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
	rx_count = rss_count + cnic_cnt;

	/* Maximum number of netdev Tx queues:
	 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
	 */
	tx_count = rss_count * max_cos_est + cnic_cnt;
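	/* E.g. (hypothetically) 16 RSS queues with up to 3 CoS each plus one
	 * CNIC queue give tx_count = 16 * 3 + 1 = 49.
	 */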

	/* dev is zeroed in alloc_etherdev_mqs() */
	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	bp->flags = 0;
	if (is_vf)
		bp->flags |= IS_VF_FLAG;

	bp->igu_sb_cnt = max_non_def_sbs;
	bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
	bp->msg_enable = debug;
	bp->cnic_support = cnic_cnt;
	bp->cnic_probe = bnx2x_cnic_probe;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	BNX2X_DEV_INFO("This is a %s function\n",
		       IS_PF(bp) ? "physical" : "virtual");
	BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
	BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
	BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
		       tx_count, rx_count);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp() to determine the number of
	 * l2 connections.
	 */
	if (IS_VF(bp)) {
		bp->doorbells = bnx2x_vf_doorbells(bp);
		rc = bnx2x_vf_pci_alloc(bp);
		if (rc)
			goto init_one_freemem;
	} else {
		doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
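		/* Each L2 CID owns a (1 << BNX2X_DB_SHIFT)-byte doorbell
		 * cell, so BAR 2 must be large enough to cover all of them;
		 * this is checked against pci_resource_len() below.
		 */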
		if (doorbell_size > pci_resource_len(pdev, 2)) {
			dev_err(&bp->pdev->dev,
				"Cannot map doorbells, bar size too small, aborting\n");
			rc = -ENOMEM;
			goto init_one_freemem;
		}
		bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
						doorbell_size);
	}
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto init_one_freemem;
	}

	if (IS_VF(bp)) {
		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
		if (rc)
			goto init_one_freemem;

#ifdef CONFIG_BNX2X_SRIOV
		/* A VF with an old hypervisor or an old PF does not support
		 * VLAN filtering.
		 */
		if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
			dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		}
#endif
	}

	/* Enable SRIOV if capability found in configuration space */
	rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
	if (rc)
		goto init_one_freemem;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
	BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);

	/* disable FCOE L2 queue for E1x */
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

	/* Set bp->num_queues for MSI-X mode */
	bnx2x_set_num_queues(bp);

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed.
	 */
	rc = bnx2x_set_int_mode(bp);
	if (rc) {
		dev_err(&pdev->dev, "Cannot set interrupts\n");
		goto init_one_freemem;
	}
	BNX2X_DEV_INFO("set interrupts successfully\n");

	/* register the net device */
	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_freemem;
	}
	BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);

	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	BNX2X_DEV_INFO(
	       "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
	       board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       dev->base_addr, bp->pdev->irq, dev->dev_addr);
	pcie_print_link_status(bp->pdev);

	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);

	return 0;

init_one_freemem:
	bnx2x_free_mem_bp(bp);

init_one_exit:
	bnx2x_disable_pcie_error_reporting(bp);

	if (bp->regview)
		iounmap(bp->regview);

	if (IS_PF(bp) && bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);

	return rc;
}

static void __bnx2x_remove(struct pci_dev *pdev,
			   struct net_device *dev,
			   struct bnx2x *bp,
			   bool remove_netdev)
{
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}

#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
	bnx2x_dcbnl_update_applist(bp, true);
#endif

	if (IS_PF(bp) &&
	    !BP_NOMCP(bp) &&
	    (bp->flags & BC_SUPPORTS_RMMOD_CMD))
		bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);

	/* Close the interface - either directly or implicitly */
	if (remove_netdev) {
		unregister_netdev(dev);
	} else {
		rtnl_lock();
		dev_close(dev);
		rtnl_unlock();
	}

	bnx2x_iov_remove_one(bp);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	if (IS_PF(bp)) {
		bnx2x_set_power_state(bp, PCI_D0);
		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);

		/* Set endianness registers to their reset values in case the
		 * next driver boots in a different-endianness environment.
		 */
		bnx2x_reset_endianity(bp);
	}

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	if (IS_PF(bp))
		bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->sp_rtnl_task);

	/* send message via vfpf channel to release the resources of this vf */
	if (IS_VF(bp))
		bnx2x_vfpf_release(bp);

	/* Assumes no further PCIe PM changes will occur */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	bnx2x_disable_pcie_error_reporting(bp);
	if (remove_netdev) {
		if (bp->regview)
			iounmap(bp->regview);

		/* For VFs, doorbells are part of the regview and were unmapped
		 * along with it. FW is only loaded by the PF.
		 */
		if (IS_PF(bp)) {
			if (bp->doorbells)
				iounmap(bp->doorbells);

			bnx2x_release_firmware(bp);
		} else {
			bnx2x_vf_pci_dealloc(bp);
		}
		bnx2x_free_mem_bp(bp);

		free_netdev(dev);

		if (atomic_read(&pdev->enable_cnt) == 1)
			pci_release_regions(pdev);

		pci_disable_device(pdev);
	}
}

static void bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	__bnx2x_remove(pdev, dev, bp, true);
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	if (CNIC_LOADED(bp))
		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	/* Stop Tx */
	bnx2x_tx_disable(bp);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);
	if (CNIC_LOADED(bp))
		bnx2x_del_all_napi_cnic(bp);
	netdev_reset_tc(bp->dev);

	del_timer_sync(&bp->timer);
	cancel_delayed_work_sync(&bp->sp_task);
	cancel_delayed_work_sync(&bp->period_task);

	if (!down_timeout(&bp->stats_lock, HZ / 10)) {
		bp->stats_state = STATS_STATE_DISABLED;
		up(&bp->stats_lock);
	}

	bnx2x_save_statistics(bp);

	netif_carrier_off(bp->dev);

	return 0;
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	BNX2X_ERR("IO error detected\n");

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	bnx2x_prev_path_mark_eeh(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	rtnl_lock();
	BNX2X_ERR("IO slot reset initializing...\n");
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	if (netif_running(dev)) {
		BNX2X_ERR("IO slot reset --> driver unload\n");

		/* MCP should have been reset; need to wait for validity */
		if (bnx2x_init_shmem(bp)) {
			rtnl_unlock();
			return PCI_ERS_RESULT_DISCONNECT;
		}

		if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
			u32 v;

			v = SHMEM2_RD(bp,
				      drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
			SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
				  v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
		}
		bnx2x_drain_tx_queues(bp);
		bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
		bnx2x_netif_stop(bp, 1);
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp, true);

		bp->sp_state = 0;
		bp->port.pmf = 0;

		bnx2x_prev_unload(bp);

		/* We should have reset the engine, so it's fair to
		 * assume the FW will no longer write to the bnx2x driver.
		 */
		bnx2x_squeeze_objects(bp);
		bnx2x_free_skbs(bp);
		for_each_rx_queue(bp, i)
			bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
		bnx2x_free_fp_mem(bp);
		bnx2x_free_mem(bp);

		bp->state = BNX2X_STATE_CLOSED;
	}

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
							DRV_MSG_SEQ_NUMBER_MASK;

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static void bnx2x_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev)
		return;

	bp = netdev_priv(dev);
	if (!bp)
		return;

	rtnl_lock();
	netif_device_detach(dev);
	rtnl_unlock();

	/* Don't remove the netdevice, as there are scenarios which will cause
	 * the kernel to hang, e.g., when trying to remove bnx2i while the
	 * rootfs is mounted from SAN.
	 */
	__bnx2x_remove(pdev, dev, bp, false);
}

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = bnx2x_remove_one,
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
#ifdef CONFIG_BNX2X_SRIOV
	.sriov_configure = bnx2x_sriov_configure,
#endif
	.shutdown    = bnx2x_shutdown,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}
	bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
	if (!bnx2x_iov_wq) {
		pr_err("Cannot create iov workqueue\n");
		destroy_workqueue(bnx2x_wq);
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
		destroy_workqueue(bnx2x_iov_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	struct list_head *pos, *q;

	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
	destroy_workqueue(bnx2x_iov_wq);

	/* Free globally allocated resources */
	list_for_each_safe(pos, q, &bnx2x_prev_list) {
		struct bnx2x_prev_path_list *tmp =
			list_entry(pos, struct bnx2x_prev_path_list, list);
		list_del(pos);
		kfree(tmp);
	}
}

void bnx2x_notify_link_changed(struct bnx2x *bp)
{
	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp:		driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
{
	unsigned long ramrod_flags = 0;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
				 &bp->iscsi_l2_mac_obj, true,
				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
}

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;
	int cxt_index, cxt_offset;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;
		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
				cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
					ILT_PAGE_CIDS;
				cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
					(cxt_index * ILT_PAGE_CIDS);
				bnx2x_set_ctx_validation(bp,
					&bp->context[cxt_index].
							 vcxt[cxt_offset].eth,
					BNX2X_ISCSI_ETH_CID(bp));
			}
		}

		/*
		 * There may be no more than 8 L2 and no more than 8 L5 SPEs
		 * in the air. We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
14607                 if (type == ETH_CONNECTION_TYPE) {
14608                         if (!atomic_read(&bp->cq_spq_left))
14609                                 break;
14610                         else
14611                                 atomic_dec(&bp->cq_spq_left);
14612                 } else if (type == NONE_CONNECTION_TYPE) {
14613                         if (!atomic_read(&bp->eq_spq_left))
14614                                 break;
14615                         else
14616                                 atomic_dec(&bp->eq_spq_left);
14617                 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14618                            (type == FCOE_CONNECTION_TYPE)) {
14619                         if (bp->cnic_spq_pending >=
14620                             bp->cnic_eth_dev.max_kwqe_pending)
14621                                 break;
14622                         else
14623                                 bp->cnic_spq_pending++;
14624                 } else {
14625                         BNX2X_ERR("Unknown SPE type: %d\n", type);
14626                         bnx2x_panic();
14627                         break;
14628                 }
14629
14630                 spe = bnx2x_sp_get_next(bp);
14631                 *spe = *bp->cnic_kwq_cons;
14632
14633                 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14634                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14635
14636                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14637                         bp->cnic_kwq_cons = bp->cnic_kwq;
14638                 else
14639                         bp->cnic_kwq_cons++;
14640         }
14641         bnx2x_sp_prod_update(bp);
14642         spin_unlock_bh(&bp->spq_lock);
14643 }
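
/* Worked example of the context-validation arithmetic above (illustrative
 * numbers only; the real values depend on ILT_PAGE_CIDS and the chip's CID
 * layout): if ILT_PAGE_CIDS were 1024 and BNX2X_ISCSI_ETH_CID(bp) were 4098,
 * then
 *
 *      cxt_index  = 4098 / 1024     = 4
 *      cxt_offset = 4098 - 4 * 1024 = 2
 *
 * i.e. the iSCSI L2 context sits in entry 2 of context page 4, which is the
 * element handed to bnx2x_set_ctx_validation().
 */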
14644
14645 static int bnx2x_cnic_sp_queue(struct net_device *dev,
14646                                struct kwqe_16 *kwqes[], u32 count)
14647 {
14648         struct bnx2x *bp = netdev_priv(dev);
14649         int i;
14650
14651 #ifdef BNX2X_STOP_ON_ERROR
14652         if (unlikely(bp->panic)) {
14653                 BNX2X_ERR("Can't post to SP queue while panic\n");
14654                 return -EIO;
14655         }
14656 #endif
14657
14658         if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14659             (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14660                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14661                 return -EAGAIN;
14662         }
14663
14664         spin_lock_bh(&bp->spq_lock);
14665
14666         for (i = 0; i < count; i++) {
14667                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14668
14669                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14670                         break;
14671
14672                 *bp->cnic_kwq_prod = *spe;
14673
14674                 bp->cnic_kwq_pending++;
14675
14676                 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14677                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
14678                    spe->data.update_data_addr.hi,
14679                    spe->data.update_data_addr.lo,
14680                    bp->cnic_kwq_pending);
14681
14682                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14683                         bp->cnic_kwq_prod = bp->cnic_kwq;
14684                 else
14685                         bp->cnic_kwq_prod++;
14686         }
14687
14688         spin_unlock_bh(&bp->spq_lock);
14689
14690         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14691                 bnx2x_cnic_sp_post(bp, 0);
14692
14693         return i;
14694 }
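
/* Illustrative caller pattern (hypothetical sketch, not taken from cnic.c):
 * the return value is the number of KWQEs actually queued, so a caller is
 * expected to hold on to the rejected tail and retry once SPQ credit is
 * returned, along the lines of:
 *
 *      int n = cp->drv_submit_kwqes_16(dev, kwqes, count);
 *      if (n >= 0 && n < count)
 *              requeue_remainder(kwqes + n, count - n);
 *
 * where requeue_remainder() stands in for whatever bookkeeping the caller
 * uses; it is not a real function in this driver.
 */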
14695
14696 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14697 {
14698         struct cnic_ops *c_ops;
14699         int rc = 0;
14700
14701         mutex_lock(&bp->cnic_mutex);
14702         c_ops = rcu_dereference_protected(bp->cnic_ops,
14703                                           lockdep_is_held(&bp->cnic_mutex));
14704         if (c_ops)
14705                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14706         mutex_unlock(&bp->cnic_mutex);
14707
14708         return rc;
14709 }
14710
14711 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14712 {
14713         struct cnic_ops *c_ops;
14714         int rc = 0;
14715
14716         rcu_read_lock();
14717         c_ops = rcu_dereference(bp->cnic_ops);
14718         if (c_ops)
14719                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14720         rcu_read_unlock();
14721
14722         return rc;
14723 }
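
/* The two senders above differ only in how they pin cnic_ops:
 * bnx2x_cnic_ctl_send() takes cnic_mutex and may sleep, while the _bh
 * variant uses a plain RCU read-side section so it is safe from BH/timer
 * context (e.g. the CFC completion path below). Neither blocks
 * bnx2x_unregister_cnic(), which NULLs the pointer and then calls
 * synchronize_rcu().
 */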
14724
14725 /*
14726  * For CNIC control commands that carry no data
14727  */
14728 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14729 {
14730         struct cnic_ctl_info ctl = {0};
14731
14732         ctl.cmd = cmd;
14733
14734         return bnx2x_cnic_ctl_send(bp, &ctl);
14735 }
14736
14737 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14738 {
14739         struct cnic_ctl_info ctl = {0};
14740
14741         /* first we tell CNIC and only then we count this as a completion */
14742         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14743         ctl.data.comp.cid = cid;
14744         ctl.data.comp.error = err;
14745
14746         bnx2x_cnic_ctl_send_bh(bp, &ctl);
14747         bnx2x_cnic_sp_post(bp, 0);
14748 }
14749
14750 /* Called with netif_addr_lock_bh() taken.
14751  * Sets an rx_mode config for an iSCSI ETH client.
14752  * Doesn't block.
14753  * Completion should be checked outside.
14754  */
14755 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14756 {
14757         unsigned long accept_flags = 0, ramrod_flags = 0;
14758         u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14759         int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14760
14761         if (start) {
14762                 /* Start accepting on iSCSI L2 ring. Accept all multicasts
14763                  * because it's the only way for the UIO Queue to accept
14764                  * multicasts (in non-promiscuous mode only one Queue per
14765                  * function, the leading one in our case, will receive
14766                  * multicast packets).
14767                  */
14768                 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14769                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14770                 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14771                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14772
14773                 /* Clear STOP_PENDING bit if START is requested */
14774                 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14775
14776                 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14777         } else
14778                 /* Clear START_PENDING bit if STOP is requested */
14779                 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14780
14781         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14782                 set_bit(sched_state, &bp->sp_state);
14783         else {
14784                 __set_bit(RAMROD_RX, &ramrod_flags);
14785                 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14786                                     ramrod_flags);
14787         }
14788 }
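
/* Note on the deferral above: if an rx_mode ramrod is already pending, only
 * the START/STOP "scheduled" bit is recorded in bp->sp_state; the actual
 * configuration is presumably replayed by the slowpath task once
 * BNX2X_FILTER_RX_MODE_PENDING clears, which is why callers wait on both
 * bits (see DRV_CTL_START_L2_CMD below).
 */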
14789
14790 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14791 {
14792         struct bnx2x *bp = netdev_priv(dev);
14793         int rc = 0;
14794
14795         switch (ctl->cmd) {
14796         case DRV_CTL_CTXTBL_WR_CMD: {
14797                 u32 index = ctl->data.io.offset;
14798                 dma_addr_t addr = ctl->data.io.dma_addr;
14799
14800                 bnx2x_ilt_wr(bp, index, addr);
14801                 break;
14802         }
14803
14804         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14805                 int count = ctl->data.credit.credit_count;
14806
14807                 bnx2x_cnic_sp_post(bp, count);
14808                 break;
14809         }
14810
14811         /* rtnl_lock is held.  */
14812         case DRV_CTL_START_L2_CMD: {
14813                 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14814                 unsigned long sp_bits = 0;
14815
14816                 /* Configure the iSCSI classification object */
14817                 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14818                                    cp->iscsi_l2_client_id,
14819                                    cp->iscsi_l2_cid, BP_FUNC(bp),
14820                                    bnx2x_sp(bp, mac_rdata),
14821                                    bnx2x_sp_mapping(bp, mac_rdata),
14822                                    BNX2X_FILTER_MAC_PENDING,
14823                                    &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14824                                    &bp->macs_pool);
14825
14826                 /* Set iSCSI MAC address */
14827                 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14828                 if (rc)
14829                         break;
14830
14831                 mmiowb();
14832                 barrier();
14833
14834                 /* Start accepting on iSCSI L2 ring */
14835
14836                 netif_addr_lock_bh(dev);
14837                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14838                 netif_addr_unlock_bh(dev);
14839
14840                 /* bits to wait on */
14841                 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14842                 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14843
14844                 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14845                         BNX2X_ERR("rx_mode completion timed out!\n");
14846
14847                 break;
14848         }
14849
14850         /* rtnl_lock is held.  */
14851         case DRV_CTL_STOP_L2_CMD: {
14852                 unsigned long sp_bits = 0;
14853
14854                 /* Stop accepting on iSCSI L2 ring */
14855                 netif_addr_lock_bh(dev);
14856                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14857                 netif_addr_unlock_bh(dev);
14858
14859                 /* bits to wait on */
14860                 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14861                 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14862
14863                 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14864                         BNX2X_ERR("rx_mode completion timed out!\n");
14865
14866                 mmiowb();
14867                 barrier();
14868
14869                 /* Unset iSCSI L2 MAC */
14870                 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14871                                         BNX2X_ISCSI_ETH_MAC, true);
14872                 break;
14873         }
14874         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14875                 int count = ctl->data.credit.credit_count;
14876
14877                 smp_mb__before_atomic();
14878                 atomic_add(count, &bp->cq_spq_left);
14879                 smp_mb__after_atomic();
14880                 break;
14881         }
14882         case DRV_CTL_ULP_REGISTER_CMD: {
14883                 int ulp_type = ctl->data.register_data.ulp_type;
14884
14885                 if (CHIP_IS_E3(bp)) {
14886                         int idx = BP_FW_MB_IDX(bp);
14887                         u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14888                         int path = BP_PATH(bp);
14889                         int port = BP_PORT(bp);
14890                         int i;
14891                         u32 scratch_offset;
14892                         u32 *host_addr;
14893
14894                         /* first write capability to shmem2 */
14895                         if (ulp_type == CNIC_ULP_ISCSI)
14896                                 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14897                         else if (ulp_type == CNIC_ULP_FCOE)
14898                                 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14899                         SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14900
14901                         if ((ulp_type != CNIC_ULP_FCOE) ||
14902                             (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14903                             (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14904                                 break;
14905
14906                         /* If we got here, write the FCoE capabilities */
14907                         scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14908                         if (!scratch_offset)
14909                                 break;
14910                         scratch_offset += offsetof(struct glob_ncsi_oem_data,
14911                                                    fcoe_features[path][port]);
14912                         host_addr = (u32 *) &(ctl->data.register_data.
14913                                               fcoe_features);
14914                         for (i = 0; i < sizeof(struct fcoe_capabilities);
14915                              i += 4)
14916                                 REG_WR(bp, scratch_offset + i,
14917                                        *(host_addr + i/4));
14918                 }
14919                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14920                 break;
14921         }
14922
14923         case DRV_CTL_ULP_UNREGISTER_CMD: {
14924                 int ulp_type = ctl->data.ulp_type;
14925
14926                 if (CHIP_IS_E3(bp)) {
14927                         int idx = BP_FW_MB_IDX(bp);
14928                         u32 cap;
14929
14930                         cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14931                         if (ulp_type == CNIC_ULP_ISCSI)
14932                                 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14933                         else if (ulp_type == CNIC_ULP_FCOE)
14934                                 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14935                         SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14936                 }
14937                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14938                 break;
14939         }
14940
14941         default:
14942                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14943                 rc = -EINVAL;
14944         }
14945
14946         /* For storage-only interfaces, change driver state */
14947         if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14948                 switch (ctl->drv_state) {
14949                 case DRV_NOP:
14950                         break;
14951                 case DRV_ACTIVE:
14952                         bnx2x_set_os_driver_state(bp,
14953                                                   OS_DRIVER_STATE_ACTIVE);
14954                         break;
14955                 case DRV_INACTIVE:
14956                         bnx2x_set_os_driver_state(bp,
14957                                                   OS_DRIVER_STATE_DISABLED);
14958                         break;
14959                 case DRV_UNLOADED:
14960                         bnx2x_set_os_driver_state(bp,
14961                                                   OS_DRIVER_STATE_NOT_LOADED);
14962                         break;
14963                 default:
14964                         BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
14965                 }
14966         }
14967
14968         return rc;
14969 }
14970
14971 static int bnx2x_get_fc_npiv(struct net_device *dev,
14972                              struct cnic_fc_npiv_tbl *cnic_tbl)
14973 {
14974         struct bnx2x *bp = netdev_priv(dev);
14975         struct bdn_fc_npiv_tbl *tbl = NULL;
14976         u32 offset, entries;
14977         int rc = -EINVAL;
14978         int i;
14979
14980         if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14981                 goto out;
14982
14983         DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
14984
14985         tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14986         if (!tbl) {
14987                 BNX2X_ERR("Failed to allocate fc_npiv table\n");
14988                 goto out;
14989         }
14990
14991         offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14992         if (!offset) {
14993                 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
14994                 goto out;
14995         }
14996         DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
14997
14998         /* Read the table contents from nvram */
14999         if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
15000                 BNX2X_ERR("Failed to read FC-NPIV table\n");
15001                 goto out;
15002         }
15003
15004         /* Since bnx2x_nvram_read() returns data in be32, we need to convert
15005          * the number of entries back to cpu endianness.
15006          */
15007         entries = tbl->fc_npiv_cfg.num_of_npiv;
15008         entries = (__force u32)be32_to_cpu((__force __be32)entries);
15009         tbl->fc_npiv_cfg.num_of_npiv = entries;
15010
15011         if (!tbl->fc_npiv_cfg.num_of_npiv) {
15012                 DP(BNX2X_MSG_MCP,
15013                    "No FC-NPIV table [valid, simply not present]\n");
15014                 goto out;
15015         } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
15016                 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
15017                           tbl->fc_npiv_cfg.num_of_npiv);
15018                 goto out;
15019         } else {
15020                 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
15021                    tbl->fc_npiv_cfg.num_of_npiv);
15022         }
15023
15024         /* Copy the data into cnic-provided struct */
15025         cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
15026         for (i = 0; i < cnic_tbl->count; i++) {
15027                 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
15028                 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
15029         }
15030
15031         rc = 0;
15032 out:
15033         kfree(tbl);
15034         return rc;
15035 }
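
/* Endianness example for the conversion above (illustrative): if NVRAM holds
 * num_of_npiv == 2, bnx2x_nvram_read() deposits the big-endian image
 * 0x00 0x00 0x00 0x02 into the field, which a little-endian CPU reads back
 * as 0x02000000; be32_to_cpu() recovers the value 2. On a big-endian host
 * the conversion is a no-op.
 */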
15036
15037 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
15038 {
15039         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15040
15041         if (bp->flags & USING_MSIX_FLAG) {
15042                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
15043                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
15044                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
15045         } else {
15046                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
15047                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
15048         }
15049         if (!CHIP_IS_E1x(bp))
15050                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
15051         else
15052                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
15053
15054         cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
15055         cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
15056         cp->irq_arr[1].status_blk = bp->def_status_blk;
15057         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
15058         cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
15059
15060         cp->num_irq = 2;
15061 }
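
/* Vector layout assumed here: in MSI-X mode bp->msix_table[0] carries the
 * default status block and entry 1 is reserved for CNIC, which is why
 * irq_arr[0].vector is taken from msix_table[1]. irq_arr[1] always describes
 * the default SB so CNIC can follow slowpath events as well.
 */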
15062
15063 void bnx2x_setup_cnic_info(struct bnx2x *bp)
15064 {
15065         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15066
15067         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
15068                              bnx2x_cid_ilt_lines(bp);
15069         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
15070         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
15071         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
15072
15073         DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
15074            BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
15075            cp->iscsi_l2_cid);
15076
15077         if (NO_ISCSI_OOO(bp))
15078                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
15079 }
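
/* Example of the starting_cid math above (illustrative numbers): L2 CIDs
 * occupy the first bnx2x_cid_ilt_lines(bp) ILT lines, so if the PF used
 * 2 lines and ILT_PAGE_CIDS were 1024, CNIC's CID space would begin at
 * 2 * 1024 = 2048, right after the last L2 CID.
 */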
15080
15081 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
15082                                void *data)
15083 {
15084         struct bnx2x *bp = netdev_priv(dev);
15085         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15086         int rc;
15087
15088         DP(NETIF_MSG_IFUP, "Register_cnic called\n");
15089
15090         if (ops == NULL) {
15091                 BNX2X_ERR("NULL ops received\n");
15092                 return -EINVAL;
15093         }
15094
15095         if (!CNIC_SUPPORT(bp)) {
15096                 BNX2X_ERR("Can't register CNIC when not supported\n");
15097                 return -EOPNOTSUPP;
15098         }
15099
15100         if (!CNIC_LOADED(bp)) {
15101                 rc = bnx2x_load_cnic(bp);
15102                 if (rc) {
15103                         BNX2X_ERR("CNIC-related load failed\n");
15104                         return rc;
15105                 }
15106         }
15107
15108         bp->cnic_enabled = true;
15109
15110         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
15111         if (!bp->cnic_kwq)
15112                 return -ENOMEM;
15113
15114         bp->cnic_kwq_cons = bp->cnic_kwq;
15115         bp->cnic_kwq_prod = bp->cnic_kwq;
15116         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
15117
15118         bp->cnic_spq_pending = 0;
15119         bp->cnic_kwq_pending = 0;
15120
15121         bp->cnic_data = data;
15122
15123         cp->num_irq = 0;
15124         cp->drv_state |= CNIC_DRV_STATE_REGD;
15125         cp->iro_arr = bp->iro_arr;
15126
15127         bnx2x_setup_cnic_irq_info(bp);
15128
15129         rcu_assign_pointer(bp->cnic_ops, ops);
15130
15131         /* Schedule driver to read CNIC driver versions */
15132         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
15133
15134         return 0;
15135 }
15136
15137 static int bnx2x_unregister_cnic(struct net_device *dev)
15138 {
15139         struct bnx2x *bp = netdev_priv(dev);
15140         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15141
15142         mutex_lock(&bp->cnic_mutex);
15143         cp->drv_state = 0;
15144         RCU_INIT_POINTER(bp->cnic_ops, NULL);
15145         mutex_unlock(&bp->cnic_mutex);
15146         synchronize_rcu();
15147         bp->cnic_enabled = false;
15148         kfree(bp->cnic_kwq);
15149         bp->cnic_kwq = NULL;
15150
15151         return 0;
15152 }
15153
15154 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
15155 {
15156         struct bnx2x *bp = netdev_priv(dev);
15157         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15158
15159         /* If both iSCSI and FCoE are disabled - return NULL in
15160          * order to indicate to CNIC that it should not try to work
15161          * with this device.
15162          */
15163         if (NO_ISCSI(bp) && NO_FCOE(bp))
15164                 return NULL;
15165
15166         cp->drv_owner = THIS_MODULE;
15167         cp->chip_id = CHIP_ID(bp);
15168         cp->pdev = bp->pdev;
15169         cp->io_base = bp->regview;
15170         cp->io_base2 = bp->doorbells;
15171         cp->max_kwqe_pending = 8;
15172         cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
15173         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
15174                              bnx2x_cid_ilt_lines(bp);
15175         cp->ctx_tbl_len = CNIC_ILT_LINES;
15176         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
15177         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
15178         cp->drv_ctl = bnx2x_drv_ctl;
15179         cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
15180         cp->drv_register_cnic = bnx2x_register_cnic;
15181         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
15182         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
15183         cp->iscsi_l2_client_id =
15184                 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
15185         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
15186
15187         if (NO_ISCSI_OOO(bp))
15188                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
15189
15190         if (NO_ISCSI(bp))
15191                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
15192
15193         if (NO_FCOE(bp))
15194                 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
15195
15196         BNX2X_DEV_INFO(
15197                 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
15198                 cp->ctx_blk_size,
15199                 cp->ctx_tbl_offset,
15200                 cp->ctx_tbl_len,
15201                 cp->starting_cid);
15202         return cp;
15203 }
15204
15205 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
15206 {
15207         struct bnx2x *bp = fp->bp;
15208         u32 offset = BAR_USTRORM_INTMEM;
15209
15210         if (IS_VF(bp))
15211                 return bnx2x_vf_ustorm_prods_offset(bp, fp);
15212         else if (!CHIP_IS_E1x(bp))
15213                 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
15214         else
15215                 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
15216
15217         return offset;
15218 }
15219
15220 /* Called only on E1H or E2.
15221  * When pretending to be a PF, the pretend value is the function number (0..7).
15222  * When pretending to be a VF, the pretend value is the
15223  * PF-num:VF-valid:ABS-VFID combination.
15224  */
15225 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
15226 {
15227         u32 pretend_reg;
15228
15229         if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
15230                 return -1;
15231
15232         /* get my own pretend register */
15233         pretend_reg = bnx2x_get_pretend_reg(bp);
15234         REG_WR(bp, pretend_reg, pretend_func_val);
15235         REG_RD(bp, pretend_reg);
15236         return 0;
15237 }
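
/* The REG_RD() of the pretend register immediately after the REG_WR() acts
 * as a flush: it forces the posted write out to the chip so that every
 * subsequent register access already executes in the pretended function's
 * context.
 */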
15238
15239 static void bnx2x_ptp_task(struct work_struct *work)
15240 {
15241         struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
15242         int port = BP_PORT(bp);
15243         u32 val_seq;
15244         u64 timestamp, ns;
15245         struct skb_shared_hwtstamps shhwtstamps;
15246
15247         /* Read Tx timestamp registers */
15248         val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15249                          NIG_REG_P0_TLLH_PTP_BUF_SEQID);
15250         if (val_seq & 0x10000) {
15251                 /* There is a valid timestamp value */
15252                 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
15253                                    NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
15254                 timestamp <<= 32;
15255                 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
15256                                     NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
15257                 /* Reset timestamp register to allow new timestamp */
15258                 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15259                        NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15260                 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15261
15262                 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
15263                 shhwtstamps.hwtstamp = ns_to_ktime(ns);
15264                 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
15265                 dev_kfree_skb_any(bp->ptp_tx_skb);
15266                 bp->ptp_tx_skb = NULL;
15267
15268                 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
15269                    timestamp, ns);
15270         } else {
15271                 DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
15272                 /* Reschedule to keep checking for a valid timestamp value */
15273                 schedule_work(&bp->ptp_task);
15274         }
15275 }
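
/* SEQID register convention used above: bit 16 (0x10000) is the "timestamp
 * valid" flag. Writing 0x10000 back clears the latched timestamp and arms
 * the buffer for the next PTP Tx packet; until the NIG sets the bit, the
 * work item simply reschedules itself and polls again.
 */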
15276
15277 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
15278 {
15279         int port = BP_PORT(bp);
15280         u64 timestamp, ns;
15281
15282         timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
15283                             NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
15284         timestamp <<= 32;
15285         timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
15286                             NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
15287
15288         /* Reset timestamp register to allow new timestamp */
15289         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15290                NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15291
15292         ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15293
15294         skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
15295
15296         DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
15297            timestamp, ns);
15298 }
15299
15300 /* Read the PHC */
15301 static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
15302 {
15303         struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
15304         int port = BP_PORT(bp);
15305         u32 wb_data[2];
15306         u64 phc_cycles;
15307
15308         REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
15309                     NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
15310         phc_cycles = wb_data[1];
15311         phc_cycles = (phc_cycles << 32) + wb_data[0];
15312
15313         DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
15314
15315         return phc_cycles;
15316 }
15317
15318 static void bnx2x_init_cyclecounter(struct bnx2x *bp)
15319 {
15320         memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
15321         bp->cyclecounter.read = bnx2x_cyclecounter_read;
15322         bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
15323         bp->cyclecounter.shift = 0;
15324         bp->cyclecounter.mult = 1;
15325 }
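
/* With mult == 1 and shift == 0 the generic timecounter conversion
 *
 *      ns = (cycles * cc->mult) >> cc->shift
 *
 * degenerates to ns = cycles, i.e. the free-running PHC counter is treated
 * as ticking once per nanosecond and timecounter_cyc2time() only has to add
 * the offset established at timecounter_init() time.
 */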
15326
15327 static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
15328 {
15329         struct bnx2x_func_state_params func_params = {NULL};
15330         struct bnx2x_func_set_timesync_params *set_timesync_params =
15331                 &func_params.params.set_timesync;
15332
15333         /* Prepare parameters for function state transitions */
15334         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
15335         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
15336
15337         func_params.f_obj = &bp->func_obj;
15338         func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
15339
15340         /* Function parameters */
15341         set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
15342         set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
15343
15344         return bnx2x_func_state_change(bp, &func_params);
15345 }
15346
15347 static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
15348 {
15349         struct bnx2x_queue_state_params q_params;
15350         int rc, i;
15351
15352         /* send queue update ramrod to enable PTP packets */
15353         memset(&q_params, 0, sizeof(q_params));
15354         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
15355         q_params.cmd = BNX2X_Q_CMD_UPDATE;
15356         __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
15357                   &q_params.params.update.update_flags);
15358         __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
15359                   &q_params.params.update.update_flags);
15360
15361         /* send the ramrod on all the queues of the PF */
15362         for_each_eth_queue(bp, i) {
15363                 struct bnx2x_fastpath *fp = &bp->fp[i];
15364
15365                 /* Set the appropriate Queue object */
15366                 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
15367
15368                 /* Update the Queue state */
15369                 rc = bnx2x_queue_state_change(bp, &q_params);
15370                 if (rc) {
15371                         BNX2X_ERR("Failed to enable PTP packets\n");
15372                         return rc;
15373                 }
15374         }
15375
15376         return 0;
15377 }
15378
15379 int bnx2x_configure_ptp_filters(struct bnx2x *bp)
15380 {
15381         int port = BP_PORT(bp);
15382         int rc;
15383
15384         if (!bp->hwtstamp_ioctl_called)
15385                 return 0;
15386
15387         switch (bp->tx_type) {
15388         case HWTSTAMP_TX_ON:
15389                 bp->flags |= TX_TIMESTAMPING_EN;
15390                 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15391                        NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
15392                 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15393                        NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
15394                 break;
15395         case HWTSTAMP_TX_ONESTEP_SYNC:
15396                 BNX2X_ERR("One-step timestamping is not supported\n");
15397                 return -ERANGE;
15398         }
15399
15400         switch (bp->rx_filter) {
15401         case HWTSTAMP_FILTER_NONE:
15402                 break;
15403         case HWTSTAMP_FILTER_ALL:
15404         case HWTSTAMP_FILTER_SOME:
15405         case HWTSTAMP_FILTER_NTP_ALL:
15406                 bp->rx_filter = HWTSTAMP_FILTER_NONE;
15407                 break;
15408         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
15409         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
15410         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
15411                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
15412                 /* Initialize PTP detection for UDP/IPv4 events */
15413                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15414                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
15415                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15416                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
15417                 break;
15418         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
15419         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
15420         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
15421                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
15422                 /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
15423                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15424                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
15425                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15426                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
15427                 break;
15428         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
15429         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
15430         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
15431                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
15432                 /* Initialize PTP detection L2 events */
15433                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15434                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
15435                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15436                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
15437
15438                 break;
15439         case HWTSTAMP_FILTER_PTP_V2_EVENT:
15440         case HWTSTAMP_FILTER_PTP_V2_SYNC:
15441         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
15442                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
15443                 /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
15444                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15445                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
15446                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15447                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
15448                 break;
15449         }
15450
15451         /* Indicate to FW that this PF expects recorded PTP packets */
15452         rc = bnx2x_enable_ptp_packets(bp);
15453         if (rc)
15454                 return rc;
15455
15456         /* Enable sending PTP packets to host */
15457         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15458                NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
15459
15460         return 0;
15461 }
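
/* On the mask encoding above: bnx2x_configure_ptp() below resets
 * PARAM_MASK/RULE_MASK to all-ones (0x7FF/0x3FFF), which disables all PTP
 * matching, and each rx_filter case clears a subset of bits. The values
 * (0x7EE/0x3FFE, 0x6AA/0x3EEE, ...) therefore appear to be "match-disable"
 * bitmaps where a cleared bit enables detection of one PTP message or
 * encapsulation type; the exact bit assignment is NIG-internal.
 */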
15462
15463 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
15464 {
15465         struct hwtstamp_config config;
15466         int rc;
15467
15468         DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
15469
15470         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
15471                 return -EFAULT;
15472
15473         DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
15474            config.tx_type, config.rx_filter);
15475
15476         if (config.flags) {
15477                 BNX2X_ERR("config.flags is reserved for future use\n");
15478                 return -EINVAL;
15479         }
15480
15481         bp->hwtstamp_ioctl_called = 1;
15482         bp->tx_type = config.tx_type;
15483         bp->rx_filter = config.rx_filter;
15484
15485         rc = bnx2x_configure_ptp_filters(bp);
15486         if (rc)
15487                 return rc;
15488
15489         config.rx_filter = bp->rx_filter;
15490
15491         return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
15492                 -EFAULT : 0;
15493 }
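
/* Illustrative userspace counterpart (hypothetical snippet, not part of this
 * driver) showing the request the handler above services via SIOCSHWTSTAMP:
 *
 *      struct hwtstamp_config cfg = {
 *              .tx_type   = HWTSTAMP_TX_ON,
 *              .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *      };
 *      struct ifreq ifr = { 0 };
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (char *)&cfg;
 *      ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On success the driver writes the (possibly downgraded) rx_filter back into
 * cfg for the caller to inspect.
 */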
15494
15495 /* Configures HW for PTP */
15496 static int bnx2x_configure_ptp(struct bnx2x *bp)
15497 {
15498         int rc, port = BP_PORT(bp);
15499         u32 wb_data[2];
15500
15501         /* Reset PTP event detection rules - will be configured in the IOCTL */
15502         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15503                NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
15504         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15505                NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
15506         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15507                NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
15508         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15509                NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
15510
15511         /* Disable PTP packets to host - will be configured in the IOCTL */
15512         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15513                NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
15514
15515         /* Enable the PTP feature */
15516         REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
15517                NIG_REG_P0_PTP_EN, 0x3F);
15518
15519         /* Enable the free-running counter */
15520         wb_data[0] = 0;
15521         wb_data[1] = 0;
15522         REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
15523
15524         /* Reset drift register (offset register is not reset) */
15525         rc = bnx2x_send_reset_timesync_ramrod(bp);
15526         if (rc) {
15527                 BNX2X_ERR("Failed to reset PHC drift register\n");
15528                 return -EFAULT;
15529         }
15530
15531         /* Reset possibly old timestamps */
15532         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15533                NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15534         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15535                NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15536
15537         return 0;
15538 }
15539
15540 /* Called during load, to initialize PTP-related state */
15541 void bnx2x_init_ptp(struct bnx2x *bp)
15542 {
15543         int rc;
15544
15545         /* Configure PTP in HW */
15546         rc = bnx2x_configure_ptp(bp);
15547         if (rc) {
15548                 BNX2X_ERR("Stopping PTP initialization\n");
15549                 return;
15550         }
15551
15552         /* Init work queue for Tx timestamping */
15553         INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
15554
15555         /* Init cyclecounter and timecounter. This is done only in the first
15556          * load. If done in every load, PTP application will fail when doing
15557          * unload / load (e.g. MTU change) while it is running.
15558          */
15559         if (!bp->timecounter_init_done) {
15560                 bnx2x_init_cyclecounter(bp);
15561                 timecounter_init(&bp->timecounter, &bp->cyclecounter,
15562                                  ktime_to_ns(ktime_get_real()));
15563                 bp->timecounter_init_done = 1;
15564         }
15565
15566         DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
15567 }