/* bnx2x_cmn.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
        int i;

        /* Add NAPI objects */
        for_each_rx_queue_cnic(bp, i) {
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, NAPI_POLL_WEIGHT);
        }
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
        int i;

        /* Add NAPI objects */
        for_each_eth_queue(bp, i) {
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, NAPI_POLL_WEIGHT);
        }
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
        int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

        /* Reduce memory usage in kdump environment by using only one queue */
        if (is_kdump_kernel())
                nq = 1;

        nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
        return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:         driver handle
 * @from:       source FP index
 * @to:         destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
        struct bnx2x_fastpath *from_fp = &bp->fp[from];
        struct bnx2x_fastpath *to_fp = &bp->fp[to];
        struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
        struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
        struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
        struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
        int old_max_eth_txqs, new_max_eth_txqs;
        int old_txdata_index = 0, new_txdata_index = 0;
        struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

        /* Copy the NAPI object as it has been already initialized */
        from_fp->napi = to_fp->napi;

        /* Move bnx2x_fastpath contents */
        memcpy(to_fp, from_fp, sizeof(*to_fp));
        to_fp->index = to;

        /* Retain the tpa_info of the original `to' version as we don't want
         * 2 FPs to contain the same tpa_info pointer.
         */
        to_fp->tpa_info = old_tpa_info;

        /* move sp_objs contents as well, as their indices match fp ones */
        memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

        /* move fp_stats contents as well, as their indices match fp ones */
        memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

        /* Update txdata pointers in fp and move txdata content accordingly:
         * Each fp consumes 'max_cos' txdata structures, so the index should be
         * decremented by max_cos x delta.
         */

        old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
        new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
                                (bp)->max_cos;
        if (from == FCOE_IDX(bp)) {
                old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
                new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
        }

        memcpy(&bp->bnx2x_txq[new_txdata_index],
               &bp->bnx2x_txq[old_txdata_index],
               sizeof(struct bnx2x_fp_txdata));
        to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
        if (IS_PF(bp)) {
                u8 phy_fw_ver[PHY_FW_VER_LEN];

                phy_fw_ver[0] = '\0';
                bnx2x_get_ext_phy_fw_version(&bp->link_params,
                                             phy_fw_ver, PHY_FW_VER_LEN);
                strlcpy(buf, bp->fw_ver, buf_len);
                snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
                         "bc %d.%d.%d%s%s",
                         (bp->common.bc_ver & 0xff0000) >> 16,
                         (bp->common.bc_ver & 0xff00) >> 8,
                         (bp->common.bc_ver & 0xff),
                         ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
        } else {
                bnx2x_vf_fill_fw_str(bp, buf, buf_len);
        }
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp: driver handle
 * @delta:      number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
        int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

        /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
         * backward along the array could cause memory to be overwritten
         */
        for (cos = 1; cos < bp->max_cos; cos++) {
                for (i = 0; i < old_eth_num - delta; i++) {
                        struct bnx2x_fastpath *fp = &bp->fp[i];
                        int new_idx = cos * (old_eth_num - delta) + i;

                        memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
                               sizeof(struct bnx2x_fp_txdata));
                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
                }
        }
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

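/* A transmitted packet occupies a chain of buffer descriptors (BDs):
 * a start BD, a parse BD (and possibly a second parse BD), an optional
 * TSO split BD that shares the start BD's DMA mapping, and one data BD
 * per fragment.  tx_start_bd->nbd holds the total count; the walk below
 * mirrors that layout when unmapping and freeing.
 */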
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
                             u16 idx, unsigned int *pkts_compl,
                             unsigned int *bytes_compl)
{
        struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;
        u16 split_bd_len = 0;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
           txdata->txq_index, idx, tx_buf, skb);

        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
                /* Skip second parse bd... */
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
                split_bd_len = BD_UNMAP_LEN(tx_data_bd);
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* unmap first bd */
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
                         DMA_TO_DEVICE);

        /* now free frags */
        while (nbd > 0) {

                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        if (likely(skb)) {
                (*pkts_compl)++;
                (*bytes_compl) += skb->len;
                dev_kfree_skb_any(skb);
        }

        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

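/* bnx2x_tx_int() is the TX completion path for one txdata ring: it
 * advances the software consumer towards the hardware consumer taken
 * from the status block, frees each completed packet via
 * bnx2x_free_tx_pkt(), reports the totals to BQL through
 * netdev_tx_completed_queue(), and wakes the queue if it was stopped
 * and enough descriptors became available.
 */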
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
        unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        sw_cons = txdata->tx_pkt_cons;

        /* Ensure subsequent loads occur after hw_cons */
        smp_rmb();

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                DP(NETIF_MSG_TX_DONE,
                   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

                bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
                                            &pkts_compl, &bytes_compl);

                sw_cons++;
        }

        netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

        txdata->tx_pkt_cons = sw_cons;
        txdata->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         * On the other hand we need an rmb() here to ensure the proper
         * ordering of bit testing in the following
         * netif_tx_queue_stopped(txq) call.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent re-enabling the queue
                 * while it's empty. This could happen if rx_action() gets
                 * suspended in bnx2x_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

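/* SGE reuse is tracked with a bit vector of 64-bit elements
 * (fp->sge_mask).  Bits are cleared as the corresponding SGEs are
 * consumed by an aggregation; once every bit of an element is clear,
 * the element is re-marked as fully available and the SGE producer is
 * advanced past it in BIT_VEC64_ELEM_SZ steps.
 */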
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                         u16 sge_len,
                                         struct eth_end_agg_rx_cqe *cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                BIT_VEC64_CLEAR_BIT(fp->sge_mask,
                        RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp,
                le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
                delta += BIT_VEC64_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
                            const struct eth_fast_path_rx_cqe *cqe,
                            enum pkt_hash_types *rxhash_type)
{
        /* Get Toeplitz hash from CQE */
        if ((bp->dev->features & NETIF_F_RXHASH) &&
            (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
                enum eth_rss_hash_type htype;

                htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
                *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
                                (htype == TCP_IPV6_HASH_TYPE)) ?
                               PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

                return le32_to_cpu(cqe->rss_hash_result);
        }
        *rxhash_type = PKT_HASH_TYPE_NONE;
        return 0;
}

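/* TPA_START: the CQE opens a new aggregation in bin 'queue'.  The spare
 * buffer held in tpa_info->first_buf is mapped and published at the ring
 * producer, while the buffer at the consumer (holding the first segment)
 * is parked in the bin until TPA_STOP.  On a DMA mapping failure the
 * consumer buffer is recycled and the bin is flagged BNX2X_TPA_ERROR so
 * the aggregation is dropped at TPA_STOP time.
 */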
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            u16 cons, u16 prod,
                            struct eth_fast_path_rx_cqe *cqe)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;
        struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
        struct sw_rx_bd *first_buf = &tpa_info->first_buf;

        /* print error if current state != stop */
        if (tpa_info->tpa_state != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        /* Try to map an empty data buffer from the aggregation info  */
        mapping = dma_map_single(&bp->pdev->dev,
                                 first_buf->data + NET_SKB_PAD,
                                 fp->rx_buf_size, DMA_FROM_DEVICE);
        /*
         *  ...if it fails - move the skb from the consumer to the producer
         *  and set the current aggregation state as ERROR to drop it
         *  when TPA_STOP arrives.
         */

        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                /* Move the BD from the consumer to the producer */
                bnx2x_reuse_rx_data(fp, cons, prod);
                tpa_info->tpa_state = BNX2X_TPA_ERROR;
                return;
        }

        /* move empty data from pool to prod */
        prod_rx_buf->data = first_buf->data;
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
        /* point prod_bd to new data */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        /* move partial skb from cons to pool (don't unmap yet) */
        *first_buf = *cons_rx_buf;

        /* mark bin state as START */
        tpa_info->parsing_flags =
                le16_to_cpu(cqe->pars_flags.flags);
        tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
        tpa_info->tpa_state = BNX2X_TPA_START;
        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
        tpa_info->placement_offset = cqe->placement_offset;
        tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
        if (fp->mode == TPA_MODE_GRO) {
                u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
                tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
                tpa_info->gro_size = gro_size;
        }

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
           fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *              nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN      12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:                packet skb
 * @parsing_flags:      parsing flags from the START CQE
 * @len_on_bd:          total length of the first packet for the
 *                      aggregation.
 * @pkt_len:            length of all segments
 * @num_of_coalesced_segs: number of segments coalesced by FW
 *
 * Approximate the MSS for this aggregation using its first packet.
 * Compute the number of aggregated segments and the gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
                                 u16 len_on_bd, unsigned int pkt_len,
                                 u16 num_of_coalesced_segs)
{
        /* TPA aggregation won't have either IP options or TCP options
         * other than timestamp or IPv6 extension headers.
         */
        u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

        if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
            PRS_FLAG_OVERETH_IPV6) {
                hdrs_len += sizeof(struct ipv6hdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        } else {
                hdrs_len += sizeof(struct iphdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        }

        /* Check if there was a TCP timestamp; if there is one, it will
         * always be 12 bytes long: nop nop kind length echo val.
         *
         * Otherwise FW would close the aggregation.
         */
        if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
                hdrs_len += TPA_TSTAMP_OPT_LEN;

        skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

        /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
         * to skb_shinfo(skb)->gso_segs
         */
        NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

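/* SGE buffers are carved out of a per-fastpath page pool: a high-order
 * page is allocated once and handed out in SGE_PAGE_SIZE slices, with a
 * page reference taken for every slice still to come, so each slice can
 * be unmapped and released independently.
 */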
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                              u16 index, gfp_t gfp_mask)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        struct bnx2x_alloc_pool *pool = &fp->page_pool;
        dma_addr_t mapping;

        if (!pool->page) {
                pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
                if (unlikely(!pool->page))
                        return -ENOMEM;

                pool->offset = 0;
        }

        mapping = dma_map_page(&bp->pdev->dev, pool->page,
                               pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                BNX2X_ERR("Can't map sge\n");
                return -ENOMEM;
        }

        sw_buf->page = pool->page;
        sw_buf->offset = pool->offset;

        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        pool->offset += SGE_PAGE_SIZE;
        if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
                get_page(pool->page);
        else
                pool->page = NULL;
        return 0;
}

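/* bnx2x_fill_frag_skb() walks the SGL of a completed aggregation and
 * attaches the SGE pages to the skb as page fragments.  In LRO mode each
 * SGE becomes one fragment; in GRO mode every SGE is further split into
 * gro_size (MSS-sized) fragments so the stack sees proper segment
 * boundaries.  Each consumed SGE is replaced with a freshly allocated
 * one; if that allocation fails the aggregation is dropped.
 */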
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct bnx2x_agg_info *tpa_info,
                               u16 pages,
                               struct sk_buff *skb,
                               struct eth_end_agg_rx_cqe *cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u32 i, frag_len, frag_size;
        int err, j, frag_id = 0;
        u16 len_on_bd = tpa_info->len_on_bd;
        u16 full_page = 0, gro_size = 0;

        frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

        if (fp->mode == TPA_MODE_GRO) {
                gro_size = tpa_info->gro_size;
                full_page = tpa_info->full_page;
        }

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
                                     le16_to_cpu(cqe->pkt_len),
                                     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                if (fp->mode == TPA_MODE_GRO)
                        frag_len = min_t(u32, frag_size, (u32)full_page);
                else /* LRO */
                        frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
                if (unlikely(err)) {
                        bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
                        return err;
                }

                dma_unmap_page(&bp->pdev->dev,
                               dma_unmap_addr(&old_rx_pg, mapping),
                               SGE_PAGE_SIZE, DMA_FROM_DEVICE);
                /* Add one frag and update the appropriate fields in the skb */
                if (fp->mode == TPA_MODE_LRO)
                        skb_fill_page_desc(skb, j, old_rx_pg.page,
                                           old_rx_pg.offset, frag_len);
                else { /* GRO */
                        int rem;
                        int offset = 0;
                        for (rem = frag_len; rem > 0; rem -= gro_size) {
                                int len = rem > gro_size ? gro_size : rem;
                                skb_fill_page_desc(skb, frag_id++,
                                                   old_rx_pg.page,
                                                   old_rx_pg.offset + offset,
                                                   len);
                                if (offset)
                                        get_page(old_rx_pg.page);
                                offset += len;
                        }
                }

                skb->data_len += frag_len;
                skb->truesize += SGE_PAGES;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
        if (fp->rx_frag_size)
                skb_free_frag(data);
        else
                kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
        if (fp->rx_frag_size) {
                /* GFP_KERNEL allocations are used only during initialization */
                if (unlikely(gfpflags_allow_blocking(gfp_mask)))
                        return (void *)__get_free_page(gfp_mask);

                return napi_alloc_frag(fp->rx_frag_size);
        }

        return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

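/* Before handing a FW-aggregated packet to tcp_gro_complete(), the
 * helpers below rebuild the TCP pseudo-header checksum in th->check
 * from the IPv4/IPv6 addresses, which is the form tcp_gro_complete()
 * expects for a coalesced skb.
 */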
#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th;

        skb_set_transport_header(skb, sizeof(struct iphdr));
        th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
                                  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct tcphdr *th;

        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
        th = tcp_hdr(skb);

        th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
                                  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
                            void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
        skb_reset_network_header(skb);
        gro_func(bp, skb);
        tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb)
{
#ifdef CONFIG_INET
        if (skb_shinfo(skb)->gso_size) {
                switch (be16_to_cpu(skb->protocol)) {
                case ETH_P_IP:
                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
                        break;
                case ETH_P_IPV6:
                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
                        break;
                default:
                        netdev_WARN_ONCE(bp->dev,
                                         "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
                                         be16_to_cpu(skb->protocol));
                }
        }
#endif
        skb_record_rx_queue(skb, fp->rx_queue);
        napi_gro_receive(&fp->napi, skb);
}

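/* TPA_STOP: close the aggregation bin and hand the result to the stack.
 * The first-segment buffer parked at TPA_START is turned into an skb via
 * build_skb(), the SGL pages are attached by bnx2x_fill_frag_skb(), and
 * a replacement buffer is left in the bin.  If the replacement
 * allocation or any later step fails, the packet is dropped and the old
 * buffer stays in the bin.
 */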
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           struct bnx2x_agg_info *tpa_info,
                           u16 pages,
                           struct eth_end_agg_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
        u8 pad = tpa_info->placement_offset;
        u16 len = tpa_info->len_on_bd;
        struct sk_buff *skb = NULL;
        u8 *new_data, *data = rx_buf->data;
        u8 old_tpa_state = tpa_info->tpa_state;

        tpa_info->tpa_state = BNX2X_TPA_STOP;

        /* If there was an error during the handling of the TPA_START -
         * drop this aggregation.
         */
        if (old_tpa_state == BNX2X_TPA_ERROR)
                goto drop;

        /* Try to allocate the new data */
        new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
                         fp->rx_buf_size, DMA_FROM_DEVICE);
        if (likely(new_data))
                skb = build_skb(data, fp->rx_frag_size);

        if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > fp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, fp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad + NET_SKB_PAD);
                skb_put(skb, len);
                skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
                                         skb, cqe, cqe_idx)) {
                        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
                        bnx2x_gro_receive(bp, fp, skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS,
                           "Failed to allocate new pages - dropping packet!\n");
                        dev_kfree_skb_any(skb);
                }

                /* put new data in bin */
                rx_buf->data = new_data;

                return;
        }
        if (new_data)
                bnx2x_frag_free(fp, new_data);
drop:
        /* drop the packet and keep the buffer in the bin */
        DP(NETIF_MSG_RX_STATUS,
           "Failed to allocate or map a new skb - dropping packet!\n");
        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               u16 index, gfp_t gfp_mask)
{
        u8 *data;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        data = bnx2x_frag_alloc(fp, gfp_mask);
        if (unlikely(data == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
                                 fp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                bnx2x_frag_free(fp, data);
                BNX2X_ERR("Can't map rx data\n");
                return -ENOMEM;
        }

        rx_buf->data = data;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
                                 struct bnx2x_fastpath *fp,
                                 struct bnx2x_eth_q_stats *qstats)
{
        /* Do nothing if no L4 csum validation was done.
         * We do not check whether IP csum was validated. For IPv4 we assume
         * that if the card got as far as validating the L4 csum, it also
         * validated the IP csum. IPv6 has no IP csum.
         */
        if (cqe->fast_path_cqe.status_flags &
            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
                return;

        /* If L4 validation was done, check if an error was found. */

        if (cqe->fast_path_cqe.type_error_flags &
            (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
             ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
                qstats->hw_csum_err++;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}

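/* Main RX polling loop.  Completions are consumed from the RCQ (one CQE
 * per packet, aggregation event or slowpath event) while buffers are
 * consumed from the RX BD ring; the two rings advance at different
 * rates, which is why separate bd_cons/bd_prod and sw_comp_cons/
 * sw_comp_prod indices are maintained, written back together at the
 * end, and followed by a single producer update to the chip.
 */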
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;
        union eth_rx_cqe *cqe;
        struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif
        if (budget <= 0)
                return rx_pkt;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        comp_ring_cons = RCQ_BD(sw_comp_cons);
        cqe = &fp->rx_comp_ring[comp_ring_cons];
        cqe_fp = &cqe->fast_path_cqe;

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

        while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                u8 cqe_fp_flags;
                enum eth_rx_cqe_type cqe_fp_type;
                u16 len, pad, queue;
                u8 *data;
                u32 rxhash;
                enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
                if (unlikely(bp->panic))
                        return 0;
#endif

                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                /* A rmb() is required to ensure that the CQE is not read
                 * before it is written by the adapter DMA.  PCI ordering
                 * rules will make sure the other fields are written before
                 * the marker at the end of struct eth_fast_path_rx_cqe
                 * but without rmb() a weakly ordered processor can process
                 * stale data.  Without the barrier TPA state-machine might
                 * enter inconsistent state and kernel stack might be
                 * provided with incorrect packet description - these lead
                 * to various kernel crashes.
                 */
                rmb();

                cqe_fp_flags = cqe_fp->type_error_flags;
                cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

                DP(NETIF_MSG_RX_STATUS,
                   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
                   CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe_fp->status_flags,
                   le32_to_cpu(cqe_fp->rss_hash_result),
                   le16_to_cpu(cqe_fp->vlan_tag),
                   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;
                }

                rx_buf = &fp->rx_buf_ring[bd_cons];
                data = rx_buf->data;

                if (!CQE_TYPE_FAST(cqe_fp_type)) {
                        struct bnx2x_agg_info *tpa_info;
                        u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
                        /* sanity check */
                        if (fp->mode == TPA_MODE_DISABLED &&
                            (CQE_TYPE_START(cqe_fp_type) ||
                             CQE_TYPE_STOP(cqe_fp_type)))
                                BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
                                          CQE_TYPE(cqe_fp_type));
#endif

                        if (CQE_TYPE_START(cqe_fp_type)) {
                                u16 queue = cqe_fp->queue_index;
                                DP(NETIF_MSG_RX_STATUS,
                                   "calling tpa_start on queue %d\n",
                                   queue);

                                bnx2x_tpa_start(fp, queue,
                                                bd_cons, bd_prod,
                                                cqe_fp);

                                goto next_rx;
                        }
                        queue = cqe->end_agg_cqe.queue_index;
                        tpa_info = &fp->tpa_info[queue];
                        DP(NETIF_MSG_RX_STATUS,
                           "calling tpa_stop on queue %d\n",
                           queue);

                        frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
                                    tpa_info->len_on_bd;

                        if (fp->mode == TPA_MODE_GRO)
                                pages = (frag_size + tpa_info->full_page - 1) /
                                         tpa_info->full_page;
                        else
                                pages = SGE_PAGE_ALIGN(frag_size) >>
                                        SGE_PAGE_SHIFT;

                        bnx2x_tpa_stop(bp, fp, tpa_info, pages,
                                       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
                        if (bp->panic)
                                return 0;
#endif

                        bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
                        goto next_cqe;
                }
                /* non TPA */
                len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
                pad = cqe_fp->placement_offset;
                dma_sync_single_for_cpu(&bp->pdev->dev,
                                        dma_unmap_addr(rx_buf, mapping),
                                        pad + RX_COPY_THRESH,
                                        DMA_FROM_DEVICE);
                pad += NET_SKB_PAD;
                prefetch(data + pad); /* speedup eth_type_trans() */
                /* is this an error packet? */
                if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
                        DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
                           "ERROR  flags %x  rx packet %u\n",
                           cqe_fp_flags, sw_comp_cons);
                        bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
                        goto reuse_rx;
                }

                /* Since we don't have a jumbo ring,
                 * copy small packets if mtu > 1500
                 */
                if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                    (len <= RX_COPY_THRESH)) {
                        skb = napi_alloc_skb(&fp->napi, len);
                        if (skb == NULL) {
                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
                                   "ERROR  packet dropped because of alloc failure\n");
                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
                                goto reuse_rx;
                        }
                        memcpy(skb->data, data + pad, len);
                        bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
                } else {
                        if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
                                                       GFP_ATOMIC) == 0)) {
                                dma_unmap_single(&bp->pdev->dev,
                                                 dma_unmap_addr(rx_buf, mapping),
                                                 fp->rx_buf_size,
                                                 DMA_FROM_DEVICE);
                                skb = build_skb(data, fp->rx_frag_size);
                                if (unlikely(!skb)) {
                                        bnx2x_frag_free(fp, data);
                                        bnx2x_fp_qstats(bp, fp)->
                                                        rx_skb_alloc_failed++;
                                        goto next_rx;
                                }
                                skb_reserve(skb, pad);
                        } else {
                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
                                   "ERROR  packet dropped because of alloc failure\n");
                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
                                bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
                                goto next_rx;
                        }
                }

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Set Toeplitz hash for a non-LRO skb */
                rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
                skb_set_hash(skb, rxhash, rxhash_type);

                skb_checksum_none_assert(skb);

                if (bp->dev->features & NETIF_F_RXCSUM)
                        bnx2x_csum_validate(skb, cqe, fp,
                                            bnx2x_fp_qstats(bp, fp));

                skb_record_rx_queue(skb, fp->rx_queue);

                /* Check if this packet was timestamped */
                if (unlikely(cqe->fast_path_cqe.type_error_flags &
                             (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
                        bnx2x_set_rx_ts(bp, skb);

                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
                    PARSING_FLAGS_VLAN)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               le16_to_cpu(cqe_fp->vlan_tag));

                napi_gro_receive(&fp->napi, skb);
next_rx:
                rx_buf->data = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
                rx_pkt++;
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

                /* mark CQE as free */
                BNX2X_SEED_CQE(cqe_fp);

                if (rx_pkt == budget)
                        break;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp = &cqe->fast_path_cqe;
        } /* while */

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod_fw;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        /* Update producers */
        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
                             fp->rx_sge_prod);

        return rx_pkt;
}

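/* Fastpath MSI-X handler: disables further interrupts from this status
 * block via bnx2x_ack_sb(), prefetches the TX consumer indices for every
 * CoS on this queue, and defers all Rx/Tx work to NAPI.
 */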
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        u8 cos;

        DP(NETIF_MSG_INTR,
           "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
           fp->index, fp->fw_sb_id, fp->igu_sb_id);

        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Handle Rx and Tx according to MSI-X vector */
        for_each_cos_in_tx_queue(fp, cos)
                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

        prefetch(&fp->sb_running_index[SM_RX_ID]);
        napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

        return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
        mutex_lock(&bp->port.phy_mutex);

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

        mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
        u16 line_speed = bp->link_vars.line_speed;
        if (IS_MF(bp)) {
                u16 maxCfg = bnx2x_extract_max_cfg(bp,
                                                   bp->mf_config[BP_VN(bp)]);

                /* Calculate the current MAX line speed limit for the MF
                 * devices
                 */
                if (IS_MF_PERCENT_BW(bp))
                        line_speed = (line_speed * maxCfg) / 100;
                else { /* SD mode */
                        u16 vn_max_rate = maxCfg * 100;

                        if (vn_max_rate < line_speed)
                                line_speed = vn_max_rate;
                }
        }

        return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:         driver handle
 * @data:       link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
                                   struct bnx2x_link_report_data *data)
{
        memset(data, 0, sizeof(*data));

        if (IS_PF(bp)) {
                /* Fill the report data: effective line speed */
                data->line_speed = bnx2x_get_mf_speed(bp);

                /* Link is down */
                if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                                  &data->link_report_flags);

                if (!BNX2X_NUM_ETH_QUEUES(bp))
                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                                  &data->link_report_flags);

                /* Full DUPLEX */
                if (bp->link_vars.duplex == DUPLEX_FULL)
                        __set_bit(BNX2X_LINK_REPORT_FD,
                                  &data->link_report_flags);

                /* Rx Flow Control is ON */
                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
                        __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
                                  &data->link_report_flags);

                /* Tx Flow Control is ON */
                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
                        __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
                                  &data->link_report_flags);
        } else { /* VF */
                *data = bp->vf_link_vars;
        }
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:         driver handle
 *
 * Calls __bnx2x_link_report() under the same locking scheme as the
 * link/PHY state managing code to ensure consistent link reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
        bnx2x_acquire_phy_lock(bp);
        __bnx2x_link_report(bp);
        bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:         driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
        struct bnx2x_link_report_data cur_data;

        if (bp->force_link_down) {
                bp->link_vars.link_up = 0;
                return;
        }

        /* reread mf_cfg */
        if (IS_PF(bp) && !CHIP_IS_E1(bp))
                bnx2x_read_mf_cfg(bp);

        /* Read the current link report info */
        bnx2x_fill_report_data(bp, &cur_data);

        /* Don't report link down or exactly the same link status twice */
        if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
            (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                      &bp->last_reported_link.link_report_flags) &&
             test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                      &cur_data.link_report_flags)))
                return;

        bp->link_cnt++;

        /* We are going to report new link parameters now -
         * remember the current data for next time.
         */
        memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

        /* propagate status to VFs */
        if (IS_PF(bp))
                bnx2x_iov_link_update(bp);

        if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                     &cur_data.link_report_flags)) {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC Link is Down\n");
                return;
        } else {
                const char *duplex;
                const char *flow;

                netif_carrier_on(bp->dev);

                if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
                                       &cur_data.link_report_flags))
                        duplex = "full";
                else
                        duplex = "half";

                /* Handle the FC at the end so that only these flags would be
                 * possibly set. This way we may easily check if there is no FC
                 * enabled.
                 */
                if (cur_data.link_report_flags) {
                        if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
                                     &cur_data.link_report_flags)) {
                                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
                                     &cur_data.link_report_flags))
                                        flow = "ON - receive & transmit";
                                else
                                        flow = "ON - receive";
                        } else {
                                flow = "ON - transmit";
                        }
                } else {
                        flow = "none";
                }
                netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
                            cur_data.line_speed, duplex, flow);
        }
}

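/* The SGE ring is built from NUM_RX_SGE_PAGES pages chained into a ring:
 * the tail of each page holds a "next page" element whose address field
 * points at the following page, so the ring can be walked across page
 * boundaries.
 */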
static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                struct eth_rx_sge *sge;

                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
                sge->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

                sge->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
        }
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
                                struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++) {
                struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
                struct sw_rx_bd *first_buf = &tpa_info->first_buf;
                u8 *data = first_buf->data;

                if (data == NULL) {
                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
                        continue;
                }
                if (tpa_info->tpa_state == BNX2X_TPA_START)
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(first_buf, mapping),
                                         fp->rx_buf_size, DMA_FROM_DEVICE);
                bnx2x_frag_free(fp, data);
                first_buf->data = NULL;
        }
}

1378 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1379 {
1380         int j;
1381
1382         for_each_rx_queue_cnic(bp, j) {
1383                 struct bnx2x_fastpath *fp = &bp->fp[j];
1384
1385                 fp->rx_bd_cons = 0;
1386
1387                 /* Activate BD ring */
1388                 /* Warning!
1389                  * This will generate an interrupt (to the TSTORM);
1390                  * it must only be done after the chip is initialized.
1391                  */
1392                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1393                                      fp->rx_sge_prod);
1394         }
1395 }
1396
1397 void bnx2x_init_rx_rings(struct bnx2x *bp)
1398 {
1399         int func = BP_FUNC(bp);
1400         u16 ring_prod;
1401         int i, j;
1402
1403         /* Allocate TPA resources */
1404         for_each_eth_queue(bp, j) {
1405                 struct bnx2x_fastpath *fp = &bp->fp[j];
1406
1407                 DP(NETIF_MSG_IFUP,
1408                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1409
1410                 if (fp->mode != TPA_MODE_DISABLED) {
1411                         /* Fill the per-aggregation pool */
1412                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1413                                 struct bnx2x_agg_info *tpa_info =
1414                                         &fp->tpa_info[i];
1415                                 struct sw_rx_bd *first_buf =
1416                                         &tpa_info->first_buf;
1417
1418                                 first_buf->data =
1419                                         bnx2x_frag_alloc(fp, GFP_KERNEL);
1420                                 if (!first_buf->data) {
1421                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1422                                                   j);
1423                                         bnx2x_free_tpa_pool(bp, fp, i);
1424                                         fp->mode = TPA_MODE_DISABLED;
1425                                         break;
1426                                 }
1427                                 dma_unmap_addr_set(first_buf, mapping, 0);
1428                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1429                         }
1430
1431                         /* "next page" elements initialization */
1432                         bnx2x_set_next_page_sgl(fp);
1433
1434                         /* set SGEs bit mask */
1435                         bnx2x_init_sge_ring_bit_mask(fp);
1436
1437                         /* Allocate SGEs and initialize the ring elements */
1438                         for (i = 0, ring_prod = 0;
1439                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1440
1441                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1442                                                        GFP_KERNEL) < 0) {
1443                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1444                                                   i);
1445                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1446                                                   j);
1447                                         /* Cleanup already allocated elements */
1448                                         bnx2x_free_rx_sge_range(bp, fp,
1449                                                                 ring_prod);
1450                                         bnx2x_free_tpa_pool(bp, fp,
1451                                                             MAX_AGG_QS(bp));
1452                                         fp->mode = TPA_MODE_DISABLED;
1453                                         ring_prod = 0;
1454                                         break;
1455                                 }
1456                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1457                         }
1458
1459                         fp->rx_sge_prod = ring_prod;
1460                 }
1461         }
1462
1463         for_each_eth_queue(bp, j) {
1464                 struct bnx2x_fastpath *fp = &bp->fp[j];
1465
1466                 fp->rx_bd_cons = 0;
1467
1468                 /* Activate BD ring */
1469                 /* Warning!
1470                  * This will generate an interrupt (to the TSTORM);
1471                  * it must only be done after the chip is initialized.
1472                  */
1473                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1474                                      fp->rx_sge_prod);
1475
1476                 if (j != 0)
1477                         continue;
1478
1479                 if (CHIP_IS_E1(bp)) {
1480                         REG_WR(bp, BAR_USTRORM_INTMEM +
1481                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1482                                U64_LO(fp->rx_comp_mapping));
1483                         REG_WR(bp, BAR_USTRORM_INTMEM +
1484                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1485                                U64_HI(fp->rx_comp_mapping));
1486                 }
1487         }
1488 }
1489
1490 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1491 {
1492         u8 cos;
1493         struct bnx2x *bp = fp->bp;
1494
1495         for_each_cos_in_tx_queue(fp, cos) {
1496                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1497                 unsigned pkts_compl = 0, bytes_compl = 0;
1498
1499                 u16 sw_prod = txdata->tx_pkt_prod;
1500                 u16 sw_cons = txdata->tx_pkt_cons;
1501
1502                 while (sw_cons != sw_prod) {
1503                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1504                                           &pkts_compl, &bytes_compl);
1505                         sw_cons++;
1506                 }
1507
1508                 netdev_tx_reset_queue(
1509                         netdev_get_tx_queue(bp->dev,
1510                                             txdata->txq_index));
1511         }
1512 }
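/* Editorial note: netdev_tx_reset_queue() clears the queue's BQL (byte
 * queue limit) accounting; without it, bytes counted at xmit time but never
 * reported as completed would stall the queue after the next load. A
 * minimal sketch of the BQL pattern this pairs with:
 *
 *     netdev_tx_sent_queue(txq, skb->len);             // transmit path
 *     netdev_tx_completed_queue(txq, pkts, bytes);     // completion path
 *     netdev_tx_reset_queue(txq);                      // ring teardown
 */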
1513
1514 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1515 {
1516         int i;
1517
1518         for_each_tx_queue_cnic(bp, i) {
1519                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1520         }
1521 }
1522
1523 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1524 {
1525         int i;
1526
1527         for_each_eth_queue(bp, i) {
1528                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1529         }
1530 }
1531
1532 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1533 {
1534         struct bnx2x *bp = fp->bp;
1535         int i;
1536
1537         /* ring wasn't allocated */
1538         if (fp->rx_buf_ring == NULL)
1539                 return;
1540
1541         for (i = 0; i < NUM_RX_BD; i++) {
1542                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1543                 u8 *data = rx_buf->data;
1544
1545                 if (data == NULL)
1546                         continue;
1547                 dma_unmap_single(&bp->pdev->dev,
1548                                  dma_unmap_addr(rx_buf, mapping),
1549                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1550
1551                 rx_buf->data = NULL;
1552                 bnx2x_frag_free(fp, data);
1553         }
1554 }
1555
1556 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1557 {
1558         int j;
1559
1560         for_each_rx_queue_cnic(bp, j) {
1561                 bnx2x_free_rx_bds(&bp->fp[j]);
1562         }
1563 }
1564
1565 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1566 {
1567         int j;
1568
1569         for_each_eth_queue(bp, j) {
1570                 struct bnx2x_fastpath *fp = &bp->fp[j];
1571
1572                 bnx2x_free_rx_bds(fp);
1573
1574                 if (fp->mode != TPA_MODE_DISABLED)
1575                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1576         }
1577 }
1578
1579 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1580 {
1581         bnx2x_free_tx_skbs_cnic(bp);
1582         bnx2x_free_rx_skbs_cnic(bp);
1583 }
1584
1585 void bnx2x_free_skbs(struct bnx2x *bp)
1586 {
1587         bnx2x_free_tx_skbs(bp);
1588         bnx2x_free_rx_skbs(bp);
1589 }
1590
1591 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1592 {
1593         /* load old values */
1594         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1595
1596         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1597                 /* leave all but MAX value */
1598                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1599
1600                 /* set new MAX value */
1601                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1602                                 & FUNC_MF_CFG_MAX_BW_MASK;
1603
1604                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1605         }
1606 }
1607
1608 /**
1609  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1610  *
1611  * @bp:         driver handle
1612  * @nvecs:      number of vectors to be released
1613  */
1614 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1615 {
1616         int i, offset = 0;
1617
1618         if (nvecs == offset)
1619                 return;
1620
1621         /* VFs don't have a default SB */
1622         if (IS_PF(bp)) {
1623                 free_irq(bp->msix_table[offset].vector, bp->dev);
1624                 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1625                    bp->msix_table[offset].vector);
1626                 offset++;
1627         }
1628
1629         if (CNIC_SUPPORT(bp)) {
1630                 if (nvecs == offset)
1631                         return;
1632                 offset++;
1633         }
1634
1635         for_each_eth_queue(bp, i) {
1636                 if (nvecs == offset)
1637                         return;
1638                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1639                    i, bp->msix_table[offset].vector);
1640
1641                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1642         }
1643 }
1644
1645 void bnx2x_free_irq(struct bnx2x *bp)
1646 {
1647         if (bp->flags & USING_MSIX_FLAG &&
1648             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1649                 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1650
1651                 /* VFs don't have a default status block */
1652                 if (IS_PF(bp))
1653                         nvecs++;
1654
1655                 bnx2x_free_msix_irqs(bp, nvecs);
1656         } else {
1657                 free_irq(bp->dev->irq, bp->dev);
1658         }
1659 }
1660
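/* Editorial sketch of the msix_table layout that bnx2x_enable_msix() builds
 * below and bnx2x_free_msix_irqs() unwinds above:
 *
 *     [0]            slowpath / default status block   (PF only)
 *     [next]         CNIC                              (if CNIC_SUPPORT(bp))
 *     [remaining]    one vector per ETH fastpath queue
 *
 * Hence nvecs = IS_PF(bp) + CNIC_SUPPORT(bp) + BNX2X_NUM_ETH_QUEUES(bp),
 * matching the computation in bnx2x_free_irq().
 */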
1661 int bnx2x_enable_msix(struct bnx2x *bp)
1662 {
1663         int msix_vec = 0, i, rc;
1664
1665         /* VFs don't have a default status block */
1666         if (IS_PF(bp)) {
1667                 bp->msix_table[msix_vec].entry = msix_vec;
1668                 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1669                                bp->msix_table[0].entry);
1670                 msix_vec++;
1671         }
1672
1673         /* CNIC requires an MSI-X vector of its own */
1674         if (CNIC_SUPPORT(bp)) {
1675                 bp->msix_table[msix_vec].entry = msix_vec;
1676                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1677                                msix_vec, bp->msix_table[msix_vec].entry);
1678                 msix_vec++;
1679         }
1680
1681         /* We need separate vectors for ETH queues only (not FCoE) */
1682         for_each_eth_queue(bp, i) {
1683                 bp->msix_table[msix_vec].entry = msix_vec;
1684                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1685                                msix_vec, msix_vec, i);
1686                 msix_vec++;
1687         }
1688
1689         DP(BNX2X_MSG_SP, "about to request %d MSI-X vectors\n",
1690            msix_vec);
1691
1692         rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1693                                    BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1694         /*
1695          * reconfigure number of tx/rx queues according to available
1696          * MSI-X vectors
1697          */
1698         if (rc == -ENOSPC) {
1699                 /* Get by with single vector */
1700                 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1701                 if (rc < 0) {
1702                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1703                                        rc);
1704                         goto no_msix;
1705                 }
1706
1707                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1708                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1709
1710                 BNX2X_DEV_INFO("set number of queues to 1\n");
1711                 bp->num_ethernet_queues = 1;
1712                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1713         } else if (rc < 0) {
1714                 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1715                 goto no_msix;
1716         } else if (rc < msix_vec) {
1717                 /* how many fewer vectors did we get? */
1718                 int diff = msix_vec - rc;
1719
1720                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1721
1722                 /*
1723                  * decrease number of queues by number of unallocated entries
1724                  */
1725                 bp->num_ethernet_queues -= diff;
1726                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1727
1728                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1729                                bp->num_queues);
1730         }
1731
1732         bp->flags |= USING_MSIX_FLAG;
1733
1734         return 0;
1735
1736 no_msix:
1737         /* fall back to INTx if there is not enough memory */
1738         if (rc == -ENOMEM)
1739                 bp->flags |= DISABLE_MSI_FLAG;
1740
1741         return rc;
1742 }
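/* Editorial note: pci_enable_msix_range(pdev, entries, minvec, maxvec)
 * returns the number of vectors actually allocated (in [minvec, maxvec])
 * or a negative errno; -ENOSPC means fewer than minvec vectors were
 * available. That is why the code above retries with minvec = maxvec = 1
 * on -ENOSPC and shrinks the queue count when 0 < rc < msix_vec.
 */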
1743
1744 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1745 {
1746         int i, rc, offset = 0;
1747
1748         /* VFs have no default status block */
1749         if (IS_PF(bp)) {
1750                 rc = request_irq(bp->msix_table[offset++].vector,
1751                                  bnx2x_msix_sp_int, 0,
1752                                  bp->dev->name, bp->dev);
1753                 if (rc) {
1754                         BNX2X_ERR("request sp irq failed\n");
1755                         return -EBUSY;
1756                 }
1757         }
1758
1759         if (CNIC_SUPPORT(bp))
1760                 offset++;
1761
1762         for_each_eth_queue(bp, i) {
1763                 struct bnx2x_fastpath *fp = &bp->fp[i];
1764                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1765                          bp->dev->name, i);
1766
1767                 rc = request_irq(bp->msix_table[offset].vector,
1768                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1769                 if (rc) {
1770                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1771                               bp->msix_table[offset].vector, rc);
1772                         bnx2x_free_msix_irqs(bp, offset);
1773                         return -EBUSY;
1774                 }
1775
1776                 offset++;
1777         }
1778
1779         i = BNX2X_NUM_ETH_QUEUES(bp);
1780         if (IS_PF(bp)) {
1781                 offset = 1 + CNIC_SUPPORT(bp);
1782                 netdev_info(bp->dev,
1783                             "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1784                             bp->msix_table[0].vector,
1785                             0, bp->msix_table[offset].vector,
1786                             i - 1, bp->msix_table[offset + i - 1].vector);
1787         } else {
1788                 offset = CNIC_SUPPORT(bp);
1789                 netdev_info(bp->dev,
1790                             "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1791                             0, bp->msix_table[offset].vector,
1792                             i - 1, bp->msix_table[offset + i - 1].vector);
1793         }
1794         return 0;
1795 }
1796
1797 int bnx2x_enable_msi(struct bnx2x *bp)
1798 {
1799         int rc;
1800
1801         rc = pci_enable_msi(bp->pdev);
1802         if (rc) {
1803                 BNX2X_DEV_INFO("MSI is not attainable\n");
1804                 return -1;
1805         }
1806         bp->flags |= USING_MSI_FLAG;
1807
1808         return 0;
1809 }
1810
1811 static int bnx2x_req_irq(struct bnx2x *bp)
1812 {
1813         unsigned long flags;
1814         unsigned int irq;
1815
1816         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1817                 flags = 0;
1818         else
1819                 flags = IRQF_SHARED;
1820
1821         if (bp->flags & USING_MSIX_FLAG)
1822                 irq = bp->msix_table[0].vector;
1823         else
1824                 irq = bp->pdev->irq;
1825
1826         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1827 }
1828
1829 static int bnx2x_setup_irqs(struct bnx2x *bp)
1830 {
1831         int rc = 0;
1832         if (bp->flags & USING_MSIX_FLAG &&
1833             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1834                 rc = bnx2x_req_msix_irqs(bp);
1835                 if (rc)
1836                         return rc;
1837         } else {
1838                 rc = bnx2x_req_irq(bp);
1839                 if (rc) {
1840                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1841                         return rc;
1842                 }
1843                 if (bp->flags & USING_MSI_FLAG) {
1844                         bp->dev->irq = bp->pdev->irq;
1845                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1846                                     bp->dev->irq);
1847                 }
1848                 if (bp->flags & USING_MSIX_FLAG) {
1849                         bp->dev->irq = bp->msix_table[0].vector;
1850                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1851                                     bp->dev->irq);
1852                 }
1853         }
1854
1855         return 0;
1856 }
1857
1858 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1859 {
1860         int i;
1861
1862         for_each_rx_queue_cnic(bp, i) {
1863                 napi_enable(&bnx2x_fp(bp, i, napi));
1864         }
1865 }
1866
1867 static void bnx2x_napi_enable(struct bnx2x *bp)
1868 {
1869         int i;
1870
1871         for_each_eth_queue(bp, i) {
1872                 napi_enable(&bnx2x_fp(bp, i, napi));
1873         }
1874 }
1875
1876 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1877 {
1878         int i;
1879
1880         for_each_rx_queue_cnic(bp, i) {
1881                 napi_disable(&bnx2x_fp(bp, i, napi));
1882         }
1883 }
1884
1885 static void bnx2x_napi_disable(struct bnx2x *bp)
1886 {
1887         int i;
1888
1889         for_each_eth_queue(bp, i) {
1890                 napi_disable(&bnx2x_fp(bp, i, napi));
1891         }
1892 }
1893
1894 void bnx2x_netif_start(struct bnx2x *bp)
1895 {
1896         if (netif_running(bp->dev)) {
1897                 bnx2x_napi_enable(bp);
1898                 if (CNIC_LOADED(bp))
1899                         bnx2x_napi_enable_cnic(bp);
1900                 bnx2x_int_enable(bp);
1901                 if (bp->state == BNX2X_STATE_OPEN)
1902                         netif_tx_wake_all_queues(bp->dev);
1903         }
1904 }
1905
1906 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1907 {
1908         bnx2x_int_disable_sync(bp, disable_hw);
1909         bnx2x_napi_disable(bp);
1910         if (CNIC_LOADED(bp))
1911                 bnx2x_napi_disable_cnic(bp);
1912 }
1913
1914 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1915                        struct net_device *sb_dev)
1916 {
1917         struct bnx2x *bp = netdev_priv(dev);
1918
1919         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1920                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1921                 u16 ether_type = ntohs(hdr->h_proto);
1922
1923                 /* Skip VLAN tag if present */
1924                 if (ether_type == ETH_P_8021Q) {
1925                         struct vlan_ethhdr *vhdr =
1926                                 (struct vlan_ethhdr *)skb->data;
1927
1928                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1929                 }
1930
1931                 /* If ethertype is FCoE or FIP - use FCoE ring */
1932                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1933                         return bnx2x_fcoe_tx(bp, txq_index);
1934         }
1935
1936         /* select a non-FCoE queue */
1937         return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
1938 }
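/* Editorial example of the steering above: a FIP frame (ethertype 0x8914)
 * or an FCoE frame (0x8906), whether VLAN-tagged or not, always lands on
 * the dedicated FCoE Tx ring; everything else is hashed by
 * netdev_pick_tx() across the BNX2X_NUM_ETH_QUEUES(bp) L2 queues.
 */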
1939
1940 void bnx2x_set_num_queues(struct bnx2x *bp)
1941 {
1942         /* RSS queues */
1943         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1944
1945         /* override in STORAGE SD modes */
1946         if (IS_MF_STORAGE_ONLY(bp))
1947                 bp->num_ethernet_queues = 1;
1948
1949         /* Add special queues */
1950         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1951         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1952
1953         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1954 }
1955
1956 /**
1957  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1958  *
1959  * @bp:         Driver handle
1960  *
1961  * We currently support at most 16 Tx queues per CoS, thus we allocate a
1962  * multiple of 16 for the ETH L2 rings according to the value of
1963  * bp->max_cos.
1964  *
1965  * If there is an FCoE L2 queue, the corresponding Tx queue gets the next
1966  * index after all ETH L2 indices.
1967  *
1968  * If the actual number of Tx queues (for each CoS) is less than 16, there
1969  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1970  * 16..31, ...) that are not coupled with any real Tx queue.
1971  *
1972  * The proper configuration of skb->queue_mapping is handled by
1973  * bnx2x_select_queue() and __skb_tx_hash().
1974  *
1975  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1976  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1977  */
1978 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1979 {
1980         int rc, tx, rx;
1981
1982         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1983         rx = BNX2X_NUM_ETH_QUEUES(bp);
1984
1985 /* account for fcoe queue */
1986         if (include_cnic && !NO_FCOE(bp)) {
1987                 rx++;
1988                 tx++;
1989         }
1990
1991         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1992         if (rc) {
1993                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1994                 return rc;
1995         }
1996         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1997         if (rc) {
1998                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1999                 return rc;
2000         }
2001
2002         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2003                           tx, rx);
2004
2005         return rc;
2006 }
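/* Editorial worked example for the computation above, assuming 8 ETH
 * queues, bp->max_cos = 3 and an FCoE L2 queue:
 *
 *     tx = 8 * 3 + 1 = 25 real Tx queues
 *     rx = 8     + 1 =  9 real Rx queues
 */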
2007
2008 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2009 {
2010         int i;
2011
2012         for_each_queue(bp, i) {
2013                 struct bnx2x_fastpath *fp = &bp->fp[i];
2014                 u32 mtu;
2015
2016                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2017                 if (IS_FCOE_IDX(i))
2018                         /*
2019                          * Although no IP frames are expected to arrive on
2020                          * this ring, we still want to add an
2021                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2022                          * overrun attack.
2023                          */
2024                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2025                 else
2026                         mtu = bp->dev->mtu;
2027                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2028                                   IP_HEADER_ALIGNMENT_PADDING +
2029                                   ETH_OVERHEAD +
2030                                   mtu +
2031                                   BNX2X_FW_RX_ALIGN_END;
2032                 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2033                 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
2034                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2035                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2036                 else
2037                         fp->rx_frag_size = 0;
2038         }
2039 }
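/* Editorial sketch of the sizing above (the alignment macros are
 * driver-internal, so the figures are illustrative): for a 1500-byte MTU
 * the buffer is mtu + Ethernet overhead + head/tail alignment padding,
 * rounded by SKB_DATA_ALIGN(). On a 4 KiB-page system that total plus
 * NET_SKB_PAD fits in one page, so rx_frag_size is non-zero and
 * bnx2x_frag_alloc() can use the page-frag allocator; for jumbo MTUs the
 * sum exceeds PAGE_SIZE and rx_frag_size = 0 selects the kmalloc path.
 */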
2040
2041 static int bnx2x_init_rss(struct bnx2x *bp)
2042 {
2043         int i;
2044         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2045
2046         /* Prepare the initial contents for the indirection table if RSS is
2047          * enabled
2048          */
2049         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2050                 bp->rss_conf_obj.ind_table[i] =
2051                         bp->fp->cl_id +
2052                         ethtool_rxfh_indir_default(i, num_eth_queues);
2053
2054         /*
2055          * For 57710 and 57711 the SEARCHER configuration (rss_keys) is
2056          * per-port, so if explicit configuration is needed, do it only
2057          * for a PMF.
2058          *
2059          * For 57712 and newer on the other hand it's a per-function
2060          * configuration.
2061          */
2062         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2063 }
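/* Editorial note: ethtool_rxfh_indir_default(i, n) is simply i % n, so the
 * loop above fills the indirection table round-robin. For example, with
 * num_eth_queues = 4 and bp->fp->cl_id = 10 the table reads
 * 10, 11, 12, 13, 10, 11, ... across all of its entries.
 */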
2064
2065 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2066               bool config_hash, bool enable)
2067 {
2068         struct bnx2x_config_rss_params params = {NULL};
2069
2070         /* Although RSS is meaningless when there is a single HW queue we
2071          * still need it enabled in order to have HW Rx hash generated.
2072          *
2073          * if (!is_eth_multi(bp))
2074          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2075          */
2076
2077         params.rss_obj = rss_obj;
2078
2079         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2080
2081         if (enable) {
2082                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2083
2084                 /* RSS configuration */
2085                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2086                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2087                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2088                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2089                 if (rss_obj->udp_rss_v4)
2090                         __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2091                 if (rss_obj->udp_rss_v6)
2092                         __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2093
2094                 if (!CHIP_IS_E1x(bp)) {
2095                         /* valid only for TUNN_MODE_VXLAN tunnel mode */
2096                         __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2097                         __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2098
2099                         /* valid only for TUNN_MODE_GRE tunnel mode */
2100                         __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2101                 }
2102         } else {
2103                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2104         }
2105
2106         /* Hash bits */
2107         params.rss_result_mask = MULTI_MASK;
2108
2109         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2110
2111         if (config_hash) {
2112                 /* RSS keys */
2113                 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2114                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2115         }
2116
2117         if (IS_PF(bp))
2118                 return bnx2x_config_rss(bp, &params);
2119         else
2120                 return bnx2x_vfpf_config_rss(bp, &params);
2121 }
2122
2123 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2124 {
2125         struct bnx2x_func_state_params func_params = {NULL};
2126
2127         /* Prepare parameters for function state transitions */
2128         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2129
2130         func_params.f_obj = &bp->func_obj;
2131         func_params.cmd = BNX2X_F_CMD_HW_INIT;
2132
2133         func_params.params.hw_init.load_phase = load_code;
2134
2135         return bnx2x_func_state_change(bp, &func_params);
2136 }
2137
2138 /*
2139  * Cleans objects that have internal lists, without sending
2140  * ramrods. Should be run with interrupts disabled.
2141  */
2142 void bnx2x_squeeze_objects(struct bnx2x *bp)
2143 {
2144         int rc;
2145         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2146         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2147         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2148
2149         /***************** Cleanup MACs' object first *************************/
2150
2151         /* Wait for completion of the requested commands */
2152         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2153         /* Perform a dry cleanup */
2154         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2155
2156         /* Clean ETH primary MAC */
2157         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2158         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2159                                  &ramrod_flags);
2160         if (rc != 0)
2161                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2162
2163         /* Cleanup UC list */
2164         vlan_mac_flags = 0;
2165         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2166         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2167                                  &ramrod_flags);
2168         if (rc != 0)
2169                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2170
2171         /***************** Now clean mcast object *****************************/
2172         rparam.mcast_obj = &bp->mcast_obj;
2173         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2174
2175         /* Add a DEL command... - Since we're doing a driver cleanup only,
2176          * we take a lock surrounding both the initial send and the CONTs,
2177          * as we don't want a true completion to disrupt us in the middle.
2178          */
2179         netif_addr_lock_bh(bp->dev);
2180         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2181         if (rc < 0)
2182                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2183                           rc);
2184
2185         /* ...and wait until all pending commands are cleared */
2186         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2187         while (rc != 0) {
2188                 if (rc < 0) {
2189                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2190                                   rc);
2191                         netif_addr_unlock_bh(bp->dev);
2192                         return;
2193                 }
2194
2195                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2196         }
2197         netif_addr_unlock_bh(bp->dev);
2198 }
2199
2200 #ifndef BNX2X_STOP_ON_ERROR
2201 #define LOAD_ERROR_EXIT(bp, label) \
2202         do { \
2203                 (bp)->state = BNX2X_STATE_ERROR; \
2204                 goto label; \
2205         } while (0)
2206
2207 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2208         do { \
2209                 bp->cnic_loaded = false; \
2210                 goto label; \
2211         } while (0)
2212 #else /*BNX2X_STOP_ON_ERROR*/
2213 #define LOAD_ERROR_EXIT(bp, label) \
2214         do { \
2215                 (bp)->state = BNX2X_STATE_ERROR; \
2216                 (bp)->panic = 1; \
2217                 return -EBUSY; \
2218         } while (0)
2219 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2220         do { \
2221                 bp->cnic_loaded = false; \
2222                 (bp)->panic = 1; \
2223                 return -EBUSY; \
2224         } while (0)
2225 #endif /*BNX2X_STOP_ON_ERROR*/
2226
2227 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2228 {
2229         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2230                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2231         return;
2232 }
2233
2234 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2235 {
2236         int num_groups, vf_headroom = 0;
2237         int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2238
2239         /* number of queues for statistics is number of eth queues + FCoE */
2240         u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2241
2242         /* Total number of FW statistics requests =
2243          * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2244          * and fcoe l2 queue) stats + num of queues (which includes another 1
2245          * for fcoe l2 queue if applicable)
2246          */
2247         bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2248
2249         /* vf stats appear in the request list, but their data is allocated by
2250          * the VFs themselves. We don't include them in the bp->fw_stats_num as
2251          * it is used to determine where to place the vf stats queries in the
2252          * request struct
2253          */
2254         if (IS_SRIOV(bp))
2255                 vf_headroom = bnx2x_vf_headroom(bp);
2256
2257         /* Request is built from stats_query_header and an array of
2258          * stats_query_cmd_group each of which contains
2259          * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2260          * configured in the stats_query_header.
2261          */
2262         num_groups =
2263                 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2264                  (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2265                  1 : 0));
2266
2267         DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2268            bp->fw_stats_num, vf_headroom, num_groups);
2269         bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2270                 num_groups * sizeof(struct stats_query_cmd_group);
2271
2272         /* Data for statistics requests + stats_counter
2273          * stats_counter holds per-STORM counters that are incremented
2274          * when STORM has finished with the current request.
2275          * memory for FCoE offloaded statistics is counted anyway,
2276          * even if it will not be sent.
2277          * VF stats are not accounted for here as the data of VF stats is stored
2278          * in memory allocated by the VF, not here.
2279          */
2280         bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2281                 sizeof(struct per_pf_stats) +
2282                 sizeof(struct fcoe_statistics_params) +
2283                 sizeof(struct per_queue_stats) * num_queue_stats +
2284                 sizeof(struct stats_counter);
2285
2286         bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2287                                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2288         if (!bp->fw_stats)
2289                 goto alloc_mem_err;
2290
2291         /* Set shortcuts */
2292         bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2293         bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2294         bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2295                 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2296         bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2297                 bp->fw_stats_req_sz;
2298
2299         DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2300            U64_HI(bp->fw_stats_req_mapping),
2301            U64_LO(bp->fw_stats_req_mapping));
2302         DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2303            U64_HI(bp->fw_stats_data_mapping),
2304            U64_LO(bp->fw_stats_data_mapping));
2305         return 0;
2306
2307 alloc_mem_err:
2308         bnx2x_free_fw_stats_mem(bp);
2309         BNX2X_ERR("Can't allocate FW stats memory\n");
2310         return -ENOMEM;
2311 }
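/* Editorial worked example of the sizing above, assuming for illustration
 * that STATS_QUERY_CMD_COUNT were 16 (the real value comes from the HSI
 * headers): with 8 ETH queues, FCoE enabled and no VFs,
 *
 *     fw_stats_num = 2 + 1 + (8 + 1)     = 12
 *     num_groups   = 12/16 + (12%16?1:0) =  1   (i.e. ceiling division)
 *
 * The single DMA region is laid out as [request | data], with the data
 * shortcut starting fw_stats_req_sz bytes into the allocation.
 */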
2312
2313 /* send load request to mcp and analyze response */
2314 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2315 {
2316         u32 param;
2317
2318         /* init fw_seq */
2319         bp->fw_seq =
2320                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2321                  DRV_MSG_SEQ_NUMBER_MASK);
2322         BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2323
2324         /* Get current FW pulse sequence */
2325         bp->fw_drv_pulse_wr_seq =
2326                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2327                  DRV_PULSE_SEQ_MASK);
2328         BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2329
2330         param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2331
2332         if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2333                 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2334
2335         /* load request */
2336         (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2337
2338         /* if mcp fails to respond we must abort */
2339         if (!(*load_code)) {
2340                 BNX2X_ERR("MCP response failure, aborting\n");
2341                 return -EBUSY;
2342         }
2343
2344         /* If mcp refused (e.g. other port is in diagnostic mode) we
2345          * must abort
2346          */
2347         if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2348                 BNX2X_ERR("MCP refused load request, aborting\n");
2349                 return -EBUSY;
2350         }
2351         return 0;
2352 }
2353
2354 /* Check whether another PF has already loaded FW to the chip. In
2355  * virtualized environments, a PF from another VM may have already
2356  * initialized the device, including loading the FW.
2357  */
2358 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2359 {
2360         /* is another pf loaded on this engine? */
2361         if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2362             load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2363                 /* build my FW version dword */
2364                 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2365                         (BCM_5710_FW_MINOR_VERSION << 8) +
2366                         (BCM_5710_FW_REVISION_VERSION << 16) +
2367                         (BCM_5710_FW_ENGINEERING_VERSION << 24);
2368
2369                 /* read loaded FW from chip */
2370                 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2371
2372                 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2373                    loaded_fw, my_fw);
2374
2375                 /* abort nic load if version mismatch */
2376                 if (my_fw != loaded_fw) {
2377                         if (print_err)
2378                                 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2379                                           loaded_fw, my_fw);
2380                         else
2381                                 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2382                                                loaded_fw, my_fw);
2383                         return -EBUSY;
2384                 }
2385         }
2386         return 0;
2387 }
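/* Editorial example: the version dword above packs one byte per component,
 * lowest byte first. A hypothetical FW version 7.13.1.0 would encode as
 *
 *     7 + (13 << 8) + (1 << 16) + (0 << 24) = 0x00010d07
 */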
2388
2389 /* returns the "mcp load_code" according to the global load_count array */
2390 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2391 {
2392         int path = BP_PATH(bp);
2393
2394         DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2395            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2396            bnx2x_load_count[path][2]);
2397         bnx2x_load_count[path][0]++;
2398         bnx2x_load_count[path][1 + port]++;
2399         DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2400            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2401            bnx2x_load_count[path][2]);
2402         if (bnx2x_load_count[path][0] == 1)
2403                 return FW_MSG_CODE_DRV_LOAD_COMMON;
2404         else if (bnx2x_load_count[path][1 + port] == 1)
2405                 return FW_MSG_CODE_DRV_LOAD_PORT;
2406         else
2407                 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2408 }
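/* Editorial note on the load_count semantics above: index 0 counts all
 * loaded functions on this path (engine), while index 1 + port counts
 * those on each port. The first function on the path therefore performs
 * the global init (LOAD_COMMON), the first on a port the per-port init
 * (LOAD_PORT), and every later function only its own init (LOAD_FUNCTION).
 */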
2409
2410 /* mark PMF if applicable */
2411 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2412 {
2413         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2414             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2415             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2416                 bp->port.pmf = 1;
2417                 /* We need the barrier to ensure the ordering between the
2418                  * writing to bp->port.pmf here and reading it from the
2419                  * bnx2x_periodic_task().
2420                  */
2421                 smp_mb();
2422         } else {
2423                 bp->port.pmf = 0;
2424         }
2425
2426         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2427 }
2428
2429 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2430 {
2431         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2432              (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2433             (bp->common.shmem2_base)) {
2434                 if (SHMEM2_HAS(bp, dcc_support))
2435                         SHMEM2_WR(bp, dcc_support,
2436                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2437                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2438                 if (SHMEM2_HAS(bp, afex_driver_support))
2439                         SHMEM2_WR(bp, afex_driver_support,
2440                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2441         }
2442
2443         /* Set AFEX default VLAN tag to an invalid value */
2444         bp->afex_def_vlan_tag = -1;
2445 }
2446
2447 /**
2448  * bnx2x_bz_fp - zero content of the fastpath structure.
2449  *
2450  * @bp:         driver handle
2451  * @index:      fastpath index to be zeroed
2452  *
2453  * Makes sure the contents of bp->fp[index].napi are kept
2454  * intact.
2455  */
2456 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2457 {
2458         struct bnx2x_fastpath *fp = &bp->fp[index];
2459         int cos;
2460         struct napi_struct orig_napi = fp->napi;
2461         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2462
2463         /* bzero bnx2x_fastpath contents */
2464         if (fp->tpa_info)
2465                 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2466                        sizeof(struct bnx2x_agg_info));
2467         memset(fp, 0, sizeof(*fp));
2468
2469         /* Restore the NAPI object as it has been already initialized */
2470         fp->napi = orig_napi;
2471         fp->tpa_info = orig_tpa_info;
2472         fp->bp = bp;
2473         fp->index = index;
2474         if (IS_ETH_FP(fp))
2475                 fp->max_cos = bp->max_cos;
2476         else
2477                 /* Special queues support only one CoS */
2478                 fp->max_cos = 1;
2479
2480         /* Init txdata pointers */
2481         if (IS_FCOE_FP(fp))
2482                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2483         if (IS_ETH_FP(fp))
2484                 for_each_cos_in_tx_queue(fp, cos)
2485                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2486                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
2487
2488         /* set the tpa flag for each queue. The tpa flag determines the queue
2489          * minimal size so it must be set prior to queue memory allocation
2490          */
2491         if (bp->dev->features & NETIF_F_LRO)
2492                 fp->mode = TPA_MODE_LRO;
2493         else if (bp->dev->features & NETIF_F_GRO_HW)
2494                 fp->mode = TPA_MODE_GRO;
2495         else
2496                 fp->mode = TPA_MODE_DISABLED;
2497
2498         /* We don't want TPA if it's disabled in bp
2499          * or if this is an FCoE L2 ring.
2500          */
2501         if (bp->disable_tpa || IS_FCOE_FP(fp))
2502                 fp->mode = TPA_MODE_DISABLED;
2503 }
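/* Editorial example of the ETH txdata mapping above: Tx queues are grouped
 * by CoS, with queue index = cos * BNX2X_NUM_ETH_QUEUES(bp) + fp->index.
 * With 8 ETH queues, fastpath 2 at CoS 1 thus uses
 * bp->bnx2x_txq[1 * 8 + 2] = bnx2x_txq[10].
 */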
2504
2505 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2506 {
2507         u32 cur;
2508
2509         if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2510                 return;
2511
2512         cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2513         DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2514            cur, state);
2515
2516         SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2517 }
2518
2519 int bnx2x_load_cnic(struct bnx2x *bp)
2520 {
2521         int i, rc, port = BP_PORT(bp);
2522
2523         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2524
2525         mutex_init(&bp->cnic_mutex);
2526
2527         if (IS_PF(bp)) {
2528                 rc = bnx2x_alloc_mem_cnic(bp);
2529                 if (rc) {
2530                         BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2531                         LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2532                 }
2533         }
2534
2535         rc = bnx2x_alloc_fp_mem_cnic(bp);
2536         if (rc) {
2537                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2538                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2539         }
2540
2541         /* Update the number of queues with the cnic queues */
2542         rc = bnx2x_set_real_num_queues(bp, 1);
2543         if (rc) {
2544                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2545                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2546         }
2547
2548         /* Add all CNIC NAPI objects */
2549         bnx2x_add_all_napi_cnic(bp);
2550         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2551         bnx2x_napi_enable_cnic(bp);
2552
2553         rc = bnx2x_init_hw_func_cnic(bp);
2554         if (rc)
2555                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2556
2557         bnx2x_nic_init_cnic(bp);
2558
2559         if (IS_PF(bp)) {
2560                 /* Enable Timer scan */
2561                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2562
2563                 /* setup cnic queues */
2564                 for_each_cnic_queue(bp, i) {
2565                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2566                         if (rc) {
2567                                 BNX2X_ERR("Queue setup failed\n");
2568                                 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2569                         }
2570                 }
2571         }
2572
2573         /* Initialize Rx filter. */
2574         bnx2x_set_rx_mode_inner(bp);
2575
2576         /* re-read iscsi info */
2577         bnx2x_get_iscsi_info(bp);
2578         bnx2x_setup_cnic_irq_info(bp);
2579         bnx2x_setup_cnic_info(bp);
2580         bp->cnic_loaded = true;
2581         if (bp->state == BNX2X_STATE_OPEN)
2582                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2583
2584         DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2585
2586         return 0;
2587
2588 #ifndef BNX2X_STOP_ON_ERROR
2589 load_error_cnic2:
2590         /* Disable Timer scan */
2591         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2592
2593 load_error_cnic1:
2594         bnx2x_napi_disable_cnic(bp);
2595         /* Update the number of queues without the cnic queues */
2596         if (bnx2x_set_real_num_queues(bp, 0))
2597                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2598 load_error_cnic0:
2599         BNX2X_ERR("CNIC-related load failed\n");
2600         bnx2x_free_fp_mem_cnic(bp);
2601         bnx2x_free_mem_cnic(bp);
2602         return rc;
2603 #endif /* ! BNX2X_STOP_ON_ERROR */
2604 }
2605
2606 /* must be called with rtnl_lock */
2607 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2608 {
2609         int port = BP_PORT(bp);
2610         int i, rc = 0, load_code = 0;
2611
2612         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2613         DP(NETIF_MSG_IFUP,
2614            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2615
2616 #ifdef BNX2X_STOP_ON_ERROR
2617         if (unlikely(bp->panic)) {
2618                 BNX2X_ERR("Can't load NIC when there is panic\n");
2619                 return -EPERM;
2620         }
2621 #endif
2622
2623         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2624
2625         /* zero the structure w/o any lock, before SP handler is initialized */
2626         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2627         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2628                 &bp->last_reported_link.link_report_flags);
2629
2630         if (IS_PF(bp))
2631                 /* must be called before memory allocation and HW init */
2632                 bnx2x_ilt_set_info(bp);
2633
2634         /*
2635          * Zero the fastpath structures while preserving the invariants that
2636          * are allocated only once: napi, fp index, max_cos and the bp
2637          * pointer. Also set fp->mode and txdata_ptr.
2638          */
2639         DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2640         for_each_queue(bp, i)
2641                 bnx2x_bz_fp(bp, i);
2642         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2643                                   bp->num_cnic_queues) *
2644                                   sizeof(struct bnx2x_fp_txdata));
2645
2646         bp->fcoe_init = false;
2647
2648         /* Set the receive queues buffer size */
2649         bnx2x_set_rx_buf_size(bp);
2650
2651         if (IS_PF(bp)) {
2652                 rc = bnx2x_alloc_mem(bp);
2653                 if (rc) {
2654                         BNX2X_ERR("Unable to allocate bp memory\n");
2655                         return rc;
2656                 }
2657         }
2658
2659         /* needs to be done after alloc mem, since it self-adjusts to the
2660          * amount of memory available for RSS queues
2661          */
2662         rc = bnx2x_alloc_fp_mem(bp);
2663         if (rc) {
2664                 BNX2X_ERR("Unable to allocate memory for fps\n");
2665                 LOAD_ERROR_EXIT(bp, load_error0);
2666         }
2667
2668         /* Allocate memory for FW statistics */
2669         if (bnx2x_alloc_fw_stats_mem(bp))
2670                 LOAD_ERROR_EXIT(bp, load_error0);
2671
2672         /* request pf to initialize status blocks */
2673         if (IS_VF(bp)) {
2674                 rc = bnx2x_vfpf_init(bp);
2675                 if (rc)
2676                         LOAD_ERROR_EXIT(bp, load_error0);
2677         }
2678
2679         /* Since bnx2x_alloc_mem() may update bp->num_queues,
2680          * bnx2x_set_real_num_queues() should always come after it.
2681          * At this stage cnic queues are not counted.
2682          */
2683         rc = bnx2x_set_real_num_queues(bp, 0);
2684         if (rc) {
2685                 BNX2X_ERR("Unable to set real_num_queues\n");
2686                 LOAD_ERROR_EXIT(bp, load_error0);
2687         }
2688
2689         /* Configure multi-CoS mappings in the kernel.
2690          * This configuration may be overridden by a multi-class queue
2691          * discipline or by a DCBX negotiation result.
2692          */
2693         bnx2x_setup_tc(bp->dev, bp->max_cos);
2694
2695         /* Add all NAPI objects */
2696         bnx2x_add_all_napi(bp);
2697         DP(NETIF_MSG_IFUP, "napi added\n");
2698         bnx2x_napi_enable(bp);
2699
2700         if (IS_PF(bp)) {
2701                 /* set pf load just before approaching the MCP */
2702                 bnx2x_set_pf_load(bp);
2703
2704                 /* if mcp exists send load request and analyze response */
2705                 if (!BP_NOMCP(bp)) {
2706                         /* attempt to load pf */
2707                         rc = bnx2x_nic_load_request(bp, &load_code);
2708                         if (rc)
2709                                 LOAD_ERROR_EXIT(bp, load_error1);
2710
2711                         /* what did mcp say? */
2712                         rc = bnx2x_compare_fw_ver(bp, load_code, true);
2713                         if (rc) {
2714                                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2715                                 LOAD_ERROR_EXIT(bp, load_error2);
2716                         }
2717                 } else {
2718                         load_code = bnx2x_nic_load_no_mcp(bp, port);
2719                 }
2720
2721                 /* mark pmf if applicable */
2722                 bnx2x_nic_load_pmf(bp, load_code);
2723
2724                 /* Init Function state controlling object */
2725                 bnx2x__init_func_obj(bp);
2726
2727                 /* Initialize HW */
2728                 rc = bnx2x_init_hw(bp, load_code);
2729                 if (rc) {
2730                         BNX2X_ERR("HW init failed, aborting\n");
2731                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2732                         LOAD_ERROR_EXIT(bp, load_error2);
2733                 }
2734         }
2735
2736         bnx2x_pre_irq_nic_init(bp);
2737
2738         /* Connect to IRQs */
2739         rc = bnx2x_setup_irqs(bp);
2740         if (rc) {
2741                 BNX2X_ERR("setup irqs failed\n");
2742                 if (IS_PF(bp))
2743                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2744                 LOAD_ERROR_EXIT(bp, load_error2);
2745         }
2746
2747         /* Init per-function objects */
2748         if (IS_PF(bp)) {
2749                 /* Setup NIC internals and enable interrupts */
2750                 bnx2x_post_irq_nic_init(bp, load_code);
2751
2752                 bnx2x_init_bp_objs(bp);
2753                 bnx2x_iov_nic_init(bp);
2754
2755                 /* Set AFEX default VLAN tag to an invalid value */
2756                 bp->afex_def_vlan_tag = -1;
2757                 bnx2x_nic_load_afex_dcc(bp, load_code);
2758                 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2759                 rc = bnx2x_func_start(bp);
2760                 if (rc) {
2761                         BNX2X_ERR("Function start failed!\n");
2762                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2763
2764                         LOAD_ERROR_EXIT(bp, load_error3);
2765                 }
2766
2767                 /* Send LOAD_DONE command to MCP */
2768                 if (!BP_NOMCP(bp)) {
2769                         load_code = bnx2x_fw_command(bp,
2770                                                      DRV_MSG_CODE_LOAD_DONE, 0);
2771                         if (!load_code) {
2772                                 BNX2X_ERR("MCP response failure, aborting\n");
2773                                 rc = -EBUSY;
2774                                 LOAD_ERROR_EXIT(bp, load_error3);
2775                         }
2776                 }
2777
2778                 /* initialize FW coalescing state machines in RAM */
2779                 bnx2x_update_coalesce(bp);
2780         }
2781
2782         /* setup the leading queue */
2783         rc = bnx2x_setup_leading(bp);
2784         if (rc) {
2785                 BNX2X_ERR("Setup leading failed!\n");
2786                 LOAD_ERROR_EXIT(bp, load_error3);
2787         }
2788
2789         /* set up the rest of the queues */
2790         for_each_nondefault_eth_queue(bp, i) {
2791                 if (IS_PF(bp))
2792                         rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2793                 else /* VF */
2794                         rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2795                 if (rc) {
2796                         BNX2X_ERR("Queue %d setup failed\n", i);
2797                         LOAD_ERROR_EXIT(bp, load_error3);
2798                 }
2799         }
2800
2801         /* setup rss */
2802         rc = bnx2x_init_rss(bp);
2803         if (rc) {
2804                 BNX2X_ERR("PF RSS init failed\n");
2805                 LOAD_ERROR_EXIT(bp, load_error3);
2806         }
2807
2808         /* Now that clients are configured we are ready to work */
2809         bp->state = BNX2X_STATE_OPEN;
2810
2811         /* Configure a ucast MAC */
2812         if (IS_PF(bp))
2813                 rc = bnx2x_set_eth_mac(bp, true);
2814         else /* vf */
2815                 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2816                                            true);
2817         if (rc) {
2818                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2819                 LOAD_ERROR_EXIT(bp, load_error3);
2820         }
2821
2822         if (IS_PF(bp) && bp->pending_max) {
2823                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2824                 bp->pending_max = 0;
2825         }
2826
2827         bp->force_link_down = false;
2828         if (bp->port.pmf) {
2829                 rc = bnx2x_initial_phy_init(bp, load_mode);
2830                 if (rc)
2831                         LOAD_ERROR_EXIT(bp, load_error3);
2832         }
2833         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2834
2835         /* Start fast path */
2836
2837         /* Re-configure vlan filters */
2838         rc = bnx2x_vlan_reconfigure_vid(bp);
2839         if (rc)
2840                 LOAD_ERROR_EXIT(bp, load_error3);
2841
2842         /* Initialize Rx filter. */
2843         bnx2x_set_rx_mode_inner(bp);
2844
2845         if (bp->flags & PTP_SUPPORTED) {
2846                 bnx2x_register_phc(bp);
2847                 bnx2x_init_ptp(bp);
2848                 bnx2x_configure_ptp_filters(bp);
2849         }
2850         /* Start Tx */
2851         switch (load_mode) {
2852         case LOAD_NORMAL:
2853                 /* Tx queues should only be re-enabled */
2854                 netif_tx_wake_all_queues(bp->dev);
2855                 break;
2856
2857         case LOAD_OPEN:
2858                 netif_tx_start_all_queues(bp->dev);
2859                 smp_mb__after_atomic();
2860                 break;
2861
2862         case LOAD_DIAG:
2863         case LOAD_LOOPBACK_EXT:
2864                 bp->state = BNX2X_STATE_DIAG;
2865                 break;
2866
2867         default:
2868                 break;
2869         }
2870
2871         if (bp->port.pmf)
2872                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2873         else
2874                 bnx2x__link_status_update(bp);
2875
2876         /* start the timer */
2877         mod_timer(&bp->timer, jiffies + bp->current_interval);
2878
2879         if (CNIC_ENABLED(bp))
2880                 bnx2x_load_cnic(bp);
2881
2882         if (IS_PF(bp))
2883                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2884
2885         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2886                 /* mark the driver as loaded in shmem2 */
2887                 u32 val;
2888                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2889                 val &= ~DRV_FLAGS_MTU_MASK;
2890                 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2891                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2892                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2893                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2894         }
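
        /* The read-modify-write above packs the current MTU into the
         * DRV_FLAGS_MTU_MASK field: the old MTU bits are cleared, the new
         * value is shifted in by DRV_FLAGS_MTU_SHIFT, and the "loaded"
         * capability bits are OR-ed on top in a single shmem2 write.
         */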
2895
2896         /* Wait for all pending SP commands to complete */
2897         if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2898                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2899                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2900                 return -EBUSY;
2901         }
2902
2903         /* Update driver data for On-Chip MFW dump. */
2904         if (IS_PF(bp))
2905                 bnx2x_update_mfw_dump(bp);
2906
2907         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2908         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2909                 bnx2x_dcbx_init(bp, false);
2910
2911         if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2912                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2913
2914         DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2915
2916         return 0;
2917
2918 #ifndef BNX2X_STOP_ON_ERROR
2919 load_error3:
2920         if (IS_PF(bp)) {
2921                 bnx2x_int_disable_sync(bp, 1);
2922
2923                 /* Clean queueable objects */
2924                 bnx2x_squeeze_objects(bp);
2925         }
2926
2927         /* Free SKBs, SGEs, TPA pool and driver internals */
2928         bnx2x_free_skbs(bp);
2929         for_each_rx_queue(bp, i)
2930                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2931
2932         /* Release IRQs */
2933         bnx2x_free_irq(bp);
2934 load_error2:
2935         if (IS_PF(bp) && !BP_NOMCP(bp)) {
2936                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2937                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2938         }
2939
2940         bp->port.pmf = 0;
2941 load_error1:
2942         bnx2x_napi_disable(bp);
2943         bnx2x_del_all_napi(bp);
2944
2945         /* clear pf_load status, as it was already set */
2946         if (IS_PF(bp))
2947                 bnx2x_clear_pf_load(bp);
2948 load_error0:
2949         bnx2x_free_fw_stats_mem(bp);
2950         bnx2x_free_fp_mem(bp);
2951         bnx2x_free_mem(bp);
2952
2953         return rc;
2954 #endif /* ! BNX2X_STOP_ON_ERROR */
2955 }
2956
2957 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2958 {
2959         u8 rc = 0, cos, i;
2960
2961         /* Wait until tx fastpath tasks complete */
2962         for_each_tx_queue(bp, i) {
2963                 struct bnx2x_fastpath *fp = &bp->fp[i];
2964
2965                 for_each_cos_in_tx_queue(fp, cos)
2966                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2967                 if (rc)
2968                         return rc;
2969         }
2970         return 0;
2971 }
2972
2973 /* must be called with rtnl_lock */
2974 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2975 {
2976         int i;
2977         bool global = false;
2978
2979         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2980
2981         if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2982                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2983
2984         /* mark the driver as unloaded in shmem2 */
2985         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2986                 u32 val;
2987                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2988                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2989                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2990         }
2991
2992         if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2993             (bp->state == BNX2X_STATE_CLOSED ||
2994              bp->state == BNX2X_STATE_ERROR)) {
2995                 /* We can get here if the driver has been unloaded
2996                  * during parity error recovery and is either waiting for a
2997                  * leader to complete or for other functions to unload and
2998                  * then ifdown has been issued. In this case we want to
2999                  * unload and let other functions complete a recovery
3000                  * process.
3001                  */
3002                 bp->recovery_state = BNX2X_RECOVERY_DONE;
3003                 bp->is_leader = 0;
3004                 bnx2x_release_leader_lock(bp);
3005                 smp_mb();
3006
3007                 DP(NETIF_MSG_IFDOWN, "Releasing leadership...\n");
3008                 BNX2X_ERR("Can't unload in closed or error state\n");
3009                 return -EINVAL;
3010         }
3011
3012         /* Nothing to do during unload if the previous bnx2x_nic_load()
3013          * has not completed successfully - all resources are released.
3014          *
3015          * We can get here only after an unsuccessful ndo_* callback, during
3016          * which the dev->IFF_UP flag is still set.
3017          */
3018         if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3019                 return 0;
3020
3021         /* It's important to set the bp->state to the value different from
3022          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3023          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3024          */
3025         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3026         smp_mb();
3027
3028         /* indicate to VFs that the PF is going down */
3029         bnx2x_iov_channel_down(bp);
3030
3031         if (CNIC_LOADED(bp))
3032                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3033
3034         /* Stop Tx */
3035         bnx2x_tx_disable(bp);
3036         netdev_reset_tc(bp->dev);
3037
3038         bp->rx_mode = BNX2X_RX_MODE_NONE;
3039
3040         del_timer_sync(&bp->timer);
3041
3042         if (IS_PF(bp) && !BP_NOMCP(bp)) {
3043                 /* Set ALWAYS_ALIVE bit in shmem */
3044                 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3045                 bnx2x_drv_pulse(bp);
3046                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3047                 bnx2x_save_statistics(bp);
3048         }
3049
3050         /* Wait till consumers catch up with producers in all queues.
3051          * If we're recovering, FW can't write to host so no reason
3052          * to wait for the queues to complete all Tx.
3053          */
3054         if (unload_mode != UNLOAD_RECOVERY)
3055                 bnx2x_drain_tx_queues(bp);
3056
3057         /* If VF, indicate to the PF that this function is going down (the
3058          * PF will delete SP elements and clear initializations).
3059          */
3060         if (IS_VF(bp))
3061                 bnx2x_vfpf_close_vf(bp);
3062         else if (unload_mode != UNLOAD_RECOVERY)
3063                 /* if this is a normal/close unload, we need to clean up the chip */
3064                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3065         else {
3066                 /* Send the UNLOAD_REQUEST to the MCP */
3067                 bnx2x_send_unload_req(bp, unload_mode);
3068
3069                 /* Prevent transactions to the host from the functions on the
3070                  * engine that doesn't reset global blocks in case of global
3071                  * attention, once global blocks are reset and gates are opened
3072                  * (i.e. the engine whose leader will perform the recovery
3073                  * last).
3074                  */
3075                 if (!CHIP_IS_E1x(bp))
3076                         bnx2x_pf_disable(bp);
3077
3078                 /* Disable HW interrupts, NAPI */
3079                 bnx2x_netif_stop(bp, 1);
3080                 /* Delete all NAPI objects */
3081                 bnx2x_del_all_napi(bp);
3082                 if (CNIC_LOADED(bp))
3083                         bnx2x_del_all_napi_cnic(bp);
3084                 /* Release IRQs */
3085                 bnx2x_free_irq(bp);
3086
3087                 /* Report UNLOAD_DONE to MCP */
3088                 bnx2x_send_unload_done(bp, false);
3089         }
3090
3091         /*
3092          * At this stage no more interrupts will arrive so we may safely clean
3093          * the queueable objects here in case they failed to get cleaned so far.
3094          */
3095         if (IS_PF(bp))
3096                 bnx2x_squeeze_objects(bp);
3097
3098         /* There should be no more pending SP commands at this stage */
3099         bp->sp_state = 0;
3100
3101         bp->port.pmf = 0;
3102
3103         /* clear pending work in rtnl task */
3104         bp->sp_rtnl_state = 0;
3105         smp_mb();
3106
3107         /* Free SKBs, SGEs, TPA pool and driver internals */
3108         bnx2x_free_skbs(bp);
3109         if (CNIC_LOADED(bp))
3110                 bnx2x_free_skbs_cnic(bp);
3111         for_each_rx_queue(bp, i)
3112                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3113
3114         bnx2x_free_fp_mem(bp);
3115         if (CNIC_LOADED(bp))
3116                 bnx2x_free_fp_mem_cnic(bp);
3117
3118         if (IS_PF(bp)) {
3119                 if (CNIC_LOADED(bp))
3120                         bnx2x_free_mem_cnic(bp);
3121         }
3122         bnx2x_free_mem(bp);
3123
3124         bp->state = BNX2X_STATE_CLOSED;
3125         bp->cnic_loaded = false;
3126
3127         /* Clear driver version indication in shmem */
3128         if (IS_PF(bp) && !BP_NOMCP(bp))
3129                 bnx2x_update_mng_version(bp);
3130
3131         /* Check if there are pending parity attentions. If there are - set
3132          * RECOVERY_IN_PROGRESS.
3133          */
3134         if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3135                 bnx2x_set_reset_in_progress(bp);
3136
3137                 /* Set RESET_IS_GLOBAL if needed */
3138                 if (global)
3139                         bnx2x_set_reset_global(bp);
3140         }
3141
3142         /* The last driver must disable the "close the gate" functionality
3143          * if there is no parity attention or "process kill" pending.
3144          */
3145         if (IS_PF(bp) &&
3146             !bnx2x_clear_pf_load(bp) &&
3147             bnx2x_reset_is_done(bp, BP_PATH(bp)))
3148                 bnx2x_disable_close_the_gate(bp);
3149
3150         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3151
3152         return 0;
3153 }
3154
3155 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3156 {
3157         u16 pmcsr;
3158
3159         /* If there is no power capability, silently succeed */
3160         if (!bp->pdev->pm_cap) {
3161                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3162                 return 0;
3163         }
3164
3165         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3166
3167         switch (state) {
3168         case PCI_D0:
3169                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3170                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3171                                        PCI_PM_CTRL_PME_STATUS));
3172
3173                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3174                         /* delay required during transition out of D3hot */
3175                         msleep(20);
3176                 break;
3177
3178         case PCI_D3hot:
3179                 /* If there are other clients above, don't
3180                  * shut down the power */
3181                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3182                         return 0;
3183                 /* Don't shut down the power for emulation and FPGA */
3184                 if (CHIP_REV_IS_SLOW(bp))
3185                         return 0;
3186
3187                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3188                 pmcsr |= 3;
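                /* The PowerState field is the low two bits of PMCSR
                 * (PCI_PM_CTRL_STATE_MASK): 0 selects D0 and 3 selects
                 * D3hot, hence the literal 3 here.
                 */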
3189
3190                 if (bp->wol)
3191                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3192
3193                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3194                                       pmcsr);
3195
3196                 /* No more memory access after this point until
3197                  * device is brought back to D0.
3198                  */
3199                 break;
3200
3201         default:
3202                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3203                 return -EINVAL;
3204         }
3205         return 0;
3206 }
3207
3208 /*
3209  * net_device service functions
3210  */
3211 static int bnx2x_poll(struct napi_struct *napi, int budget)
3212 {
3213         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3214                                                  napi);
3215         struct bnx2x *bp = fp->bp;
3216         int rx_work_done;
3217         u8 cos;
3218
3219 #ifdef BNX2X_STOP_ON_ERROR
3220         if (unlikely(bp->panic)) {
3221                 napi_complete(napi);
3222                 return 0;
3223         }
3224 #endif
3225         for_each_cos_in_tx_queue(fp, cos)
3226                 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3227                         bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3228
3229         rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3230
3231         if (rx_work_done < budget) {
3232                 /* No need to update SB for FCoE L2 ring as long as
3233                  * it's connected to the default SB and the SB
3234                  * has been updated when NAPI was scheduled.
3235                  */
3236                 if (IS_FCOE_FP(fp)) {
3237                         napi_complete_done(napi, rx_work_done);
3238                 } else {
3239                         bnx2x_update_fpsb_idx(fp);
3240                         /* bnx2x_has_rx_work() reads the status block,
3241                          * thus we need to ensure that status block indices
3242                          * have been actually read (bnx2x_update_fpsb_idx)
3243                          * prior to this check (bnx2x_has_rx_work) so that
3244                          * we won't write the "newer" value of the status block
3245                          * to IGU (if there was a DMA right after
3246                          * bnx2x_has_rx_work and if there is no rmb, the memory
3247                          * reading (bnx2x_update_fpsb_idx) may be postponed
3248                          * to right before bnx2x_ack_sb). In this case there
3249                          * will never be another interrupt until there is
3250                          * another update of the status block, while there
3251                          * is still unhandled work.
3252                          */
3253                         rmb();
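                        /* In short: without this rmb() the "no work"
                         * answer below could come from a stale read,
                         * while the newer status block index is what
                         * gets acked to the IGU - leaving pending work
                         * with no further interrupt to process it.
                         */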
3254
3255                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3256                                 if (napi_complete_done(napi, rx_work_done)) {
3257                                         /* Re-enable interrupts */
3258                                         DP(NETIF_MSG_RX_STATUS,
3259                                            "Update index to %d\n", fp->fp_hc_idx);
3260                                         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3261                                                      le16_to_cpu(fp->fp_hc_idx),
3262                                                      IGU_INT_ENABLE, 1);
3263                                 }
3264                         } else {
3265                                 rx_work_done = budget;
3266                         }
3267                 }
3268         }
3269
3270         return rx_work_done;
3271 }
3272
3273 /* We split the first BD into header and data BDs
3274  * to ease the pain of our fellow microcode engineers;
3275  * we use one mapping for both BDs.
3276  */
3277 static u16 bnx2x_tx_split(struct bnx2x *bp,
3278                           struct bnx2x_fp_txdata *txdata,
3279                           struct sw_tx_bd *tx_buf,
3280                           struct eth_tx_start_bd **tx_bd, u16 hlen,
3281                           u16 bd_prod)
3282 {
3283         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3284         struct eth_tx_bd *d_tx_bd;
3285         dma_addr_t mapping;
3286         int old_len = le16_to_cpu(h_tx_bd->nbytes);
3287
3288         /* first fix first BD */
3289         h_tx_bd->nbytes = cpu_to_le16(hlen);
3290
3291         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3292            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3293
3294         /* now get a new data BD
3295          * (after the pbd) and fill it */
3296         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3297         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3298
3299         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3300                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3301
3302         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3303         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3304         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3305
3306         /* this marks the BD as one that has no individual mapping */
3307         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3308
3309         DP(NETIF_MSG_TX_QUEUED,
3310            "TSO split data size is %d (%x:%x)\n",
3311            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3312
3313         /* update tx_bd */
3314         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3315
3316         return bd_prod;
3317 }
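
/* Worked example (illustrative numbers): for a TSO packet with hlen = 54
 * header bytes in a 1514-byte linear part, the start BD is trimmed to
 * nbytes = 54 and a new data BD is chained that reuses the same DMA
 * mapping at offset +54 with nbytes = 1514 - 54 = 1460.
 */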
3318
3319 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3320 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3321 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3322 {
3323         __sum16 tsum = (__force __sum16) csum;
3324
3325         if (fix > 0)
3326                 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3327                                   csum_partial(t_header - fix, fix, 0)));
3328
3329         else if (fix < 0)
3330                 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3331                                   csum_partial(t_header, -fix, 0)));
3332
3333         return bswab16(tsum);
3334 }
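
/* Worked example (illustrative): with fix = 2 the hardware checksummed
 * two extra bytes preceding the transport header, so their partial sum
 * is subtracted before folding; with fix = -2 the first two header
 * bytes were missed, so their partial sum is added instead.
 */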
3335
3336 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3337 {
3338         u32 rc;
3339         __u8 prot = 0;
3340         __be16 protocol;
3341
3342         if (skb->ip_summed != CHECKSUM_PARTIAL)
3343                 return XMIT_PLAIN;
3344
3345         protocol = vlan_get_protocol(skb);
3346         if (protocol == htons(ETH_P_IPV6)) {
3347                 rc = XMIT_CSUM_V6;
3348                 prot = ipv6_hdr(skb)->nexthdr;
3349         } else {
3350                 rc = XMIT_CSUM_V4;
3351                 prot = ip_hdr(skb)->protocol;
3352         }
3353
3354         if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3355                 if (inner_ip_hdr(skb)->version == 6) {
3356                         rc |= XMIT_CSUM_ENC_V6;
3357                         if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3358                                 rc |= XMIT_CSUM_TCP;
3359                 } else {
3360                         rc |= XMIT_CSUM_ENC_V4;
3361                         if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3362                                 rc |= XMIT_CSUM_TCP;
3363                 }
3364         }
3365         if (prot == IPPROTO_TCP)
3366                 rc |= XMIT_CSUM_TCP;
3367
3368         if (skb_is_gso(skb)) {
3369                 if (skb_is_gso_v6(skb)) {
3370                         rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3371                         if (rc & XMIT_CSUM_ENC)
3372                                 rc |= XMIT_GSO_ENC_V6;
3373                 } else {
3374                         rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3375                         if (rc & XMIT_CSUM_ENC)
3376                                 rc |= XMIT_GSO_ENC_V4;
3377                 }
3378         }
3379
3380         return rc;
3381 }
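
/* Example compositions (illustrative): a GSO TCPv4 skb sent with
 * CHECKSUM_PARTIAL yields XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4,
 * while a plain UDPv6 datagram with CHECKSUM_PARTIAL yields just
 * XMIT_CSUM_V6.
 */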
3382
3383 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 parsing BDs and last BD) */
3384 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3385
3386 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3387 #define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3388
3389 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3390 /* Check if a packet requires linearization (the packet is too fragmented);
3391    no need to check fragmentation if page size > 8K (there will be no
3392    violation of FW restrictions) */
3393 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3394                              u32 xmit_type)
3395 {
3396         int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3397         int to_copy = 0, hlen = 0;
3398
3399         if (xmit_type & XMIT_GSO_ENC)
3400                 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3401
3402         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3403                 if (xmit_type & XMIT_GSO) {
3404                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3405                         int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3406                         /* Number of windows to check */
3407                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3408                         int wnd_idx = 0;
3409                         int frag_idx = 0;
3410                         u32 wnd_sum = 0;
3411
3412                         /* Headers length */
3413                         if (xmit_type & XMIT_GSO_ENC)
3414                                 hlen = (int)(skb_inner_transport_header(skb) -
3415                                              skb->data) +
3416                                              inner_tcp_hdrlen(skb);
3417                         else
3418                                 hlen = (int)(skb_transport_header(skb) -
3419                                              skb->data) + tcp_hdrlen(skb);
3420
3421                         /* Amount of data (w/o headers) on the linear part of the SKB */
3422                         first_bd_sz = skb_headlen(skb) - hlen;
3423
3424                         wnd_sum  = first_bd_sz;
3425
3426                         /* Calculate the first sum - it's special */
3427                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3428                                 wnd_sum +=
3429                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3430
3431                         /* If there was data in the linear part of the skb - check it */
3432                         if (first_bd_sz > 0) {
3433                                 if (unlikely(wnd_sum < lso_mss)) {
3434                                         to_copy = 1;
3435                                         goto exit_lbl;
3436                                 }
3437
3438                                 wnd_sum -= first_bd_sz;
3439                         }
3440
3441                         /* Others are easier: run through the frag list and
3442                            check all windows */
3443                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3444                                 wnd_sum +=
3445                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3446
3447                                 if (unlikely(wnd_sum < lso_mss)) {
3448                                         to_copy = 1;
3449                                         break;
3450                                 }
3451                                 wnd_sum -=
3452                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3453                         }
3454                 } else {
3455                         /* in the non-LSO case a too fragmented packet
3456                            should always be linearized */
3457                         to_copy = 1;
3458                 }
3459         }
3460
3461 exit_lbl:
3462         if (unlikely(to_copy))
3463                 DP(NETIF_MSG_TX_QUEUED,
3464                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3465                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3466                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3467
3468         return to_copy;
3469 }
3470 #endif
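
/* Worked example for the window check above (illustrative numbers,
 * assuming MAX_FETCH_BD = 13): for a regular TSO packet wnd_size is
 * 13 - 3 = 10, so every run of 10 consecutive BDs must carry at least
 * gso_size bytes. A packet with MSS 1460 whose payload sits entirely in
 * eleven 100-byte frags has 10-frag windows of only 1000 bytes and must
 * therefore be linearized.
 */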
3471
3472 /**
3473  * bnx2x_set_pbd_gso - update PBD in GSO case.
3474  *
3475  * @skb:        packet skb
3476  * @pbd:        parse BD
3477  * @xmit_type:  xmit flags
3478  */
3479 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3480                               struct eth_tx_parse_bd_e1x *pbd,
3481                               u32 xmit_type)
3482 {
3483         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3484         pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3485         pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3486
3487         if (xmit_type & XMIT_GSO_V4) {
3488                 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3489                 pbd->tcp_pseudo_csum =
3490                         bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3491                                                    ip_hdr(skb)->daddr,
3492                                                    0, IPPROTO_TCP, 0));
3493         } else {
3494                 pbd->tcp_pseudo_csum =
3495                         bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3496                                                  &ipv6_hdr(skb)->daddr,
3497                                                  0, IPPROTO_TCP, 0));
3498         }
3499
3500         pbd->global_data |=
3501                 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3502 }
3503
3504 /**
3505  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3506  *
3507  * @bp:                 driver handle
3508  * @skb:                packet skb
3509  * @parsing_data:       data to be updated
3510  * @xmit_type:          xmit flags
3511  *
3512  * 57712/578xx related, when skb has encapsulation
3513  */
3514 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3515                                  u32 *parsing_data, u32 xmit_type)
3516 {
3517         *parsing_data |=
3518                 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3519                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3520                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3521
3522         if (xmit_type & XMIT_CSUM_TCP) {
3523                 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3524                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3525                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3526
3527                 return skb_inner_transport_header(skb) +
3528                         inner_tcp_hdrlen(skb) - skb->data;
3529         }
3530
3531         /* We support checksum offload for TCP and UDP only.
3532          * No need to pass the UDP header length - it's a constant.
3533          */
3534         return skb_inner_transport_header(skb) +
3535                 sizeof(struct udphdr) - skb->data;
3536 }
3537
3538 /**
3539  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3540  *
3541  * @bp:                 driver handle
3542  * @skb:                packet skb
3543  * @parsing_data:       data to be updated
3544  * @xmit_type:          xmit flags
3545  *
3546  * 57712/578xx related
3547  */
3548 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3549                                 u32 *parsing_data, u32 xmit_type)
3550 {
3551         *parsing_data |=
3552                 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3553                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3554                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3555
3556         if (xmit_type & XMIT_CSUM_TCP) {
3557                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3558                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3559                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3560
3561                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3562         }
3563         /* We support checksum offload for TCP and UDP only.
3564          * No need to pass the UDP header length - it's a constant.
3565          */
3566         return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3567 }
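
/* Unit note with a worked example (illustrative): the offsets above are
 * in 16-bit words and the TCP header length in 32-bit dwords, so for a
 * standard untagged TCPv4 frame the L4 header starting at byte 34 is
 * encoded as 17 words, a 20-byte TCP header as 5 dwords, and the
 * function returns the full header length in bytes (34 + 20 = 54).
 */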
3568
3569 /* set FW indication according to inner or outer protocols if tunneled */
3570 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3571                                struct eth_tx_start_bd *tx_start_bd,
3572                                u32 xmit_type)
3573 {
3574         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3575
3576         if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3577                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3578
3579         if (!(xmit_type & XMIT_CSUM_TCP))
3580                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3581 }
3582
3583 /**
3584  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3585  *
3586  * @bp:         driver handle
3587  * @skb:        packet skb
3588  * @pbd:        parse BD to be updated
3589  * @xmit_type:  xmit flags
3590  */
3591 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3592                              struct eth_tx_parse_bd_e1x *pbd,
3593                              u32 xmit_type)
3594 {
3595         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3596
3597         /* for now NS flag is not used in Linux */
3598         pbd->global_data =
3599                 cpu_to_le16(hlen |
3600                             ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3601                              ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3602
3603         pbd->ip_hlen_w = (skb_transport_header(skb) -
3604                         skb_network_header(skb)) >> 1;
3605
3606         hlen += pbd->ip_hlen_w;
3607
3608         /* We support checksum offload for TCP and UDP only */
3609         if (xmit_type & XMIT_CSUM_TCP)
3610                 hlen += tcp_hdrlen(skb) / 2;
3611         else
3612                 hlen += sizeof(struct udphdr) / 2;
3613
3614         pbd->total_hlen_w = cpu_to_le16(hlen);
3615         hlen = hlen * 2;
3616
3617         if (xmit_type & XMIT_CSUM_TCP) {
3618                 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3619
3620         } else {
3621                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3622
3623                 DP(NETIF_MSG_TX_QUEUED,
3624                    "hlen %d  fix %d  csum before fix %x\n",
3625                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3626
3627                 /* HW bug: fixup the CSUM */
3628                 pbd->tcp_pseudo_csum =
3629                         bnx2x_csum_fix(skb_transport_header(skb),
3630                                        SKB_CS(skb), fix);
3631
3632                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3633                    pbd->tcp_pseudo_csum);
3634         }
3635
3636         return hlen;
3637 }
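
/* Worked example for the E1x PBD above (illustrative): for an untagged
 * TCPv4 frame, hlen starts as the 14-byte MAC header -> 7 words,
 * ip_hlen_w adds 20 / 2 = 10 words and the TCP header another 10, so
 * total_hlen_w = 27 words and 54 bytes are returned.
 */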
3638
3639 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3640                                       struct eth_tx_parse_bd_e2 *pbd_e2,
3641                                       struct eth_tx_parse_2nd_bd *pbd2,
3642                                       u16 *global_data,
3643                                       u32 xmit_type)
3644 {
3645         u16 hlen_w = 0;
3646         u8 outerip_off, outerip_len = 0;
3647
3648         /* from outer IP to transport */
3649         hlen_w = (skb_inner_transport_header(skb) -
3650                   skb_network_header(skb)) >> 1;
3651
3652         /* transport len */
3653         hlen_w += inner_tcp_hdrlen(skb) >> 1;
3654
3655         pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3656
3657         /* outer IP header info */
3658         if (xmit_type & XMIT_CSUM_V4) {
3659                 struct iphdr *iph = ip_hdr(skb);
3660                 u32 csum = (__force u32)(~iph->check) -
3661                            (__force u32)iph->tot_len -
3662                            (__force u32)iph->frag_off;
3663
3664                 outerip_len = iph->ihl << 1;
3665
3666                 pbd2->fw_ip_csum_wo_len_flags_frag =
3667                         bswab16(csum_fold((__force __wsum)csum));
3668         } else {
3669                 pbd2->fw_ip_hdr_to_payload_w =
3670                         hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3671                 pbd_e2->data.tunnel_data.flags |=
3672                         ETH_TUNNEL_DATA_IPV6_OUTER;
3673         }
3674
3675         pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3676
3677         pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3678
3679         /* inner IP header info */
3680         if (xmit_type & XMIT_CSUM_ENC_V4) {
3681                 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3682
3683                 pbd_e2->data.tunnel_data.pseudo_csum =
3684                         bswab16(~csum_tcpudp_magic(
3685                                         inner_ip_hdr(skb)->saddr,
3686                                         inner_ip_hdr(skb)->daddr,
3687                                         0, IPPROTO_TCP, 0));
3688         } else {
3689                 pbd_e2->data.tunnel_data.pseudo_csum =
3690                         bswab16(~csum_ipv6_magic(
3691                                         &inner_ipv6_hdr(skb)->saddr,
3692                                         &inner_ipv6_hdr(skb)->daddr,
3693                                         0, IPPROTO_TCP, 0));
3694         }
3695
3696         outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3697
3698         *global_data |=
3699                 outerip_off |
3700                 (outerip_len <<
3701                         ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3702                 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3703                         ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3704
3705         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3706                 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3707                 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3708         }
3709 }
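
/* Reading of the outer-IPv4 math above (a sketch, not from the spec):
 * tot_len and frag_off are backed out of ~iph->check so the FW receives
 * an IP checksum "without len/flags/frag" (matching the field name)
 * that it can patch cheaply for each segment it generates.
 */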
3710
3711 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3712                                          u32 xmit_type)
3713 {
3714         struct ipv6hdr *ipv6;
3715
3716         if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3717                 return;
3718
3719         if (xmit_type & XMIT_GSO_ENC_V6)
3720                 ipv6 = inner_ipv6_hdr(skb);
3721         else /* XMIT_GSO_V6 */
3722                 ipv6 = ipv6_hdr(skb);
3723
3724         if (ipv6->nexthdr == NEXTHDR_IPV6)
3725                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3726 }
3727
3728 /* called with netif_tx_lock
3729  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3730  * netif_wake_queue()
3731  */
3732 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3733 {
3734         struct bnx2x *bp = netdev_priv(dev);
3735
3736         struct netdev_queue *txq;
3737         struct bnx2x_fp_txdata *txdata;
3738         struct sw_tx_bd *tx_buf;
3739         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3740         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3741         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3742         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3743         struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3744         u32 pbd_e2_parsing_data = 0;
3745         u16 pkt_prod, bd_prod;
3746         int nbd, txq_index;
3747         dma_addr_t mapping;
3748         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3749         int i;
3750         u8 hlen = 0;
3751         __le16 pkt_size = 0;
3752         struct ethhdr *eth;
3753         u8 mac_type = UNICAST_ADDRESS;
3754
3755 #ifdef BNX2X_STOP_ON_ERROR
3756         if (unlikely(bp->panic))
3757                 return NETDEV_TX_BUSY;
3758 #endif
3759
3760         txq_index = skb_get_queue_mapping(skb);
3761         txq = netdev_get_tx_queue(dev, txq_index);
3762
3763         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3764
3765         txdata = &bp->bnx2x_txq[txq_index];
3766
3767         /* enable this debug print to view the transmission queue being used
3768         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3769            txq_index, fp_index, txdata_index); */
3770
3771         /* enable this debug print to view the transmission details
3772         DP(NETIF_MSG_TX_QUEUED,
3773            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3774            txdata->cid, fp_index, txdata_index, txdata, fp); */
3775
3776         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3777                         skb_shinfo(skb)->nr_frags +
3778                         BDS_PER_TX_PKT +
3779                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3780                 /* Handle special storage cases separately */
3781                 if (txdata->tx_ring_size == 0) {
3782                         struct bnx2x_eth_q_stats *q_stats =
3783                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3784                         q_stats->driver_filtered_tx_pkt++;
3785                         dev_kfree_skb(skb);
3786                         return NETDEV_TX_OK;
3787                 }
3788                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3789                 netif_tx_stop_queue(txq);
3790                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3791
3792                 return NETDEV_TX_BUSY;
3793         }
3794
3795         DP(NETIF_MSG_TX_QUEUED,
3796            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3797            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3798            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3799            skb->len);
3800
3801         eth = (struct ethhdr *)skb->data;
3802
3803         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3804         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3805                 if (is_broadcast_ether_addr(eth->h_dest))
3806                         mac_type = BROADCAST_ADDRESS;
3807                 else
3808                         mac_type = MULTICAST_ADDRESS;
3809         }
3810
3811 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3812         /* First, check if we need to linearize the skb (due to FW
3813            restrictions). No need to check fragmentation if page size > 8K
3814            (there will be no violation of FW restrictions) */
3815         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3816                 /* Statistics of linearization */
3817                 bp->lin_cnt++;
3818                 if (skb_linearize(skb) != 0) {
3819                         DP(NETIF_MSG_TX_QUEUED,
3820                            "SKB linearization failed - silently dropping this SKB\n");
3821                         dev_kfree_skb_any(skb);
3822                         return NETDEV_TX_OK;
3823                 }
3824         }
3825 #endif
3826         /* Map skb linear data for DMA */
3827         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3828                                  skb_headlen(skb), DMA_TO_DEVICE);
3829         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3830                 DP(NETIF_MSG_TX_QUEUED,
3831                    "SKB mapping failed - silently dropping this SKB\n");
3832                 dev_kfree_skb_any(skb);
3833                 return NETDEV_TX_OK;
3834         }
3835         /*
3836          * Please read carefully. First we use one BD which we mark as start,
3837          * then we have a parsing info BD (used for TSO or xsum),
3838          * and only then we have the rest of the TSO BDs.
3839          * (Don't forget to mark the last one as last,
3840          * and to unmap only AFTER you write to the BD ...)
3841          * And above all, all pbd sizes are in words - NOT DWORDS!
3842          */
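        /* Resulting BD chain layout (sketch):
         *   start BD -> parse BD (e1x or e2) [-> 2nd parse BD if tunneled]
         *            -> optional TSO-split data BD -> frag data BDs,
         * with the last BD marked as last and first_bd->nbd counting them.
         */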
3843
3844         /* get current pkt produced now - advance it just before sending packet
3845          * since mapping of pages may fail and cause packet to be dropped
3846          */
3847         pkt_prod = txdata->tx_pkt_prod;
3848         bd_prod = TX_BD(txdata->tx_bd_prod);
3849
3850         /* get a tx_buf and first BD
3851          * tx_start_bd may be changed during SPLIT,
3852          * but first_bd will always stay first
3853          */
3854         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3855         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3856         first_bd = tx_start_bd;
3857
3858         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3859
3860         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3861                 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3862                         bp->eth_stats.ptp_skip_tx_ts++;
3863                         BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3864                 } else if (bp->ptp_tx_skb) {
3865                         bp->eth_stats.ptp_skip_tx_ts++;
3866                         netdev_err_once(bp->dev,
3867                                         "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3868                 } else {
3869                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3870                         /* schedule check for Tx timestamp */
3871                         bp->ptp_tx_skb = skb_get(skb);
3872                         bp->ptp_tx_start = jiffies;
3873                         schedule_work(&bp->ptp_task);
3874                 }
3875         }
3876
3877         /* header nbd: this assignment indirectly zeroes the other flags! */
3878         tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3879
3880         /* remember the first BD of the packet */
3881         tx_buf->first_bd = txdata->tx_bd_prod;
3882         tx_buf->skb = skb;
3883         tx_buf->flags = 0;
3884
3885         DP(NETIF_MSG_TX_QUEUED,
3886            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3887            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3888
3889         if (skb_vlan_tag_present(skb)) {
3890                 tx_start_bd->vlan_or_ethertype =
3891                     cpu_to_le16(skb_vlan_tag_get(skb));
3892                 tx_start_bd->bd_flags.as_bitfield |=
3893                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3894         } else {
3895                 /* when transmitting in a VF, the start BD must hold the
3896                  * ethertype for the FW to enforce it
3897                  */
3898                 u16 vlan_tci = 0;
3899 #ifndef BNX2X_STOP_ON_ERROR
3900                 if (IS_VF(bp)) {
3901 #endif
3902                         /* Still need to consider the inband vlan for enforcement */
3903                         if (__vlan_get_tag(skb, &vlan_tci)) {
3904                                 tx_start_bd->vlan_or_ethertype =
3905                                         cpu_to_le16(ntohs(eth->h_proto));
3906                         } else {
3907                                 tx_start_bd->bd_flags.as_bitfield |=
3908                                         (X_ETH_INBAND_VLAN <<
3909                                          ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3910                                 tx_start_bd->vlan_or_ethertype =
3911                                         cpu_to_le16(vlan_tci);
3912                         }
3913 #ifndef BNX2X_STOP_ON_ERROR
3914                 } else {
3915                         /* used by FW for packet accounting */
3916                         tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3917                 }
3918 #endif
3919         }
3920
3921         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3922
3923         /* turn on parsing and get a BD */
3924         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3925
3926         if (xmit_type & XMIT_CSUM)
3927                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3928
3929         if (!CHIP_IS_E1x(bp)) {
3930                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3931                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3932
3933                 if (xmit_type & XMIT_CSUM_ENC) {
3934                         u16 global_data = 0;
3935
3936                         /* Set PBD in enc checksum offload case */
3937                         hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3938                                                       &pbd_e2_parsing_data,
3939                                                       xmit_type);
3940
3941                         /* turn on 2nd parsing and get a BD */
3942                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3943
3944                         pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3945
3946                         memset(pbd2, 0, sizeof(*pbd2));
3947
3948                         pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3949                                 (skb_inner_network_header(skb) -
3950                                  skb->data) >> 1;
3951
3952                         if (xmit_type & XMIT_GSO_ENC)
3953                                 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3954                                                           &global_data,
3955                                                           xmit_type);
3956
3957                         pbd2->global_data = cpu_to_le16(global_data);
3958
3959                         /* add an additional parsing BD indication to the start BD */
3960                         SET_FLAG(tx_start_bd->general_data,
3961                                  ETH_TX_START_BD_PARSE_NBDS, 1);
3962                         /* set encapsulation flag in start BD */
3963                         SET_FLAG(tx_start_bd->general_data,
3964                                  ETH_TX_START_BD_TUNNEL_EXIST, 1);
3965
3966                         tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3967
3968                         nbd++;
3969                 } else if (xmit_type & XMIT_CSUM) {
3970                         /* Set PBD in checksum offload case w/o encapsulation */
3971                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3972                                                      &pbd_e2_parsing_data,
3973                                                      xmit_type);
3974                 }
3975
3976                 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3977                 /* Add the macs to the parsing BD if this is a vf or if
3978                  * Tx Switching is enabled.
3979                  */
3980                 if (IS_VF(bp)) {
3981                         /* override the MAC parameters in the parsing BD */
3982                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3983                                               &pbd_e2->data.mac_addr.src_mid,
3984                                               &pbd_e2->data.mac_addr.src_lo,
3985                                               eth->h_source);
3986
3987                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3988                                               &pbd_e2->data.mac_addr.dst_mid,
3989                                               &pbd_e2->data.mac_addr.dst_lo,
3990                                               eth->h_dest);
3991                 } else {
3992                         if (bp->flags & TX_SWITCHING)
3993                                 bnx2x_set_fw_mac_addr(
3994                                                 &pbd_e2->data.mac_addr.dst_hi,
3995                                                 &pbd_e2->data.mac_addr.dst_mid,
3996                                                 &pbd_e2->data.mac_addr.dst_lo,
3997                                                 eth->h_dest);
3998 #ifdef BNX2X_STOP_ON_ERROR
3999                         /* Enforce security is always set in Stop on Error -
4000                          * source mac should be present in the parsing BD
4001                          */
4002                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4003                                               &pbd_e2->data.mac_addr.src_mid,
4004                                               &pbd_e2->data.mac_addr.src_lo,
4005                                               eth->h_source);
4006 #endif
4007                 }
4008
4009                 SET_FLAG(pbd_e2_parsing_data,
4010                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4011         } else {
4012                 u16 global_data = 0;
4013                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4014                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4015                 /* Set PBD in checksum offload case */
4016                 if (xmit_type & XMIT_CSUM)
4017                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4018
4019                 SET_FLAG(global_data,
4020                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4021                 pbd_e1x->global_data |= cpu_to_le16(global_data);
4022         }
4023
4024         /* Setup the data pointer of the first BD of the packet */
4025         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4026         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4027         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4028         pkt_size = tx_start_bd->nbytes;
4029
4030         DP(NETIF_MSG_TX_QUEUED,
4031            "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4032            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4033            le16_to_cpu(tx_start_bd->nbytes),
4034            tx_start_bd->bd_flags.as_bitfield,
4035            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4036
4037         if (xmit_type & XMIT_GSO) {
4038
4039                 DP(NETIF_MSG_TX_QUEUED,
4040                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4041                    skb->len, hlen, skb_headlen(skb),
4042                    skb_shinfo(skb)->gso_size);
4043
4044                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4045
4046                 if (unlikely(skb_headlen(skb) > hlen)) {
4047                         nbd++;
4048                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4049                                                  &tx_start_bd, hlen,
4050                                                  bd_prod);
4051                 }
4052                 if (!CHIP_IS_E1x(bp))
4053                         pbd_e2_parsing_data |=
4054                                 (skb_shinfo(skb)->gso_size <<
4055                                  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4056                                  ETH_TX_PARSE_BD_E2_LSO_MSS;
4057                 else
4058                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4059         }
4060
4061         /* Set the PBD's parsing_data field if not zero
4062          * (for the chips newer than 57711).
4063          */
4064         if (pbd_e2_parsing_data)
4065                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4066
4067         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4068
4069         /* Handle fragmented skb */
4070         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4071                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4072
4073                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4074                                            skb_frag_size(frag), DMA_TO_DEVICE);
4075                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4076                         unsigned int pkts_compl = 0, bytes_compl = 0;
4077
4078                         DP(NETIF_MSG_TX_QUEUED,
4079                            "Unable to map page - dropping packet...\n");
4080
4081                         /* we need to unmap all buffers already mapped
4082                          * for this SKB;
4083                          * first_bd->nbd needs to be properly updated
4084                          * before the call to bnx2x_free_tx_pkt
4085                          */
4086                         first_bd->nbd = cpu_to_le16(nbd);
4087                         bnx2x_free_tx_pkt(bp, txdata,
4088                                           TX_BD(txdata->tx_pkt_prod),
4089                                           &pkts_compl, &bytes_compl);
4090                         return NETDEV_TX_OK;
4091                 }
4092
4093                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4094                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4095                 if (total_pkt_bd == NULL)
4096                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4097
4098                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4099                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4100                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4101                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4102                 nbd++;
4103
4104                 DP(NETIF_MSG_TX_QUEUED,
4105                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4106                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4107                    le16_to_cpu(tx_data_bd->nbytes));
4108         }
4109
4110         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4111
4112         /* update with actual num BDs */
4113         first_bd->nbd = cpu_to_le16(nbd);
4114
4115         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4116
4117         /* now send a tx doorbell, counting the next-page BD
4118          * if the packet wraps onto or ends at it
4119          */
4120         if (TX_BD_POFF(bd_prod) < nbd)
4121                 nbd++;
4122
4123         /* total_pkt_bytes should be set on the first data BD if
4124          * it's not an LSO packet and there is more than one
4125          * data BD. In this case pkt_size is limited by the MTU.
4126          * However, we prefer to set it for an LSO packet too (even
4127          * though we don't have to) in order to save some CPU cycles
4128          * in the non-LSO case, where those cycles matter much more.
4129          */
4130         if (total_pkt_bd != NULL)
4131                 total_pkt_bd->total_pkt_bytes = pkt_size;
4132
4133         if (pbd_e1x)
4134                 DP(NETIF_MSG_TX_QUEUED,
4135                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4136                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4137                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4138                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4139                    le16_to_cpu(pbd_e1x->total_hlen_w));
4140         if (pbd_e2)
4141                 DP(NETIF_MSG_TX_QUEUED,
4142                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4143                    pbd_e2,
4144                    pbd_e2->data.mac_addr.dst_hi,
4145                    pbd_e2->data.mac_addr.dst_mid,
4146                    pbd_e2->data.mac_addr.dst_lo,
4147                    pbd_e2->data.mac_addr.src_hi,
4148                    pbd_e2->data.mac_addr.src_mid,
4149                    pbd_e2->data.mac_addr.src_lo,
4150                    pbd_e2->parsing_data);
4151         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4152
4153         netdev_tx_sent_queue(txq, skb->len);
4154
4155         skb_tx_timestamp(skb);
4156
4157         txdata->tx_pkt_prod++;
4158         /*
4159          * Make sure that the BD data is updated before updating the producer
4160          * since FW might read the BD right after the producer is updated.
4161          * This is only applicable for weak-ordered memory model archs such
4162          * as IA-64. The following barrier is also mandatory since the FW
4163          * assumes packets must have BDs.
4164          */
4165         wmb();
4166
4167         txdata->tx_db.data.prod += nbd;
4168         /* make sure descriptor update is observed by HW */
4169         wmb();
4170
4171         DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4172
4173         txdata->tx_bd_prod += nbd;
4174
4175         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4176                 netif_tx_stop_queue(txq);
4177
4178                 /* the paired memory barrier is in bnx2x_tx_int(); we must
4179                  * keep the ordering between set_bit() in netif_tx_stop_queue()
4180                  * and the read of fp->bd_tx_cons */
4181                 smp_mb();
4182
4183                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4184                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4185                         netif_tx_wake_queue(txq);
4186         }
4187         txdata->tx_pkt++;
4188
4189         return NETDEV_TX_OK;
4190 }
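
/* Illustrative sketch (not part of the driver): the ordering contract used
 * by the doorbell sequence above. The first wmb() orders the BD writes
 * before the producer update; the second orders the producer update before
 * the doorbell write that makes it visible to the device.
 *
 *	// hypothetical condensed form of the tail of the xmit path above
 *	wmb();                                  // BDs before producer
 *	txdata->tx_db.data.prod += nbd;         // advance SW producer
 *	wmb();                                  // producer before doorbell
 *	DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
 */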
4191
4192 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4193 {
4194         int mfw_vn = BP_FW_MB_IDX(bp);
4195         u32 tmp;
4196
4197         /* If the shmem shouldn't affect configuration, use identity mapping */
4198         if (!IS_MF_BD(bp)) {
4199                 int i;
4200
4201                 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4202                         c2s_map[i] = i;
4203                 *c2s_default = 0;
4204
4205                 return;
4206         }
4207
4208         tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4209         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4210         c2s_map[0] = tmp & 0xff;
4211         c2s_map[1] = (tmp >> 8) & 0xff;
4212         c2s_map[2] = (tmp >> 16) & 0xff;
4213         c2s_map[3] = (tmp >> 24) & 0xff;
4214
4215         tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4216         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4217         c2s_map[4] = tmp & 0xff;
4218         c2s_map[5] = (tmp >> 8) & 0xff;
4219         c2s_map[6] = (tmp >> 16) & 0xff;
4220         c2s_map[7] = (tmp >> 24) & 0xff;
4221
4222         tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4223         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4224         *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4225 }
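
/* Illustrative sketch (not part of the driver): each SHMEM word packs four
 * map entries, one per byte, so the unpacking above is equivalent to a
 * hypothetical helper like:
 *
 *	static void example_unpack_c2s_word(u32 word, u8 *map)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 4; i++)
 *			map[i] = (word >> (8 * i)) & 0xff;
 *	}
 *
 * e.g. word == 0x03020100 yields map[0..3] == 0x00, 0x01, 0x02, 0x03.
 */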
4226
4227 /**
4228  * bnx2x_setup_tc - routine to configure net_device for multi tc
4229  *
4230  * @dev: net device to configure
4231  * @num_tc: number of traffic classes to enable
4232  *
4233  * Callback connected to the ndo_setup_tc function pointer.
4234  */
4235 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4236 {
4237         struct bnx2x *bp = netdev_priv(dev);
4238         u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4239         int cos, prio, count, offset;
4240
4241         /* setup tc must be called under rtnl lock */
4242         ASSERT_RTNL();
4243
4244         /* no traffic classes requested - reset the mapping and return */
4245         if (!num_tc) {
4246                 netdev_reset_tc(dev);
4247                 return 0;
4248         }
4249
4250         /* requested to support too many traffic classes */
4251         if (num_tc > bp->max_cos) {
4252                 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4253                           num_tc, bp->max_cos);
4254                 return -EINVAL;
4255         }
4256
4257         /* declare the number of supported traffic classes */
4258         if (netdev_set_num_tc(dev, num_tc)) {
4259                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4260                 return -EINVAL;
4261         }
4262
4263         bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4264
4265         /* configure priority to traffic class mapping */
4266         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4267                 int outer_prio = c2s_map[prio];
4268
4269                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4270                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4271                    "mapping priority %d to tc %d\n",
4272                    outer_prio, bp->prio_to_cos[outer_prio]);
4273         }
4274
4275         /* Use this configuration to differentiate tc0 from other COSes.
4276            This can be used for ETS or PFC, and saves the effort of setting
4277            up a multi-class queue disc or negotiating DCBX with a switch:
4278         netdev_set_prio_tc_map(dev, 0, 0);
4279         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4280         for (prio = 1; prio < 16; prio++) {
4281                 netdev_set_prio_tc_map(dev, prio, 1);
4282                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4283         } */
4284
4285         /* configure traffic class to transmission queue mapping */
4286         for (cos = 0; cos < bp->max_cos; cos++) {
4287                 count = BNX2X_NUM_ETH_QUEUES(bp);
4288                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4289                 netdev_set_tc_queue(dev, cos, count, offset);
4290                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4291                    "mapping tc %d to offset %d count %d\n",
4292                    cos, offset, count);
4293         }
4294
4295         return 0;
4296 }
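
/* Worked example (hypothetical numbers, not taken from real hardware): with
 * 8 ethernet queues (and no CNIC queues) and max_cos == 2, the loop above
 * issues
 *
 *	netdev_set_tc_queue(dev, 0, 8, 0);	// tc 0 -> queues 0..7
 *	netdev_set_tc_queue(dev, 1, 8, 8);	// tc 1 -> queues 8..15
 *
 * so the stack's mqprio selection steers each class into its own contiguous
 * block of tx queues.
 */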
4297
4298 int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4299                      void *type_data)
4300 {
4301         struct tc_mqprio_qopt *mqprio = type_data;
4302
4303         if (type != TC_SETUP_QDISC_MQPRIO)
4304                 return -EOPNOTSUPP;
4305
4306         mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4307
4308         return bnx2x_setup_tc(dev, mqprio->num_tc);
4309 }
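
/* Usage sketch (assumes the standard ndo wiring used by this driver): the
 * stack reaches the handler above through net_device_ops, e.g.
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_setup_tc	= __bnx2x_setup_tc,
 *	};
 *
 * and only TC_SETUP_QDISC_MQPRIO requests make it to bnx2x_setup_tc().
 */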
4310
4311 /* called with rtnl_lock */
4312 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4313 {
4314         struct sockaddr *addr = p;
4315         struct bnx2x *bp = netdev_priv(dev);
4316         int rc = 0;
4317
4318         if (!is_valid_ether_addr(addr->sa_data)) {
4319                 BNX2X_ERR("Requested MAC address is not valid\n");
4320                 return -EINVAL;
4321         }
4322
4323         if (IS_MF_STORAGE_ONLY(bp)) {
4324                 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4325                 return -EINVAL;
4326         }
4327
4328         if (netif_running(dev))  {
4329                 rc = bnx2x_set_eth_mac(bp, false);
4330                 if (rc)
4331                         return rc;
4332         }
4333
4334         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4335
4336         if (netif_running(dev))
4337                 rc = bnx2x_set_eth_mac(bp, true);
4338
4339         if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4340                 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4341
4342         return rc;
4343 }
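
/* Flow sketch (condensed from the function above): when the interface is
 * running, the old MAC classification is removed before the new one is
 * programmed:
 *
 *	bnx2x_set_eth_mac(bp, false);		// remove old MAC from HW
 *	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 *	bnx2x_set_eth_mac(bp, true);		// install the new MAC
 */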
4344
4345 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4346 {
4347         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4348         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4349         u8 cos;
4350
4351         /* Common */
4352
4353         if (IS_FCOE_IDX(fp_index)) {
4354                 memset(sb, 0, sizeof(union host_hc_status_block));
4355                 fp->status_blk_mapping = 0;
4356         } else {
4357                 /* status blocks */
4358                 if (!CHIP_IS_E1x(bp))
4359                         BNX2X_PCI_FREE(sb->e2_sb,
4360                                        bnx2x_fp(bp, fp_index,
4361                                                 status_blk_mapping),
4362                                        sizeof(struct host_hc_status_block_e2));
4363                 else
4364                         BNX2X_PCI_FREE(sb->e1x_sb,
4365                                        bnx2x_fp(bp, fp_index,
4366                                                 status_blk_mapping),
4367                                        sizeof(struct host_hc_status_block_e1x));
4368         }
4369
4370         /* Rx */
4371         if (!skip_rx_queue(bp, fp_index)) {
4372                 bnx2x_free_rx_bds(fp);
4373
4374                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4375                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4376                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4377                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
4378                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4379
4380                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4381                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
4382                                sizeof(struct eth_fast_path_rx_cqe) *
4383                                NUM_RCQ_BD);
4384
4385                 /* SGE ring */
4386                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4387                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4388                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
4389                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4390         }
4391
4392         /* Tx */
4393         if (!skip_tx_queue(bp, fp_index)) {
4394                 /* fastpath tx rings: tx_buf tx_desc */
4395                 for_each_cos_in_tx_queue(fp, cos) {
4396                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4397
4398                         DP(NETIF_MSG_IFDOWN,
4399                            "freeing tx memory of fp %d cos %d cid %d\n",
4400                            fp_index, cos, txdata->cid);
4401
4402                         BNX2X_FREE(txdata->tx_buf_ring);
4403                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
4404                                 txdata->tx_desc_mapping,
4405                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4406                 }
4407         }
4408         /* end of fastpath */
4409 }
4410
4411 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4412 {
4413         int i;
4414         for_each_cnic_queue(bp, i)
4415                 bnx2x_free_fp_mem_at(bp, i);
4416 }
4417
4418 void bnx2x_free_fp_mem(struct bnx2x *bp)
4419 {
4420         int i;
4421         for_each_eth_queue(bp, i)
4422                 bnx2x_free_fp_mem_at(bp, i);
4423 }
4424
4425 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4426 {
4427         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4428         if (!CHIP_IS_E1x(bp)) {
4429                 bnx2x_fp(bp, index, sb_index_values) =
4430                         (__le16 *)status_blk.e2_sb->sb.index_values;
4431                 bnx2x_fp(bp, index, sb_running_index) =
4432                         (__le16 *)status_blk.e2_sb->sb.running_index;
4433         } else {
4434                 bnx2x_fp(bp, index, sb_index_values) =
4435                         (__le16 *)status_blk.e1x_sb->sb.index_values;
4436                 bnx2x_fp(bp, index, sb_running_index) =
4437                         (__le16 *)status_blk.e1x_sb->sb.running_index;
4438         }
4439 }
4440
4441 /* Returns the number of actually allocated BDs */
4442 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4443                               int rx_ring_size)
4444 {
4445         struct bnx2x *bp = fp->bp;
4446         u16 ring_prod, cqe_ring_prod;
4447         int i, failure_cnt = 0;
4448
4449         fp->rx_comp_cons = 0;
4450         cqe_ring_prod = ring_prod = 0;
4451
4452         /* This routine is called only during fp init, so
4453          * fp->eth_q_stats.rx_skb_alloc_failed = 0
4454          */
4455         for (i = 0; i < rx_ring_size; i++) {
4456                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4457                         failure_cnt++;
4458                         continue;
4459                 }
4460                 ring_prod = NEXT_RX_IDX(ring_prod);
4461                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4462                 WARN_ON(ring_prod <= (i - failure_cnt));
4463         }
4464
4465         if (failure_cnt)
4466                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4467                           i - failure_cnt, fp->index);
4468
4469         fp->rx_bd_prod = ring_prod;
4470         /* Limit the CQE producer by the CQE ring size */
4471         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4472                                cqe_ring_prod);
4473
4474         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4475
4476         return i - failure_cnt;
4477 }
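
/* Illustrative note (simplified): NEXT_RX_IDX() does not advance linearly;
 * it skips the per-page "next page" BDs at each page boundary, which is why
 * ring_prod can run ahead of (i - failure_cnt) in the loop above. A plain
 * ring without page chaining would reduce to
 *
 *	ring_prod = (ring_prod + 1) % ring_size;
 */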
4478
4479 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4480 {
4481         int i;
4482
4483         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4484                 struct eth_rx_cqe_next_page *nextpg;
4485
4486                 nextpg = (struct eth_rx_cqe_next_page *)
4487                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4488                 nextpg->addr_hi =
4489                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4490                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4491                 nextpg->addr_lo =
4492                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4493                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4494         }
4495 }
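
/* Worked example (hypothetical NUM_RCQ_RINGS == 4): the loop above writes
 * the "next page" pointer into the last CQE of each page, and the
 * (i % NUM_RCQ_RINGS) term wraps the last page back to the first:
 *
 *	next(page 0) = page 1, next(page 1) = page 2,
 *	next(page 2) = page 3, next(page 3) = page 0
 *
 * i.e. next_addr(i) = rx_comp_mapping + BCM_PAGE_SIZE * (i % NUM_RCQ_RINGS),
 * forming a closed ring of BCM_PAGE_SIZE pages.
 */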
4496
4497 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4498 {
4499         union host_hc_status_block *sb;
4500         struct bnx2x_fastpath *fp = &bp->fp[index];
4501         int ring_size = 0;
4502         u8 cos;
4503         int rx_ring_size = 0;
4504
4505         if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4506                 rx_ring_size = MIN_RX_SIZE_NONTPA;
4507                 bp->rx_ring_size = rx_ring_size;
4508         } else if (!bp->rx_ring_size) {
4509                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4510
4511                 if (CHIP_IS_E3(bp)) {
4512                         u32 cfg = SHMEM_RD(bp,
4513                                            dev_info.port_hw_config[BP_PORT(bp)].
4514                                            default_cfg);
4515
4516                         /* Decrease ring size for 1G functions */
4517                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4518                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
4519                                 rx_ring_size /= 10;
4520                 }
4521
4522                 /* allocate at least the number of buffers required by FW */
4523                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4524                                      MIN_RX_SIZE_TPA, rx_ring_size);
4525
4526                 bp->rx_ring_size = rx_ring_size;
4527         } else /* if rx_ring_size specified - use it */
4528                 rx_ring_size = bp->rx_ring_size;
4529
4530         DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4531
4532         /* Common */
4533         sb = &bnx2x_fp(bp, index, status_blk);
4534
4535         if (!IS_FCOE_IDX(index)) {
4536                 /* status blocks */
4537                 if (!CHIP_IS_E1x(bp)) {
4538                         sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4539                                                     sizeof(struct host_hc_status_block_e2));
4540                         if (!sb->e2_sb)
4541                                 goto alloc_mem_err;
4542                 } else {
4543                         sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4544                                                      sizeof(struct host_hc_status_block_e1x));
4545                         if (!sb->e1x_sb)
4546                                 goto alloc_mem_err;
4547                 }
4548         }
4549
4550         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4551          * set shortcuts for it.
4552          */
4553         if (!IS_FCOE_IDX(index))
4554                 set_sb_shortcuts(bp, index);
4555
4556         /* Tx */
4557         if (!skip_tx_queue(bp, index)) {
4558                 /* fastpath tx rings: tx_buf tx_desc */
4559                 for_each_cos_in_tx_queue(fp, cos) {
4560                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4561
4562                         DP(NETIF_MSG_IFUP,
4563                            "allocating tx memory of fp %d cos %d\n",
4564                            index, cos);
4565
4566                         txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4567                                                       sizeof(struct sw_tx_bd),
4568                                                       GFP_KERNEL);
4569                         if (!txdata->tx_buf_ring)
4570                                 goto alloc_mem_err;
4571                         txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4572                                                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4573                         if (!txdata->tx_desc_ring)
4574                                 goto alloc_mem_err;
4575                 }
4576         }
4577
4578         /* Rx */
4579         if (!skip_rx_queue(bp, index)) {
4580                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4581                 bnx2x_fp(bp, index, rx_buf_ring) =
4582                         kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4583                 if (!bnx2x_fp(bp, index, rx_buf_ring))
4584                         goto alloc_mem_err;
4585                 bnx2x_fp(bp, index, rx_desc_ring) =
4586                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4587                                         sizeof(struct eth_rx_bd) * NUM_RX_BD);
4588                 if (!bnx2x_fp(bp, index, rx_desc_ring))
4589                         goto alloc_mem_err;
4590
4591                 /* Seed all CQEs with 1s */
4592                 bnx2x_fp(bp, index, rx_comp_ring) =
4593                         BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4594                                          sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4595                 if (!bnx2x_fp(bp, index, rx_comp_ring))
4596                         goto alloc_mem_err;
4597
4598                 /* SGE ring */
4599                 bnx2x_fp(bp, index, rx_page_ring) =
4600                         kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4601                                 GFP_KERNEL);
4602                 if (!bnx2x_fp(bp, index, rx_page_ring))
4603                         goto alloc_mem_err;
4604                 bnx2x_fp(bp, index, rx_sge_ring) =
4605                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4606                                         BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4607                 if (!bnx2x_fp(bp, index, rx_sge_ring))
4608                         goto alloc_mem_err;
4609                 /* RX BD ring */
4610                 bnx2x_set_next_page_rx_bd(fp);
4611
4612                 /* CQ ring */
4613                 bnx2x_set_next_page_rx_cq(fp);
4614
4615                 /* BDs */
4616                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4617                 if (ring_size < rx_ring_size)
4618                         goto alloc_mem_err;
4619         }
4620
4621         return 0;
4622
4623 /* handles low memory cases */
4624 alloc_mem_err:
4625         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4626                                                 index, ring_size);
4627         /* FW will drop all packets if the queue is not big enough.
4628          * In these cases we disable the queue.
4629          * The minimum size differs for OOO, TPA and non-TPA queues.
4630          */
4631         if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4632                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4633                         /* release memory allocated for this queue */
4634                         bnx2x_free_fp_mem_at(bp, index);
4635                         return -ENOMEM;
4636         }
4637         return 0;
4638 }
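
/* Decision summary (derived from the error path above):
 *
 *	ring_size >= rx_ring_size              -> success, full ring
 *	ring_size >= mode-specific minimum     -> keep the smaller ring
 *	ring_size <  MIN_RX_SIZE_{TPA,NONTPA}  -> free the queue, -ENOMEM
 *
 * where the minimum is MIN_RX_SIZE_NONTPA for TPA_MODE_DISABLED and
 * MIN_RX_SIZE_TPA otherwise.
 */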
4639
4640 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4641 {
4642         if (!NO_FCOE(bp))
4643                 /* FCoE */
4644                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4645                         /* we will fail the load process instead of
4646                          * marking NO_FCOE_FLAG
4647                          */
4648                         return -ENOMEM;
4649
4650         return 0;
4651 }
4652
4653 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4654 {
4655         int i;
4656
4657         /* 1. Allocate FP for leading - fatal if error
4658          * 2. Allocate RSS - fix number of queues if error
4659          */
4660
4661         /* leading */
4662         if (bnx2x_alloc_fp_mem_at(bp, 0))
4663                 return -ENOMEM;
4664
4665         /* RSS */
4666         for_each_nondefault_eth_queue(bp, i)
4667                 if (bnx2x_alloc_fp_mem_at(bp, i))
4668                         break;
4669
4670         /* handle memory failures */
4671         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4672                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4673
4674                 WARN_ON(delta < 0);
4675                 bnx2x_shrink_eth_fp(bp, delta);
4676                 if (CNIC_SUPPORT(bp))
4677                         /* move non-eth FPs next to the last eth FP;
4678                          * must be done in this order:
4679                          * FCOE_IDX < FWD_IDX < OOO_IDX
4680                          */
4681
4682                         /* move the FCoE fp even if NO_FCOE_FLAG is on */
4683                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4684                 bp->num_ethernet_queues -= delta;
4685                 bp->num_queues = bp->num_ethernet_queues +
4686                                  bp->num_cnic_queues;
4687                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4688                           bp->num_queues + delta, bp->num_queues);
4689         }
4690
4691         return 0;
4692 }
4693
4694 void bnx2x_free_mem_bp(struct bnx2x *bp)
4695 {
4696         int i;
4697
4698         for (i = 0; i < bp->fp_array_size; i++)
4699                 kfree(bp->fp[i].tpa_info);
4700         kfree(bp->fp);
4701         kfree(bp->sp_objs);
4702         kfree(bp->fp_stats);
4703         kfree(bp->bnx2x_txq);
4704         kfree(bp->msix_table);
4705         kfree(bp->ilt);
4706 }
4707
4708 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4709 {
4710         struct bnx2x_fastpath *fp;
4711         struct msix_entry *tbl;
4712         struct bnx2x_ilt *ilt;
4713         int msix_table_size = 0;
4714         int fp_array_size, txq_array_size;
4715         int i;
4716
4717         /*
4718          * The biggest MSI-X table we might need is the maximum number of
4719          * fastpath IGU SBs plus the default SB (for PF only).
4720          */
4721         msix_table_size = bp->igu_sb_cnt;
4722         if (IS_PF(bp))
4723                 msix_table_size++;
4724         BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4725
4726         /* fp array: RSS plus CNIC related L2 queues */
4727         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4728         bp->fp_array_size = fp_array_size;
4729         BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4730
4731         fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4732         if (!fp)
4733                 goto alloc_err;
4734         for (i = 0; i < bp->fp_array_size; i++) {
4735                 fp[i].tpa_info =
4736                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4737                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4738                 if (!(fp[i].tpa_info))
4739                         goto alloc_err;
4740         }
4741
4742         bp->fp = fp;
4743
4744         /* allocate sp objs */
4745         bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4746                               GFP_KERNEL);
4747         if (!bp->sp_objs)
4748                 goto alloc_err;
4749
4750         /* allocate fp_stats */
4751         bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4752                                GFP_KERNEL);
4753         if (!bp->fp_stats)
4754                 goto alloc_err;
4755
4756         /* Allocate memory for the transmission queues array */
4757         txq_array_size =
4758                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4759         BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4760
4761         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4762                                 GFP_KERNEL);
4763         if (!bp->bnx2x_txq)
4764                 goto alloc_err;
4765
4766         /* msix table */
4767         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4768         if (!tbl)
4769                 goto alloc_err;
4770         bp->msix_table = tbl;
4771
4772         /* ilt */
4773         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4774         if (!ilt)
4775                 goto alloc_err;
4776         bp->ilt = ilt;
4777
4778         return 0;
4779 alloc_err:
4780         bnx2x_free_mem_bp(bp);
4781         return -ENOMEM;
4782 }
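
/* Design note with a minimal sketch: a single alloc_err label is safe here
 * because bnx2x_free_mem_bp() only kfree()s members, and kfree(NULL) is a
 * no-op, so partially completed allocations unwind cleanly:
 *
 *	p = kcalloc(n, size, GFP_KERNEL);
 *	if (!p)
 *		goto alloc_err;		// free path skips NULL members
 */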
4783
4784 int bnx2x_reload_if_running(struct net_device *dev)
4785 {
4786         struct bnx2x *bp = netdev_priv(dev);
4787
4788         if (unlikely(!netif_running(dev)))
4789                 return 0;
4790
4791         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4792         return bnx2x_nic_load(bp, LOAD_NORMAL);
4793 }
4794
4795 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4796 {
4797         u32 sel_phy_idx = 0;
4798         if (bp->link_params.num_phys <= 1)
4799                 return INT_PHY;
4800
4801         if (bp->link_vars.link_up) {
4802                 sel_phy_idx = EXT_PHY1;
4803                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4804                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4805                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4806                         sel_phy_idx = EXT_PHY2;
4807         } else {
4808
4809                 switch (bnx2x_phy_selection(&bp->link_params)) {
4810                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4811                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4812                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4813                        sel_phy_idx = EXT_PHY1;
4814                        break;
4815                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4816                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4817                        sel_phy_idx = EXT_PHY2;
4818                        break;
4819                 }
4820         }
4821
4822         return sel_phy_idx;
4823 }
4824 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4825 {
4826         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4827         /*
4828          * The selected activated PHY index is always taken after swapping
4829          * (in case PHY swapping is enabled), so when swapping is enabled
4830          * we need to reverse the configuration.
4831          */
4832
4833         if (bp->link_params.multi_phy_config &
4834             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4835                 if (sel_phy_idx == EXT_PHY1)
4836                         sel_phy_idx = EXT_PHY2;
4837                 else if (sel_phy_idx == EXT_PHY2)
4838                         sel_phy_idx = EXT_PHY1;
4839         }
4840         return LINK_CONFIG_IDX(sel_phy_idx);
4841 }
4842
4843 #ifdef NETDEV_FCOE_WWNN
4844 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4845 {
4846         struct bnx2x *bp = netdev_priv(dev);
4847         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4848
4849         switch (type) {
4850         case NETDEV_FCOE_WWNN:
4851                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4852                                 cp->fcoe_wwn_node_name_lo);
4853                 break;
4854         case NETDEV_FCOE_WWPN:
4855                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4856                                 cp->fcoe_wwn_port_name_lo);
4857                 break;
4858         default:
4859                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4860                 return -EINVAL;
4861         }
4862
4863         return 0;
4864 }
4865 #endif
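
/* Illustrative sketch (assumption about HILO_U64()): composing the WWN from
 * its hi/lo halves amounts to
 *
 *	static inline u64 example_hilo_u64(u32 hi, u32 lo)
 *	{
 *		return ((u64)hi << 32) | lo;
 *	}
 */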
4866
4867 /* called with rtnl_lock */
4868 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4869 {
4870         struct bnx2x *bp = netdev_priv(dev);
4871
4872         if (pci_num_vf(bp->pdev)) {
4873                 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4874                 return -EPERM;
4875         }
4876
4877         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4878                 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4879                 return -EAGAIN;
4880         }
4881
4882         /* This does not race with packet allocation
4883          * because the actual alloc size is
4884          * only updated as part of load
4885          */
4886         dev->mtu = new_mtu;
4887
4888         if (!bnx2x_mtu_allows_gro(new_mtu))
4889                 dev->features &= ~NETIF_F_GRO_HW;
4890
4891         if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4892                 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4893
4894         return bnx2x_reload_if_running(dev);
4895 }
4896
4897 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4898                                      netdev_features_t features)
4899 {
4900         struct bnx2x *bp = netdev_priv(dev);
4901
4902         if (pci_num_vf(bp->pdev)) {
4903                 netdev_features_t changed = dev->features ^ features;
4904
4905                 /* Revert the requested changes in features if they
4906                  * would require internal reload of PF in bnx2x_set_features().
4907                  */
4908                 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4909                         features &= ~NETIF_F_RXCSUM;
4910                         features |= dev->features & NETIF_F_RXCSUM;
4911                 }
4912
4913                 if (changed & NETIF_F_LOOPBACK) {
4914                         features &= ~NETIF_F_LOOPBACK;
4915                         features |= dev->features & NETIF_F_LOOPBACK;
4916                 }
4917         }
4918
4919         /* TPA requires Rx CSUM offloading */
4920         if (!(features & NETIF_F_RXCSUM))
4921                 features &= ~NETIF_F_LRO;
4922
4923         if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4924                 features &= ~NETIF_F_GRO_HW;
4925         if (features & NETIF_F_GRO_HW)
4926                 features &= ~NETIF_F_LRO;
4927
4928         return features;
4929 }
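
/* Worked example (hypothetical request): a VF clearing NETIF_F_RXCSUM while
 * TPA is enabled gets RXCSUM restored from dev->features; the dependency
 * fixups then cascade: no RXCSUM -> no LRO, GRO_HW only if GRO is set and
 * the MTU allows it, and GRO_HW in turn clears LRO.
 */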
4930
4931 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4932 {
4933         struct bnx2x *bp = netdev_priv(dev);
4934         netdev_features_t changes = features ^ dev->features;
4935         bool bnx2x_reload = false;
4936         int rc;
4937
4938         /* VFs or non-SRIOV PFs should be able to change the loopback feature */
4939         if (!pci_num_vf(bp->pdev)) {
4940                 if (features & NETIF_F_LOOPBACK) {
4941                         if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4942                                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4943                                 bnx2x_reload = true;
4944                         }
4945                 } else {
4946                         if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4947                                 bp->link_params.loopback_mode = LOOPBACK_NONE;
4948                                 bnx2x_reload = true;
4949                         }
4950                 }
4951         }
4952
4953         /* Don't care about GRO changes */
4954         changes &= ~NETIF_F_GRO;
4955
4956         if (changes)
4957                 bnx2x_reload = true;
4958
4959         if (bnx2x_reload) {
4960                 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4961                         dev->features = features;
4962                         rc = bnx2x_reload_if_running(dev);
4963                         return rc ? rc : 1;
4964                 }
4965                 /* else: bnx2x_nic_load() will be called at end of recovery */
4966         }
4967
4968         return 0;
4969 }
4970
4971 void bnx2x_tx_timeout(struct net_device *dev)
4972 {
4973         struct bnx2x *bp = netdev_priv(dev);
4974
4975         /* We want the dump information to be logged,
4976          * but calling bnx2x_panic() would kill all chances of recovery.
4977          */
4978         if (!bp->panic)
4979 #ifndef BNX2X_STOP_ON_ERROR
4980                 bnx2x_panic_dump(bp, false);
4981 #else
4982                 bnx2x_panic();
4983 #endif
4984
4985         /* This allows the netif to be shut down gracefully before resetting */
4986         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4987 }
4988
4989 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4990 {
4991         struct net_device *dev = pci_get_drvdata(pdev);
4992         struct bnx2x *bp;
4993
4994         if (!dev) {
4995                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4996                 return -ENODEV;
4997         }
4998         bp = netdev_priv(dev);
4999
5000         rtnl_lock();
5001
5002         pci_save_state(pdev);
5003
5004         if (!netif_running(dev)) {
5005                 rtnl_unlock();
5006                 return 0;
5007         }
5008
5009         netif_device_detach(dev);
5010
5011         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5012
5013         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5014
5015         rtnl_unlock();
5016
5017         return 0;
5018 }
5019
5020 int bnx2x_resume(struct pci_dev *pdev)
5021 {
5022         struct net_device *dev = pci_get_drvdata(pdev);
5023         struct bnx2x *bp;
5024         int rc;
5025
5026         if (!dev) {
5027                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5028                 return -ENODEV;
5029         }
5030         bp = netdev_priv(dev);
5031
5032         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5033                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5034                 return -EAGAIN;
5035         }
5036
5037         rtnl_lock();
5038
5039         pci_restore_state(pdev);
5040
5041         if (!netif_running(dev)) {
5042                 rtnl_unlock();
5043                 return 0;
5044         }
5045
5046         bnx2x_set_power_state(bp, PCI_D0);
5047         netif_device_attach(dev);
5048
5049         rc = bnx2x_nic_load(bp, LOAD_OPEN);
5050
5051         rtnl_unlock();
5052
5053         return rc;
5054 }
5055
5056 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5057                               u32 cid)
5058 {
5059         if (!cxt) {
5060                 BNX2X_ERR("bad context pointer %p\n", cxt);
5061                 return;
5062         }
5063
5064         /* ustorm cxt validation */
5065         cxt->ustorm_ag_context.cdu_usage =
5066                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5067                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5068         /* xstorm cxt validation */
5069         cxt->xstorm_ag_context.cdu_reserved =
5070                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5071                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5072 }
5073
5074 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5075                                     u8 fw_sb_id, u8 sb_index,
5076                                     u8 ticks)
5077 {
5078         u32 addr = BAR_CSTRORM_INTMEM +
5079                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5080         REG_WR8(bp, addr, ticks);
5081         DP(NETIF_MSG_IFUP,
5082            "port %x fw_sb_id %d sb_index %d ticks %d\n",
5083            port, fw_sb_id, sb_index, ticks);
5084 }
5085
5086 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5087                                     u16 fw_sb_id, u8 sb_index,
5088                                     u8 disable)
5089 {
5090         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5091         u32 addr = BAR_CSTRORM_INTMEM +
5092                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5093         u8 flags = REG_RD8(bp, addr);
5094         /* clear and set */
5095         flags &= ~HC_INDEX_DATA_HC_ENABLED;
5096         flags |= enable_flag;
5097         REG_WR8(bp, addr, flags);
5098         DP(NETIF_MSG_IFUP,
5099            "port %x fw_sb_id %d sb_index %d disable %d\n",
5100            port, fw_sb_id, sb_index, disable);
5101 }
5102
5103 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5104                                     u8 sb_index, u8 disable, u16 usec)
5105 {
5106         int port = BP_PORT(bp);
5107         u8 ticks = usec / BNX2X_BTR;
5108
5109         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5110
5111         disable = disable ? 1 : (usec ? 0 : 1);
5112         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5113 }
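
/* Worked example (values are illustrative): with BNX2X_BTR == 4, a request
 * of usec == 100 programs ticks == 25. A request of usec == 0 forces
 * disable == 1 regardless of the caller's disable argument, so a zero
 * coalescing interval always disables the index.
 */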
5114
5115 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5116                             u32 verbose)
5117 {
5118         smp_mb__before_atomic();
5119         set_bit(flag, &bp->sp_rtnl_state);
5120         smp_mb__after_atomic();
5121         DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5122            flag);
5123         schedule_delayed_work(&bp->sp_rtnl_task, 0);
5124 }