1 /* bnx2x_cmn.c: QLogic Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
26 #include <linux/crash_dump.h>
29 #include <net/ip6_checksum.h>
30 #include <linux/prefetch.h>
31 #include "bnx2x_cmn.h"
32 #include "bnx2x_init.h"
35 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
36 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
38 static int bnx2x_poll(struct napi_struct *napi, int budget);
40 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
44 /* Add NAPI objects */
45 for_each_rx_queue_cnic(bp, i) {
46 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
47 bnx2x_poll, NAPI_POLL_WEIGHT);
51 static void bnx2x_add_all_napi(struct bnx2x *bp)
55 /* Add NAPI objects */
56 for_each_eth_queue(bp, i) {
57 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
58 bnx2x_poll, NAPI_POLL_WEIGHT);
62 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66 /* Reduce memory usage in kdump environment by using only one queue */
67 if (is_kdump_kernel())
70 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
75 * bnx2x_move_fp - move content of the fastpath structure.
78 * @from: source FP index
79 * @to: destination FP index
81 * Makes sure the content of bp->fp[to].napi is kept
82 * intact. This is done by first copying the napi struct from
83 * the target to the source, and then memcpy'ing the entire
84 * source onto the target. Update txdata pointers and related
87 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 struct bnx2x_fastpath *from_fp = &bp->fp[from];
90 struct bnx2x_fastpath *to_fp = &bp->fp[to];
91 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
92 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
93 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
94 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
95 int old_max_eth_txqs, new_max_eth_txqs;
96 int old_txdata_index = 0, new_txdata_index = 0;
97 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99 /* Copy the NAPI object as it has been already initialized */
100 from_fp->napi = to_fp->napi;
102 /* Move bnx2x_fastpath contents */
103 memcpy(to_fp, from_fp, sizeof(*to_fp));
106 /* Retain the tpa_info of the original `to' version as we don't want
107 * 2 FPs to contain the same tpa_info pointer.
109 to_fp->tpa_info = old_tpa_info;
111 /* move sp_objs contents as well, as their indices match fp ones */
112 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114 /* move fp_stats contents as well, as their indices match fp ones */
115 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117 /* Update txdata pointers in fp and move txdata content accordingly:
118 * Each fp consumes 'max_cos' txdata structures, so the index should be
119 * decremented by max_cos x delta.
122 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
123 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 if (from == FCOE_IDX(bp)) {
126 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
127 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
130 memcpy(&bp->bnx2x_txq[new_txdata_index],
131 &bp->bnx2x_txq[old_txdata_index],
132 sizeof(struct bnx2x_fp_txdata));
133 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
137 * bnx2x_fill_fw_str - Fill buffer with FW version string.
140 * @buf: character buffer to fill with the fw name
141 * @buf_len: length of the above buffer
144 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
147 u8 phy_fw_ver[PHY_FW_VER_LEN];
149 phy_fw_ver[0] = '\0';
150 bnx2x_get_ext_phy_fw_version(&bp->link_params,
151 phy_fw_ver, PHY_FW_VER_LEN);
152 strlcpy(buf, bp->fw_ver, buf_len);
153 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155 (bp->common.bc_ver & 0xff0000) >> 16,
156 (bp->common.bc_ver & 0xff00) >> 8,
157 (bp->common.bc_ver & 0xff),
158 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
165 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
168 * @delta: number of eth queues which were not allocated
170 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
175 * backward along the array could cause memory to be overwritten
177 for (cos = 1; cos < bp->max_cos; cos++) {
178 for (i = 0; i < old_eth_num - delta; i++) {
179 struct bnx2x_fastpath *fp = &bp->fp[i];
180 int new_idx = cos * (old_eth_num - delta) + i;
182 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
183 sizeof(struct bnx2x_fp_txdata));
184 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
189 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191 /* free skb in the packet ring at pos idx
192 * return idx of last bd freed
194 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
195 u16 idx, unsigned int *pkts_compl,
196 unsigned int *bytes_compl)
198 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
199 struct eth_tx_start_bd *tx_start_bd;
200 struct eth_tx_bd *tx_data_bd;
201 struct sk_buff *skb = tx_buf->skb;
202 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 u16 split_bd_len = 0;
206 /* prefetch skb end pointer to speed up dev_kfree_skb() */
209 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
210 txdata->txq_index, idx, tx_buf, skb);
212 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
215 #ifdef BNX2X_STOP_ON_ERROR
216 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
217 BNX2X_ERR("BAD nbd!\n");
221 new_cons = nbd + tx_buf->first_bd;
223 /* Get the next bd */
224 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226 /* Skip a parse bd... */
228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
231 /* Skip second parse bd... */
233 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
236 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
237 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
238 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
239 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
245 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
246 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
252 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
253 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
254 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
263 (*bytes_compl) += skb->len;
264 dev_kfree_skb_any(skb);
267 tx_buf->first_bd = 0;
273 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
275 struct netdev_queue *txq;
276 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
277 unsigned int pkts_compl = 0, bytes_compl = 0;
279 #ifdef BNX2X_STOP_ON_ERROR
280 if (unlikely(bp->panic))
284 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
285 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
286 sw_cons = txdata->tx_pkt_cons;
288 while (sw_cons != hw_cons) {
291 pkt_cons = TX_BD(sw_cons);
293 DP(NETIF_MSG_TX_DONE,
294 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
295 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
297 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
298 &pkts_compl, &bytes_compl);
303 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
305 txdata->tx_pkt_cons = sw_cons;
306 txdata->tx_bd_cons = bd_cons;
308 /* Need to make the tx_bd_cons update visible to start_xmit()
309 * before checking for netif_tx_queue_stopped(). Without the
310 * memory barrier, there is a small possibility that
311 * start_xmit() will miss it and cause the queue to be stopped
313 * On the other hand we need an rmb() here to ensure the proper
314 * ordering of bit testing in the following
315 * netif_tx_queue_stopped(txq) call.
319 if (unlikely(netif_tx_queue_stopped(txq))) {
320 /* Taking tx_lock() is needed to prevent re-enabling the queue
321 * while it's empty. This could happen if rx_action() gets
322 * suspended in bnx2x_tx_int() after the condition before
323 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
325 * stops the queue->sees fresh tx_bd_cons->releases the queue->
326 * sends some packets consuming the whole queue again->
330 __netif_tx_lock(txq, smp_processor_id());
332 if ((netif_tx_queue_stopped(txq)) &&
333 (bp->state == BNX2X_STATE_OPEN) &&
334 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
335 netif_tx_wake_queue(txq);
337 __netif_tx_unlock(txq);
342 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
345 u16 last_max = fp->last_max_sge;
347 if (SUB_S16(idx, last_max) > 0)
348 fp->last_max_sge = idx;
351 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
353 struct eth_end_agg_rx_cqe *cqe)
355 struct bnx2x *bp = fp->bp;
356 u16 last_max, last_elem, first_elem;
363 /* First mark all used pages */
364 for (i = 0; i < sge_len; i++)
365 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
366 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
368 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
369 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
371 /* Here we assume that the last SGE index is the biggest */
372 prefetch((void *)(fp->sge_mask));
373 bnx2x_update_last_max_sge(fp,
374 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
376 last_max = RX_SGE(fp->last_max_sge);
377 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
378 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
380 /* If ring is not full */
381 if (last_elem + 1 != first_elem)
384 /* Now update the prod */
385 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
386 if (likely(fp->sge_mask[i]))
389 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
390 delta += BIT_VEC64_ELEM_SZ;
394 fp->rx_sge_prod += delta;
395 /* clear page-end entries */
396 bnx2x_clear_sge_mask_next_elems(fp);
399 DP(NETIF_MSG_RX_STATUS,
400 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
401 fp->last_max_sge, fp->rx_sge_prod);
404 /* Get Toeplitz hash value in the skb using the value from the
405 * CQE (calculated by HW).
407 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
408 const struct eth_fast_path_rx_cqe *cqe,
409 enum pkt_hash_types *rxhash_type)
411 /* Get Toeplitz hash from CQE */
412 if ((bp->dev->features & NETIF_F_RXHASH) &&
413 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
414 enum eth_rss_hash_type htype;
416 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
417 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
418 (htype == TCP_IPV6_HASH_TYPE)) ?
419 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
421 return le32_to_cpu(cqe->rss_hash_result);
423 *rxhash_type = PKT_HASH_TYPE_NONE;
427 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
429 struct eth_fast_path_rx_cqe *cqe)
431 struct bnx2x *bp = fp->bp;
432 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
433 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
434 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
436 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
437 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
439 /* print error if current state != stop */
440 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
441 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
443 /* Try to map an empty data buffer from the aggregation info */
444 mapping = dma_map_single(&bp->pdev->dev,
445 first_buf->data + NET_SKB_PAD,
446 fp->rx_buf_size, DMA_FROM_DEVICE);
448 * ...if it fails - move the skb from the consumer to the producer
449 * and set the current aggregation state as ERROR to drop it
450 * when TPA_STOP arrives.
453 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
454 /* Move the BD from the consumer to the producer */
455 bnx2x_reuse_rx_data(fp, cons, prod);
456 tpa_info->tpa_state = BNX2X_TPA_ERROR;
460 /* move empty data from pool to prod */
461 prod_rx_buf->data = first_buf->data;
462 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
463 /* point prod_bd to new data */
464 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
465 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
467 /* move partial skb from cons to pool (don't unmap yet) */
468 *first_buf = *cons_rx_buf;
470 /* mark bin state as START */
471 tpa_info->parsing_flags =
472 le16_to_cpu(cqe->pars_flags.flags);
473 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
474 tpa_info->tpa_state = BNX2X_TPA_START;
475 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
476 tpa_info->placement_offset = cqe->placement_offset;
477 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
478 if (fp->mode == TPA_MODE_GRO) {
479 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
480 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
481 tpa_info->gro_size = gro_size;
484 #ifdef BNX2X_STOP_ON_ERROR
485 fp->tpa_queue_used |= (1 << queue);
486 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
491 /* Timestamp option length allowed for TPA aggregation:
493 * nop nop kind length echo val
495 #define TPA_TSTAMP_OPT_LEN 12
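/* For reference: the 12 bytes are two 1-byte NOP pads, a 1-byte option
 * kind, a 1-byte option length and the two 4-byte timestamp values
 * (TSval/TSecr), i.e. 2 + 1 + 1 + 4 + 4 = 12.
 */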
497 * bnx2x_set_gro_params - compute GRO values
500 * @parsing_flags: parsing flags from the START CQE
501 * @len_on_bd: total length of the first packet for the
503 * @pkt_len: length of all segments
505 * Approximates the MSS for this aggregation, calculated using
506 * its first packet.
507 * Also computes the number of aggregated segments and the gso_type.
509 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
510 u16 len_on_bd, unsigned int pkt_len,
511 u16 num_of_coalesced_segs)
513 /* TPA aggregation won't have either IP options or TCP options
514 * other than timestamp or IPv6 extension headers.
516 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
518 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
519 PRS_FLAG_OVERETH_IPV6) {
520 hdrs_len += sizeof(struct ipv6hdr);
521 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
523 hdrs_len += sizeof(struct iphdr);
524 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
527 /* Check if there was a TCP timestamp; if there is one, it will
528 * always be 12 bytes long: nop nop kind length echo val.
530 * Otherwise FW would close the aggregation.
532 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
533 hdrs_len += TPA_TSTAMP_OPT_LEN;
535 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
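/* Illustrative arithmetic (assumed frame sizes, not taken from a trace):
 * for an IPv4 aggregation without TCP timestamps, hdrs_len is
 * 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) = 54, so a 1514-byte
 * len_on_bd gives the familiar gso_size of 1460.
 */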
537 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
538 * to skb_shinfo(skb)->gso_segs
540 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
543 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
544 u16 index, gfp_t gfp_mask)
546 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
547 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
548 struct bnx2x_alloc_pool *pool = &fp->page_pool;
552 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
553 if (unlikely(!pool->page))
559 mapping = dma_map_page(&bp->pdev->dev, pool->page,
560 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
561 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
562 BNX2X_ERR("Can't map sge\n");
566 sw_buf->page = pool->page;
567 sw_buf->offset = pool->offset;
569 dma_unmap_addr_set(sw_buf, mapping, mapping);
571 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
572 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
574 pool->offset += SGE_PAGE_SIZE;
575 if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
576 get_page(pool->page);
582 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
583 struct bnx2x_agg_info *tpa_info,
586 struct eth_end_agg_rx_cqe *cqe,
589 struct sw_rx_page *rx_pg, old_rx_pg;
590 u32 i, frag_len, frag_size;
591 int err, j, frag_id = 0;
592 u16 len_on_bd = tpa_info->len_on_bd;
593 u16 full_page = 0, gro_size = 0;
595 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
597 if (fp->mode == TPA_MODE_GRO) {
598 gro_size = tpa_info->gro_size;
599 full_page = tpa_info->full_page;
602 /* This is needed in order to enable forwarding support */
604 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
605 le16_to_cpu(cqe->pkt_len),
606 le16_to_cpu(cqe->num_of_coalesced_segs));
608 #ifdef BNX2X_STOP_ON_ERROR
609 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
610 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
612 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
618 /* Run through the SGL and compose the fragmented skb */
619 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
620 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
622 /* FW gives the indices of the SGE as if the ring is an array
623 (meaning that "next" element will consume 2 indices) */
624 if (fp->mode == TPA_MODE_GRO)
625 frag_len = min_t(u32, frag_size, (u32)full_page);
627 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
629 rx_pg = &fp->rx_page_ring[sge_idx];
632 /* If we fail to allocate a substitute page, we simply stop
633 where we are and drop the whole packet */
634 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
636 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
640 dma_unmap_page(&bp->pdev->dev,
641 dma_unmap_addr(&old_rx_pg, mapping),
642 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
643 /* Add one frag and update the appropriate fields in the skb */
644 if (fp->mode == TPA_MODE_LRO)
645 skb_fill_page_desc(skb, j, old_rx_pg.page,
646 old_rx_pg.offset, frag_len);
650 for (rem = frag_len; rem > 0; rem -= gro_size) {
651 int len = rem > gro_size ? gro_size : rem;
652 skb_fill_page_desc(skb, frag_id++,
654 old_rx_pg.offset + offset,
657 get_page(old_rx_pg.page);
662 skb->data_len += frag_len;
663 skb->truesize += SGE_PAGES;
664 skb->len += frag_len;
666 frag_size -= frag_len;
672 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
674 if (fp->rx_frag_size)
680 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
682 if (fp->rx_frag_size) {
683 /* GFP_KERNEL allocations are used only during initialization */
684 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
685 return (void *)__get_free_page(gfp_mask);
687 return netdev_alloc_frag(fp->rx_frag_size);
690 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
694 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
696 const struct iphdr *iph = ip_hdr(skb);
699 skb_set_transport_header(skb, sizeof(struct iphdr));
702 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
703 iph->saddr, iph->daddr, 0);
706 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
708 struct ipv6hdr *iph = ipv6_hdr(skb);
711 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
714 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
715 &iph->saddr, &iph->daddr, 0);
718 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
719 void (*gro_func)(struct bnx2x*, struct sk_buff*))
721 skb_reset_network_header(skb);
723 tcp_gro_complete(skb);
727 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
731 if (skb_shinfo(skb)->gso_size) {
732 switch (be16_to_cpu(skb->protocol)) {
734 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
737 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
740 netdev_WARN_ONCE(bp->dev,
741 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
742 be16_to_cpu(skb->protocol));
746 skb_record_rx_queue(skb, fp->rx_queue);
747 napi_gro_receive(&fp->napi, skb);
750 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
751 struct bnx2x_agg_info *tpa_info,
753 struct eth_end_agg_rx_cqe *cqe,
756 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
757 u8 pad = tpa_info->placement_offset;
758 u16 len = tpa_info->len_on_bd;
759 struct sk_buff *skb = NULL;
760 u8 *new_data, *data = rx_buf->data;
761 u8 old_tpa_state = tpa_info->tpa_state;
763 tpa_info->tpa_state = BNX2X_TPA_STOP;
765 /* If there was an error during the handling of the TPA_START -
766 * drop this aggregation.
768 if (old_tpa_state == BNX2X_TPA_ERROR)
771 /* Try to allocate the new data */
772 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
773 /* Unmap skb in the pool anyway, as we are going to change
774 pool entry status to BNX2X_TPA_STOP even if new skb allocation
776 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
777 fp->rx_buf_size, DMA_FROM_DEVICE);
778 if (likely(new_data))
779 skb = build_skb(data, fp->rx_frag_size);
782 #ifdef BNX2X_STOP_ON_ERROR
783 if (pad + len > fp->rx_buf_size) {
784 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
785 pad, len, fp->rx_buf_size);
791 skb_reserve(skb, pad + NET_SKB_PAD);
793 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
795 skb->protocol = eth_type_trans(skb, bp->dev);
796 skb->ip_summed = CHECKSUM_UNNECESSARY;
798 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
799 skb, cqe, cqe_idx)) {
800 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
801 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
802 bnx2x_gro_receive(bp, fp, skb);
804 DP(NETIF_MSG_RX_STATUS,
805 "Failed to allocate new pages - dropping packet!\n");
806 dev_kfree_skb_any(skb);
809 /* put new data in bin */
810 rx_buf->data = new_data;
815 bnx2x_frag_free(fp, new_data);
817 /* drop the packet and keep the buffer in the bin */
818 DP(NETIF_MSG_RX_STATUS,
819 "Failed to allocate or map a new skb - dropping packet!\n");
820 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
823 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
824 u16 index, gfp_t gfp_mask)
827 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
828 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
831 data = bnx2x_frag_alloc(fp, gfp_mask);
832 if (unlikely(data == NULL))
835 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
838 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
839 bnx2x_frag_free(fp, data);
840 BNX2X_ERR("Can't map rx data\n");
845 dma_unmap_addr_set(rx_buf, mapping, mapping);
847 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
848 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
854 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
855 struct bnx2x_fastpath *fp,
856 struct bnx2x_eth_q_stats *qstats)
858 /* Do nothing if no L4 csum validation was done.
859 * We do not check whether IP csum was validated. For IPv4 we assume
860 * that if the card got as far as validating the L4 csum, it also
861 * validated the IP csum. IPv6 has no IP csum.
863 if (cqe->fast_path_cqe.status_flags &
864 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
867 /* If L4 validation was done, check if an error was found. */
869 if (cqe->fast_path_cqe.type_error_flags &
870 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
871 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
872 qstats->hw_csum_err++;
874 skb->ip_summed = CHECKSUM_UNNECESSARY;
877 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
879 struct bnx2x *bp = fp->bp;
880 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
881 u16 sw_comp_cons, sw_comp_prod;
883 union eth_rx_cqe *cqe;
884 struct eth_fast_path_rx_cqe *cqe_fp;
886 #ifdef BNX2X_STOP_ON_ERROR
887 if (unlikely(bp->panic))
893 bd_cons = fp->rx_bd_cons;
894 bd_prod = fp->rx_bd_prod;
895 bd_prod_fw = bd_prod;
896 sw_comp_cons = fp->rx_comp_cons;
897 sw_comp_prod = fp->rx_comp_prod;
899 comp_ring_cons = RCQ_BD(sw_comp_cons);
900 cqe = &fp->rx_comp_ring[comp_ring_cons];
901 cqe_fp = &cqe->fast_path_cqe;
903 DP(NETIF_MSG_RX_STATUS,
904 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
906 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
907 struct sw_rx_bd *rx_buf = NULL;
910 enum eth_rx_cqe_type cqe_fp_type;
914 enum pkt_hash_types rxhash_type;
916 #ifdef BNX2X_STOP_ON_ERROR
917 if (unlikely(bp->panic))
921 bd_prod = RX_BD(bd_prod);
922 bd_cons = RX_BD(bd_cons);
924 /* A rmb() is required to ensure that the CQE is not read
925 * before it is written by the adapter DMA. PCI ordering
926 * rules will make sure the other fields are written before
927 * the marker at the end of struct eth_fast_path_rx_cqe
928 * but without rmb() a weakly ordered processor can process
929 * stale data. Without the barrier TPA state-machine might
930 * enter inconsistent state and kernel stack might be
931 * provided with incorrect packet description - these lead
932 * to various kernel crashes.
936 cqe_fp_flags = cqe_fp->type_error_flags;
937 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
939 DP(NETIF_MSG_RX_STATUS,
940 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
941 CQE_TYPE(cqe_fp_flags),
942 cqe_fp_flags, cqe_fp->status_flags,
943 le32_to_cpu(cqe_fp->rss_hash_result),
944 le16_to_cpu(cqe_fp->vlan_tag),
945 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
947 /* is this a slowpath msg? */
948 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
949 bnx2x_sp_event(fp, cqe);
953 rx_buf = &fp->rx_buf_ring[bd_cons];
956 if (!CQE_TYPE_FAST(cqe_fp_type)) {
957 struct bnx2x_agg_info *tpa_info;
958 u16 frag_size, pages;
959 #ifdef BNX2X_STOP_ON_ERROR
961 if (fp->mode == TPA_MODE_DISABLED &&
962 (CQE_TYPE_START(cqe_fp_type) ||
963 CQE_TYPE_STOP(cqe_fp_type)))
964 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
965 CQE_TYPE(cqe_fp_type));
968 if (CQE_TYPE_START(cqe_fp_type)) {
969 u16 queue = cqe_fp->queue_index;
970 DP(NETIF_MSG_RX_STATUS,
971 "calling tpa_start on queue %d\n",
974 bnx2x_tpa_start(fp, queue,
980 queue = cqe->end_agg_cqe.queue_index;
981 tpa_info = &fp->tpa_info[queue];
982 DP(NETIF_MSG_RX_STATUS,
983 "calling tpa_stop on queue %d\n",
986 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
989 if (fp->mode == TPA_MODE_GRO)
990 pages = (frag_size + tpa_info->full_page - 1) /
993 pages = SGE_PAGE_ALIGN(frag_size) >>
996 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
997 &cqe->end_agg_cqe, comp_ring_cons);
998 #ifdef BNX2X_STOP_ON_ERROR
1003 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1007 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1008 pad = cqe_fp->placement_offset;
1009 dma_sync_single_for_cpu(&bp->pdev->dev,
1010 dma_unmap_addr(rx_buf, mapping),
1011 pad + RX_COPY_THRESH,
1014 prefetch(data + pad); /* speedup eth_type_trans() */
1015 /* is this an error packet? */
1016 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1017 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1018 "ERROR flags %x rx packet %u\n",
1019 cqe_fp_flags, sw_comp_cons);
1020 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1024 /* Since we don't have a jumbo ring
1025 * copy small packets if mtu > 1500
1027 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1028 (len <= RX_COPY_THRESH)) {
1029 skb = napi_alloc_skb(&fp->napi, len);
1031 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1032 "ERROR packet dropped because of alloc failure\n");
1033 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1036 memcpy(skb->data, data + pad, len);
1037 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1039 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1040 GFP_ATOMIC) == 0)) {
1041 dma_unmap_single(&bp->pdev->dev,
1042 dma_unmap_addr(rx_buf, mapping),
1045 skb = build_skb(data, fp->rx_frag_size);
1046 if (unlikely(!skb)) {
1047 bnx2x_frag_free(fp, data);
1048 bnx2x_fp_qstats(bp, fp)->
1049 rx_skb_alloc_failed++;
1052 skb_reserve(skb, pad);
1054 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1055 "ERROR packet dropped because of alloc failure\n");
1056 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1058 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1064 skb->protocol = eth_type_trans(skb, bp->dev);
1066 /* Set Toeplitz hash for a non-LRO skb */
1067 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1068 skb_set_hash(skb, rxhash, rxhash_type);
1070 skb_checksum_none_assert(skb);
1072 if (bp->dev->features & NETIF_F_RXCSUM)
1073 bnx2x_csum_validate(skb, cqe, fp,
1074 bnx2x_fp_qstats(bp, fp));
1076 skb_record_rx_queue(skb, fp->rx_queue);
1078 /* Check if this packet was timestamped */
1079 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1080 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1081 bnx2x_set_rx_ts(bp, skb);
1083 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1085 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1086 le16_to_cpu(cqe_fp->vlan_tag));
1088 napi_gro_receive(&fp->napi, skb);
1090 rx_buf->data = NULL;
1092 bd_cons = NEXT_RX_IDX(bd_cons);
1093 bd_prod = NEXT_RX_IDX(bd_prod);
1094 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1097 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1098 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1100 /* mark CQE as free */
1101 BNX2X_SEED_CQE(cqe_fp);
1103 if (rx_pkt == budget)
1106 comp_ring_cons = RCQ_BD(sw_comp_cons);
1107 cqe = &fp->rx_comp_ring[comp_ring_cons];
1108 cqe_fp = &cqe->fast_path_cqe;
1111 fp->rx_bd_cons = bd_cons;
1112 fp->rx_bd_prod = bd_prod_fw;
1113 fp->rx_comp_cons = sw_comp_cons;
1114 fp->rx_comp_prod = sw_comp_prod;
1116 /* Update producers */
1117 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1123 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1125 struct bnx2x_fastpath *fp = fp_cookie;
1126 struct bnx2x *bp = fp->bp;
1130 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1131 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1133 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1135 #ifdef BNX2X_STOP_ON_ERROR
1136 if (unlikely(bp->panic))
1140 /* Handle Rx and Tx according to MSI-X vector */
1141 for_each_cos_in_tx_queue(fp, cos)
1142 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1144 prefetch(&fp->sb_running_index[SM_RX_ID]);
1145 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1150 /* HW Lock for shared dual port PHYs */
1151 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1153 mutex_lock(&bp->port.phy_mutex);
1155 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1158 void bnx2x_release_phy_lock(struct bnx2x *bp)
1160 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1162 mutex_unlock(&bp->port.phy_mutex);
1165 /* calculates MF speed according to current linespeed and MF configuration */
1166 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1168 u16 line_speed = bp->link_vars.line_speed;
1170 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1171 bp->mf_config[BP_VN(bp)]);
1173 /* Calculate the current MAX line speed limit for the MF
1176 if (IS_MF_PERCENT_BW(bp))
1177 line_speed = (line_speed * maxCfg) / 100;
1178 else { /* SD mode */
1179 u16 vn_max_rate = maxCfg * 100;
1181 if (vn_max_rate < line_speed)
1182 line_speed = vn_max_rate;
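/* Illustrative numbers (not from a real configuration): on a 10000 Mbps
 * link, a percent-BW maxCfg of 25 reports 2500 Mbps; in SD mode maxCfg
 * is in 100 Mbps units, so maxCfg = 25 likewise caps the reported
 * speed at 2500 Mbps.
 */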
1190 * bnx2x_fill_report_data - fill link report data to report
1192 * @bp: driver handle
1193 * @data: link state to update
1195 * It uses non-atomic bit operations because it is called under the mutex.
1197 static void bnx2x_fill_report_data(struct bnx2x *bp,
1198 struct bnx2x_link_report_data *data)
1200 memset(data, 0, sizeof(*data));
1203 /* Fill the report data: effective line speed */
1204 data->line_speed = bnx2x_get_mf_speed(bp);
1207 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1208 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1209 &data->link_report_flags);
1211 if (!BNX2X_NUM_ETH_QUEUES(bp))
1212 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1213 &data->link_report_flags);
1216 if (bp->link_vars.duplex == DUPLEX_FULL)
1217 __set_bit(BNX2X_LINK_REPORT_FD,
1218 &data->link_report_flags);
1220 /* Rx Flow Control is ON */
1221 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1222 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1223 &data->link_report_flags);
1225 /* Tx Flow Control is ON */
1226 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1227 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1228 &data->link_report_flags);
1230 *data = bp->vf_link_vars;
1235 * bnx2x_link_report - report link status to OS.
1237 * @bp: driver handle
1239 * Calls __bnx2x_link_report() under the same locking scheme
1240 * as the link/PHY state managing code to ensure a consistent link
1244 void bnx2x_link_report(struct bnx2x *bp)
1246 bnx2x_acquire_phy_lock(bp);
1247 __bnx2x_link_report(bp);
1248 bnx2x_release_phy_lock(bp);
1252 * __bnx2x_link_report - report link status to OS.
1254 * @bp: driver handle
1256 * Non-atomic implementation.
1257 * Should be called under the phy_lock.
1259 void __bnx2x_link_report(struct bnx2x *bp)
1261 struct bnx2x_link_report_data cur_data;
1263 if (bp->force_link_down) {
1264 bp->link_vars.link_up = 0;
1269 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1270 bnx2x_read_mf_cfg(bp);
1272 /* Read the current link report info */
1273 bnx2x_fill_report_data(bp, &cur_data);
1275 /* Don't report link down or exactly the same link status twice */
1276 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1277 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1278 &bp->last_reported_link.link_report_flags) &&
1279 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1280 &cur_data.link_report_flags)))
1285 /* We are going to report new link parameters now -
1286 * remember the current data for the next time.
1288 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1290 /* propagate status to VFs */
1292 bnx2x_iov_link_update(bp);
1294 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1295 &cur_data.link_report_flags)) {
1296 netif_carrier_off(bp->dev);
1297 netdev_err(bp->dev, "NIC Link is Down\n");
1303 netif_carrier_on(bp->dev);
1305 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1306 &cur_data.link_report_flags))
1311 /* Handle the FC at the end so that only these flags can possibly
1312 * be set. This way we may easily check if there is no FC
1315 if (cur_data.link_report_flags) {
1316 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1317 &cur_data.link_report_flags)) {
1318 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1319 &cur_data.link_report_flags))
1320 flow = "ON - receive & transmit";
1322 flow = "ON - receive";
1324 flow = "ON - transmit";
1329 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1330 cur_data.line_speed, duplex, flow);
1334 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1338 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1339 struct eth_rx_sge *sge;
1341 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1343 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1344 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1347 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1348 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1352 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1353 struct bnx2x_fastpath *fp, int last)
1357 for (i = 0; i < last; i++) {
1358 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1359 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1360 u8 *data = first_buf->data;
1363 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1366 if (tpa_info->tpa_state == BNX2X_TPA_START)
1367 dma_unmap_single(&bp->pdev->dev,
1368 dma_unmap_addr(first_buf, mapping),
1369 fp->rx_buf_size, DMA_FROM_DEVICE);
1370 bnx2x_frag_free(fp, data);
1371 first_buf->data = NULL;
1375 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1379 for_each_rx_queue_cnic(bp, j) {
1380 struct bnx2x_fastpath *fp = &bp->fp[j];
1384 /* Activate BD ring */
1386 * this will generate an interrupt (to the TSTORM)
1387 * must only be done after chip is initialized
1389 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1394 void bnx2x_init_rx_rings(struct bnx2x *bp)
1396 int func = BP_FUNC(bp);
1400 /* Allocate TPA resources */
1401 for_each_eth_queue(bp, j) {
1402 struct bnx2x_fastpath *fp = &bp->fp[j];
1405 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1407 if (fp->mode != TPA_MODE_DISABLED) {
1408 /* Fill the per-aggregation pool */
1409 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1410 struct bnx2x_agg_info *tpa_info =
1412 struct sw_rx_bd *first_buf =
1413 &tpa_info->first_buf;
1416 bnx2x_frag_alloc(fp, GFP_KERNEL);
1417 if (!first_buf->data) {
1418 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1420 bnx2x_free_tpa_pool(bp, fp, i);
1421 fp->mode = TPA_MODE_DISABLED;
1424 dma_unmap_addr_set(first_buf, mapping, 0);
1425 tpa_info->tpa_state = BNX2X_TPA_STOP;
1428 /* "next page" elements initialization */
1429 bnx2x_set_next_page_sgl(fp);
1431 /* set SGEs bit mask */
1432 bnx2x_init_sge_ring_bit_mask(fp);
1434 /* Allocate SGEs and initialize the ring elements */
1435 for (i = 0, ring_prod = 0;
1436 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1438 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1440 BNX2X_ERR("was only able to allocate %d rx sges\n",
1442 BNX2X_ERR("disabling TPA for queue[%d]\n",
1444 /* Cleanup already allocated elements */
1445 bnx2x_free_rx_sge_range(bp, fp,
1447 bnx2x_free_tpa_pool(bp, fp,
1449 fp->mode = TPA_MODE_DISABLED;
1453 ring_prod = NEXT_SGE_IDX(ring_prod);
1456 fp->rx_sge_prod = ring_prod;
1460 for_each_eth_queue(bp, j) {
1461 struct bnx2x_fastpath *fp = &bp->fp[j];
1465 /* Activate BD ring */
1467 * this will generate an interrupt (to the TSTORM)
1468 * must only be done after chip is initialized
1470 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1476 if (CHIP_IS_E1(bp)) {
1477 REG_WR(bp, BAR_USTRORM_INTMEM +
1478 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1479 U64_LO(fp->rx_comp_mapping));
1480 REG_WR(bp, BAR_USTRORM_INTMEM +
1481 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1482 U64_HI(fp->rx_comp_mapping));
1487 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1490 struct bnx2x *bp = fp->bp;
1492 for_each_cos_in_tx_queue(fp, cos) {
1493 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1494 unsigned pkts_compl = 0, bytes_compl = 0;
1496 u16 sw_prod = txdata->tx_pkt_prod;
1497 u16 sw_cons = txdata->tx_pkt_cons;
1499 while (sw_cons != sw_prod) {
1500 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1501 &pkts_compl, &bytes_compl);
1505 netdev_tx_reset_queue(
1506 netdev_get_tx_queue(bp->dev,
1507 txdata->txq_index));
1511 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1515 for_each_tx_queue_cnic(bp, i) {
1516 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1520 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1524 for_each_eth_queue(bp, i) {
1525 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1529 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1531 struct bnx2x *bp = fp->bp;
1534 /* ring wasn't allocated */
1535 if (fp->rx_buf_ring == NULL)
1538 for (i = 0; i < NUM_RX_BD; i++) {
1539 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1540 u8 *data = rx_buf->data;
1544 dma_unmap_single(&bp->pdev->dev,
1545 dma_unmap_addr(rx_buf, mapping),
1546 fp->rx_buf_size, DMA_FROM_DEVICE);
1548 rx_buf->data = NULL;
1549 bnx2x_frag_free(fp, data);
1553 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1557 for_each_rx_queue_cnic(bp, j) {
1558 bnx2x_free_rx_bds(&bp->fp[j]);
1562 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1566 for_each_eth_queue(bp, j) {
1567 struct bnx2x_fastpath *fp = &bp->fp[j];
1569 bnx2x_free_rx_bds(fp);
1571 if (fp->mode != TPA_MODE_DISABLED)
1572 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1576 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1578 bnx2x_free_tx_skbs_cnic(bp);
1579 bnx2x_free_rx_skbs_cnic(bp);
1582 void bnx2x_free_skbs(struct bnx2x *bp)
1584 bnx2x_free_tx_skbs(bp);
1585 bnx2x_free_rx_skbs(bp);
1588 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1590 /* load old values */
1591 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1593 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1594 /* leave all but MAX value */
1595 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1597 /* set new MAX value */
1598 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1599 & FUNC_MF_CFG_MAX_BW_MASK;
1601 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1606 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1608 * @bp: driver handle
1609 * @nvecs: number of vectors to be released
1611 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1615 if (nvecs == offset)
1618 /* VFs don't have a default SB */
1620 free_irq(bp->msix_table[offset].vector, bp->dev);
1621 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1622 bp->msix_table[offset].vector);
1626 if (CNIC_SUPPORT(bp)) {
1627 if (nvecs == offset)
1632 for_each_eth_queue(bp, i) {
1633 if (nvecs == offset)
1635 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1636 i, bp->msix_table[offset].vector);
1638 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1642 void bnx2x_free_irq(struct bnx2x *bp)
1644 if (bp->flags & USING_MSIX_FLAG &&
1645 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1646 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1648 /* vfs don't have a default status block */
1652 bnx2x_free_msix_irqs(bp, nvecs);
1654 free_irq(bp->dev->irq, bp->dev);
1658 int bnx2x_enable_msix(struct bnx2x *bp)
1660 int msix_vec = 0, i, rc;
1662 /* VFs don't have a default status block */
1664 bp->msix_table[msix_vec].entry = msix_vec;
1665 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1666 bp->msix_table[0].entry);
1670 /* Cnic requires an msix vector for itself */
1671 if (CNIC_SUPPORT(bp)) {
1672 bp->msix_table[msix_vec].entry = msix_vec;
1673 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1674 msix_vec, bp->msix_table[msix_vec].entry);
1678 /* We need separate vectors for ETH queues only (not FCoE) */
1679 for_each_eth_queue(bp, i) {
1680 bp->msix_table[msix_vec].entry = msix_vec;
1681 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1682 msix_vec, msix_vec, i);
1686 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1689 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1690 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1692 * reconfigure number of tx/rx queues according to available
1695 if (rc == -ENOSPC) {
1696 /* Get by with single vector */
1697 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1699 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1704 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1705 bp->flags |= USING_SINGLE_MSIX_FLAG;
1707 BNX2X_DEV_INFO("set number of queues to 1\n");
1708 bp->num_ethernet_queues = 1;
1709 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1710 } else if (rc < 0) {
1711 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1713 } else if (rc < msix_vec) {
1714 /* how many fewer vectors will we have? */
1715 int diff = msix_vec - rc;
1717 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1720 * decrease number of queues by number of unallocated entries
1722 bp->num_ethernet_queues -= diff;
1723 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
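/* Worked example with assumed counts: a PF with CNIC support and 8 ETH
 * queues requests 1 (slowpath) + 1 (CNIC) + 8 = 10 vectors; if only 6
 * are granted, diff = 4 and the driver carries on with 4 ETH queues
 * plus the slowpath and CNIC vectors.
 */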
1725 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1729 bp->flags |= USING_MSIX_FLAG;
1734 /* fall back to INTx if not enough memory */
1736 bp->flags |= DISABLE_MSI_FLAG;
1741 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1743 int i, rc, offset = 0;
1745 /* no default status block for vf */
1747 rc = request_irq(bp->msix_table[offset++].vector,
1748 bnx2x_msix_sp_int, 0,
1749 bp->dev->name, bp->dev);
1751 BNX2X_ERR("request sp irq failed\n");
1756 if (CNIC_SUPPORT(bp))
1759 for_each_eth_queue(bp, i) {
1760 struct bnx2x_fastpath *fp = &bp->fp[i];
1761 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1764 rc = request_irq(bp->msix_table[offset].vector,
1765 bnx2x_msix_fp_int, 0, fp->name, fp);
1767 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1768 bp->msix_table[offset].vector, rc);
1769 bnx2x_free_msix_irqs(bp, offset);
1776 i = BNX2X_NUM_ETH_QUEUES(bp);
1778 offset = 1 + CNIC_SUPPORT(bp);
1779 netdev_info(bp->dev,
1780 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1781 bp->msix_table[0].vector,
1782 0, bp->msix_table[offset].vector,
1783 i - 1, bp->msix_table[offset + i - 1].vector);
1785 offset = CNIC_SUPPORT(bp);
1786 netdev_info(bp->dev,
1787 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1788 0, bp->msix_table[offset].vector,
1789 i - 1, bp->msix_table[offset + i - 1].vector);
1794 int bnx2x_enable_msi(struct bnx2x *bp)
1798 rc = pci_enable_msi(bp->pdev);
1800 BNX2X_DEV_INFO("MSI is not attainable\n");
1803 bp->flags |= USING_MSI_FLAG;
1808 static int bnx2x_req_irq(struct bnx2x *bp)
1810 unsigned long flags;
1813 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1816 flags = IRQF_SHARED;
1818 if (bp->flags & USING_MSIX_FLAG)
1819 irq = bp->msix_table[0].vector;
1821 irq = bp->pdev->irq;
1823 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1826 static int bnx2x_setup_irqs(struct bnx2x *bp)
1829 if (bp->flags & USING_MSIX_FLAG &&
1830 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1831 rc = bnx2x_req_msix_irqs(bp);
1835 rc = bnx2x_req_irq(bp);
1837 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1840 if (bp->flags & USING_MSI_FLAG) {
1841 bp->dev->irq = bp->pdev->irq;
1842 netdev_info(bp->dev, "using MSI IRQ %d\n",
1845 if (bp->flags & USING_MSIX_FLAG) {
1846 bp->dev->irq = bp->msix_table[0].vector;
1847 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1855 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1859 for_each_rx_queue_cnic(bp, i) {
1860 napi_enable(&bnx2x_fp(bp, i, napi));
1864 static void bnx2x_napi_enable(struct bnx2x *bp)
1868 for_each_eth_queue(bp, i) {
1869 napi_enable(&bnx2x_fp(bp, i, napi));
1873 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1877 for_each_rx_queue_cnic(bp, i) {
1878 napi_disable(&bnx2x_fp(bp, i, napi));
1882 static void bnx2x_napi_disable(struct bnx2x *bp)
1886 for_each_eth_queue(bp, i) {
1887 napi_disable(&bnx2x_fp(bp, i, napi));
1891 void bnx2x_netif_start(struct bnx2x *bp)
1893 if (netif_running(bp->dev)) {
1894 bnx2x_napi_enable(bp);
1895 if (CNIC_LOADED(bp))
1896 bnx2x_napi_enable_cnic(bp);
1897 bnx2x_int_enable(bp);
1898 if (bp->state == BNX2X_STATE_OPEN)
1899 netif_tx_wake_all_queues(bp->dev);
1903 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1905 bnx2x_int_disable_sync(bp, disable_hw);
1906 bnx2x_napi_disable(bp);
1907 if (CNIC_LOADED(bp))
1908 bnx2x_napi_disable_cnic(bp);
1911 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1912 struct net_device *sb_dev,
1913 select_queue_fallback_t fallback)
1915 struct bnx2x *bp = netdev_priv(dev);
1917 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1918 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1919 u16 ether_type = ntohs(hdr->h_proto);
1921 /* Skip VLAN tag if present */
1922 if (ether_type == ETH_P_8021Q) {
1923 struct vlan_ethhdr *vhdr =
1924 (struct vlan_ethhdr *)skb->data;
1926 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1929 /* If ethertype is FCoE or FIP - use FCoE ring */
1930 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1931 return bnx2x_fcoe_tx(bp, txq_index);
1934 /* select a non-FCoE queue */
1935 return fallback(dev, skb, NULL) %
1936 (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1939 void bnx2x_set_num_queues(struct bnx2x *bp)
1942 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1944 /* override in STORAGE SD modes */
1945 if (IS_MF_STORAGE_ONLY(bp))
1946 bp->num_ethernet_queues = 1;
1948 /* Add special queues */
1949 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1950 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1952 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1956 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1958 * @bp: Driver handle
1960 * We currently support at most 16 Tx queues for each CoS, thus we will
1961 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1964 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1965 * index after all ETH L2 indices.
1967 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1968 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1969 * 16..31,...) with indices that are not coupled with any real Tx queue.
1971 * The proper configuration of skb->queue_mapping is handled by
1972 * bnx2x_select_queue() and __skb_tx_hash().
1974 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1975 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1977 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1981 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1982 rx = BNX2X_NUM_ETH_QUEUES(bp);
1984 /* account for fcoe queue */
1985 if (include_cnic && !NO_FCOE(bp)) {
1990 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1992 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1995 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1997 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2001 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2007 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2011 for_each_queue(bp, i) {
2012 struct bnx2x_fastpath *fp = &bp->fp[i];
2015 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2018 * Although there are no IP frames expected to arrive on
2019 * this ring we still want to add an
2020 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2023 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2026 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2027 IP_HEADER_ALIGNMENT_PADDING +
2030 BNX2X_FW_RX_ALIGN_END;
2031 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2032 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
2033 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2034 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2036 fp->rx_frag_size = 0;
2040 static int bnx2x_init_rss(struct bnx2x *bp)
2043 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2045 /* Prepare the initial contents for the indirection table if RSS is
2048 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2049 bp->rss_conf_obj.ind_table[i] =
2051 ethtool_rxfh_indir_default(i, num_eth_queues);
2054 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2055 * per-port, so if explicit configuration is needed, do it only
2058 * For 57712 and newer on the other hand it's a per-function
2061 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2064 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2065 bool config_hash, bool enable)
2067 struct bnx2x_config_rss_params params = {NULL};
2069 /* Although RSS is meaningless when there is a single HW queue we
2070 * still need it enabled in order to have HW Rx hash generated.
2072 * if (!is_eth_multi(bp))
2073 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2076 params.rss_obj = rss_obj;
2078 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2081 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2083 /* RSS configuration */
2084 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2085 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2086 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2087 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2088 if (rss_obj->udp_rss_v4)
2089 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2090 if (rss_obj->udp_rss_v6)
2091 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2093 if (!CHIP_IS_E1x(bp)) {
2094 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2095 __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags);
2096 __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags);
2098 /* valid only for TUNN_MODE_GRE tunnel mode */
2099 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags);
2102 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2106 params.rss_result_mask = MULTI_MASK;
2108 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2112 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2113 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2117 return bnx2x_config_rss(bp, ¶ms);
2119 return bnx2x_vfpf_config_rss(bp, ¶ms);
2122 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2124 struct bnx2x_func_state_params func_params = {NULL};
2126 /* Prepare parameters for function state transitions */
2127 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2129 func_params.f_obj = &bp->func_obj;
2130 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2132 func_params.params.hw_init.load_phase = load_code;
2134 return bnx2x_func_state_change(bp, &func_params);
2138 * Cleans the objects that have internal lists without sending
2139 * ramrods. Should be run when interrupts are disabled.
2141 void bnx2x_squeeze_objects(struct bnx2x *bp)
2144 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2145 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2146 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2148 /***************** Cleanup MACs' object first *************************/
2150 /* Wait for completion of the requested ramrods */
2151 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2152 /* Perform a dry cleanup */
2153 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2155 /* Clean ETH primary MAC */
2156 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2157 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2160 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2162 /* Cleanup UC list */
2164 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2165 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2168 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2170 /***************** Now clean mcast object *****************************/
2171 rparam.mcast_obj = &bp->mcast_obj;
2172 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2174 /* Add a DEL command... - Since we're doing a driver cleanup only,
2175 * we take a lock surrounding both the initial send and the CONTs,
2176 * as we don't want a true completion to disrupt us in the middle.
2178 netif_addr_lock_bh(bp->dev);
2179 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2181 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2184 /* ...and wait until all pending commands are cleared */
2185 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2188 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2190 netif_addr_unlock_bh(bp->dev);
2194 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2196 netif_addr_unlock_bh(bp->dev);
2199 #ifndef BNX2X_STOP_ON_ERROR
2200 #define LOAD_ERROR_EXIT(bp, label) \
2202 (bp)->state = BNX2X_STATE_ERROR; \
2206 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2208 bp->cnic_loaded = false; \
2211 #else /*BNX2X_STOP_ON_ERROR*/
2212 #define LOAD_ERROR_EXIT(bp, label) \
2214 (bp)->state = BNX2X_STATE_ERROR; \
2218 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2220 bp->cnic_loaded = false; \
2224 #endif /*BNX2X_STOP_ON_ERROR*/
2226 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2228 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2229 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2233 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2235 int num_groups, vf_headroom = 0;
2236 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2238 /* number of queues for statistics is number of eth queues + FCoE */
2239 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2241 /* Total number of FW statistics requests =
2242 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2243 * and fcoe l2 queue) stats + num of queues (which includes another 1
2244 * for fcoe l2 queue if applicable)
2246 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2248 /* vf stats appear in the request list, but their data is allocated by
2249 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2250 * it is used to determine where to place the vf stats queries in the
2254 vf_headroom = bnx2x_vf_headroom(bp);
2256 /* Request is built from stats_query_header and an array of
2257 * stats_query_cmd_group each of which contains
2258 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2259 * configured in the stats_query_header.
2262 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2263 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2266 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2267 bp->fw_stats_num, vf_headroom, num_groups);
2268 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2269 num_groups * sizeof(struct stats_query_cmd_group);
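/* Sketch of the arithmetic, assuming STATS_QUERY_CMD_COUNT is 16 and
 * using illustrative counts: 8 ETH queues with FCoE stats and no VFs
 * give fw_stats_num = 2 + 1 + 9 = 12, so num_groups =
 * (12 / 16) + (12 % 16 ? 1 : 0) = 1 command group.
 */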
2271 /* Data for statistics requests + stats_counter
2272 * stats_counter holds per-STORM counters that are incremented
2273 * when STORM has finished with the current request.
2274 * memory for FCoE offloaded statistics is counted anyway,
2275 * even if they will not be sent.
2276 * VF stats are not accounted for here as the data of VF stats is stored
2277 * in memory allocated by the VF, not here.
2279 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2280 sizeof(struct per_pf_stats) +
2281 sizeof(struct fcoe_statistics_params) +
2282 sizeof(struct per_queue_stats) * num_queue_stats +
2283 sizeof(struct stats_counter);
2285 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2286 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2291 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2292 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2293 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2294 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2295 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2296 bp->fw_stats_req_sz;
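/* Layout of the single DMA-coherent block allocated above (sketch): the
 * request area (fw_stats_req_sz bytes) comes first, immediately followed by
 * the data area (fw_stats_data_sz bytes); the req/data pointers and mappings
 * set here are simply offsets into that one allocation.
 */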
2298 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2299 U64_HI(bp->fw_stats_req_mapping),
2300 U64_LO(bp->fw_stats_req_mapping));
2301 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2302 U64_HI(bp->fw_stats_data_mapping),
2303 U64_LO(bp->fw_stats_data_mapping));
2307 bnx2x_free_fw_stats_mem(bp);
2308 BNX2X_ERR("Can't allocate FW stats memory\n");
2312 /* send load request to mcp and analyze response */
2313 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2319 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2320 DRV_MSG_SEQ_NUMBER_MASK);
2321 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2323 /* Get current FW pulse sequence */
2324 bp->fw_drv_pulse_wr_seq =
2325 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2326 DRV_PULSE_SEQ_MASK);
2327 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2329 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2331 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2332 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2335 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2337 /* if mcp fails to respond we must abort */
2338 if (!(*load_code)) {
2339 BNX2X_ERR("MCP response failure, aborting\n");
2343 /* If mcp refused (e.g. other port is in diagnostic mode) we
 * must abort
 */
2346 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2347 BNX2X_ERR("MCP refused load request, aborting\n");
2353 /* check whether another PF has already loaded FW to chip. In
2354 * virtualized environments a pf from another VM may have already
2355 * initialized the device including loading FW
2357 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2359 /* is another pf loaded on this engine? */
2360 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2361 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2362 /* build my FW version dword */
2363 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2364 (BCM_5710_FW_MINOR_VERSION << 8) +
2365 (BCM_5710_FW_REVISION_VERSION << 16) +
2366 (BCM_5710_FW_ENGINEERING_VERSION << 24);
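/* For illustration only: a hypothetical FW version 7.13.1 (engineering 0)
 * would pack as 7 + (13 << 8) + (1 << 16) + (0 << 24) = 0x00010d07, which
 * is then compared against the dword read back from XSEM_REG_PRAM below.
 */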
2368 /* read loaded FW from chip */
2369 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2371 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2374 /* abort nic load if version mismatch */
2375 if (my_fw != loaded_fw) {
2377 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2380 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2388 /* returns the "mcp load_code" according to global load_count array */
2389 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2391 int path = BP_PATH(bp);
2393 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2394 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2395 bnx2x_load_count[path][2]);
2396 bnx2x_load_count[path][0]++;
2397 bnx2x_load_count[path][1 + port]++;
2398 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2399 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2400 bnx2x_load_count[path][2]);
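/* bnx2x_load_count[path] holds {engine total, port 0, port 1}. The first
 * function to load on the path behaves as if MCP returned LOAD_COMMON, the
 * first on a given port as LOAD_PORT, and every later one as LOAD_FUNCTION.
 */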
2401 if (bnx2x_load_count[path][0] == 1)
2402 return FW_MSG_CODE_DRV_LOAD_COMMON;
2403 else if (bnx2x_load_count[path][1 + port] == 1)
2404 return FW_MSG_CODE_DRV_LOAD_PORT;
2406 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2409 /* mark PMF if applicable */
2410 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2412 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2413 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2414 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2416 /* We need the barrier to ensure the ordering between the
2417 * writing to bp->port.pmf here and reading it from the
2418 * bnx2x_periodic_task().
2425 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2428 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2430 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2431 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2432 (bp->common.shmem2_base)) {
2433 if (SHMEM2_HAS(bp, dcc_support))
2434 SHMEM2_WR(bp, dcc_support,
2435 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2436 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2437 if (SHMEM2_HAS(bp, afex_driver_support))
2438 SHMEM2_WR(bp, afex_driver_support,
2439 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2442 /* Set AFEX default VLAN tag to an invalid value */
2443 bp->afex_def_vlan_tag = -1;
2447 * bnx2x_bz_fp - zero content of the fastpath structure.
2449 * @bp: driver handle
2450 * @index: fastpath index to be zeroed
2452 * Makes sure the contents of the bp->fp[index].napi are kept
 * intact.
 */
2455 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2457 struct bnx2x_fastpath *fp = &bp->fp[index];
2459 struct napi_struct orig_napi = fp->napi;
2460 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2462 /* bzero bnx2x_fastpath contents */
2464 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2465 sizeof(struct bnx2x_agg_info));
2466 memset(fp, 0, sizeof(*fp));
2468 /* Restore the NAPI object as it has been already initialized */
2469 fp->napi = orig_napi;
2470 fp->tpa_info = orig_tpa_info;
2474 fp->max_cos = bp->max_cos;
2476 /* Special queues support only one CoS */
2479 /* Init txdata pointers */
2481 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2483 for_each_cos_in_tx_queue(fp, cos)
2484 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2485 BNX2X_NUM_ETH_QUEUES(bp) + index];
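/* The global bnx2x_txq array is laid out CoS-major: entry
 * (cos * BNX2X_NUM_ETH_QUEUES(bp) + index). For example (illustrative),
 * with 8 ETH queues, queue 2 at CoS 1 would use bnx2x_txq[10].
 */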
2487 /* set the tpa flag for each queue. The tpa flag determines the queue
2488 * minimal size so it must be set prior to queue memory allocation
2490 if (bp->dev->features & NETIF_F_LRO)
2491 fp->mode = TPA_MODE_LRO;
2492 else if (bp->dev->features & NETIF_F_GRO_HW)
2493 fp->mode = TPA_MODE_GRO;
2495 fp->mode = TPA_MODE_DISABLED;
2497 /* We don't want TPA if it's disabled in bp
2498 * or if this is an FCoE L2 ring.
2500 if (bp->disable_tpa || IS_FCOE_FP(fp))
2501 fp->mode = TPA_MODE_DISABLED;
2504 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2508 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2511 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2512 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2515 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2518 int bnx2x_load_cnic(struct bnx2x *bp)
2520 int i, rc, port = BP_PORT(bp);
2522 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2524 mutex_init(&bp->cnic_mutex);
2527 rc = bnx2x_alloc_mem_cnic(bp);
2529 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2530 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2534 rc = bnx2x_alloc_fp_mem_cnic(bp);
2536 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2537 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2540 /* Update the number of queues with the cnic queues */
2541 rc = bnx2x_set_real_num_queues(bp, 1);
2543 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2544 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2547 /* Add all CNIC NAPI objects */
2548 bnx2x_add_all_napi_cnic(bp);
2549 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2550 bnx2x_napi_enable_cnic(bp);
2552 rc = bnx2x_init_hw_func_cnic(bp);
2554 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2556 bnx2x_nic_init_cnic(bp);
2559 /* Enable Timer scan */
2560 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2562 /* setup cnic queues */
2563 for_each_cnic_queue(bp, i) {
2564 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2566 BNX2X_ERR("Queue setup failed\n");
2567 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2572 /* Initialize Rx filter. */
2573 bnx2x_set_rx_mode_inner(bp);
2575 /* re-read iscsi info */
2576 bnx2x_get_iscsi_info(bp);
2577 bnx2x_setup_cnic_irq_info(bp);
2578 bnx2x_setup_cnic_info(bp);
2579 bp->cnic_loaded = true;
2580 if (bp->state == BNX2X_STATE_OPEN)
2581 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2583 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2587 #ifndef BNX2X_STOP_ON_ERROR
2589 /* Disable Timer scan */
2590 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2593 bnx2x_napi_disable_cnic(bp);
2594 /* Update the number of queues without the cnic queues */
2595 if (bnx2x_set_real_num_queues(bp, 0))
2596 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2598 BNX2X_ERR("CNIC-related load failed\n");
2599 bnx2x_free_fp_mem_cnic(bp);
2600 bnx2x_free_mem_cnic(bp);
2602 #endif /* ! BNX2X_STOP_ON_ERROR */
2605 /* must be called with rtnl_lock */
2606 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2608 int port = BP_PORT(bp);
2609 int i, rc = 0, load_code = 0;
2611 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2613 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2615 #ifdef BNX2X_STOP_ON_ERROR
2616 if (unlikely(bp->panic)) {
2617 BNX2X_ERR("Can't load NIC when there is panic\n");
2622 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2624 /* zero the structure w/o any lock, before SP handler is initialized */
2625 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2626 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2627 &bp->last_reported_link.link_report_flags);
2630 /* must be called before memory allocation and HW init */
2631 bnx2x_ilt_set_info(bp);
2634 * Zero fastpath structures preserving invariants like napi, which are
2635 * allocated only once, fp index, max_cos, bp pointer.
2636 * Also set fp->mode and txdata_ptr.
2638 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2639 for_each_queue(bp, i)
2641 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2642 bp->num_cnic_queues) *
2643 sizeof(struct bnx2x_fp_txdata));
2645 bp->fcoe_init = false;
2647 /* Set the receive queues buffer size */
2648 bnx2x_set_rx_buf_size(bp);
2651 rc = bnx2x_alloc_mem(bp);
2653 BNX2X_ERR("Unable to allocate bp memory\n");
2658 /* needs to be done after alloc mem, since it's self adjusting to the amount
2659 * of memory available for RSS queues
2661 rc = bnx2x_alloc_fp_mem(bp);
2663 BNX2X_ERR("Unable to allocate memory for fps\n");
2664 LOAD_ERROR_EXIT(bp, load_error0);
2667 /* Allocate memory for FW statistics */
2668 if (bnx2x_alloc_fw_stats_mem(bp))
2669 LOAD_ERROR_EXIT(bp, load_error0);
2671 /* request pf to initialize status blocks */
2673 rc = bnx2x_vfpf_init(bp);
2675 LOAD_ERROR_EXIT(bp, load_error0);
2678 /* As long as bnx2x_alloc_mem() may possibly update
2679 * bp->num_queues, bnx2x_set_real_num_queues() should always
2680 * come after it. At this stage cnic queues are not counted.
2682 rc = bnx2x_set_real_num_queues(bp, 0);
2684 BNX2X_ERR("Unable to set real_num_queues\n");
2685 LOAD_ERROR_EXIT(bp, load_error0);
2688 /* configure multi cos mappings in kernel.
2689 * this configuration may be overridden by a multi class queue
2690 * discipline or by a dcbx negotiation result.
2692 bnx2x_setup_tc(bp->dev, bp->max_cos);
2694 /* Add all NAPI objects */
2695 bnx2x_add_all_napi(bp);
2696 DP(NETIF_MSG_IFUP, "napi added\n");
2697 bnx2x_napi_enable(bp);
2700 /* set pf load just before approaching the MCP */
2701 bnx2x_set_pf_load(bp);
2703 /* if mcp exists send load request and analyze response */
2704 if (!BP_NOMCP(bp)) {
2705 /* attempt to load pf */
2706 rc = bnx2x_nic_load_request(bp, &load_code);
2708 LOAD_ERROR_EXIT(bp, load_error1);
2710 /* what did mcp say? */
2711 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2713 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2714 LOAD_ERROR_EXIT(bp, load_error2);
2717 load_code = bnx2x_nic_load_no_mcp(bp, port);
2720 /* mark pmf if applicable */
2721 bnx2x_nic_load_pmf(bp, load_code);
2723 /* Init Function state controlling object */
2724 bnx2x__init_func_obj(bp);
2727 rc = bnx2x_init_hw(bp, load_code);
2729 BNX2X_ERR("HW init failed, aborting\n");
2730 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2731 LOAD_ERROR_EXIT(bp, load_error2);
2735 bnx2x_pre_irq_nic_init(bp);
2737 /* Connect to IRQs */
2738 rc = bnx2x_setup_irqs(bp);
2740 BNX2X_ERR("setup irqs failed\n");
2742 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2743 LOAD_ERROR_EXIT(bp, load_error2);
2746 /* Init per-function objects */
2748 /* Setup NIC internals and enable interrupts */
2749 bnx2x_post_irq_nic_init(bp, load_code);
2751 bnx2x_init_bp_objs(bp);
2752 bnx2x_iov_nic_init(bp);
2754 /* Set AFEX default VLAN tag to an invalid value */
2755 bp->afex_def_vlan_tag = -1;
2756 bnx2x_nic_load_afex_dcc(bp, load_code);
2757 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2758 rc = bnx2x_func_start(bp);
2760 BNX2X_ERR("Function start failed!\n");
2761 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2763 LOAD_ERROR_EXIT(bp, load_error3);
2766 /* Send LOAD_DONE command to MCP */
2767 if (!BP_NOMCP(bp)) {
2768 load_code = bnx2x_fw_command(bp,
2769 DRV_MSG_CODE_LOAD_DONE, 0);
2771 BNX2X_ERR("MCP response failure, aborting\n");
2773 LOAD_ERROR_EXIT(bp, load_error3);
2777 /* initialize FW coalescing state machines in RAM */
2778 bnx2x_update_coalesce(bp);
2781 /* setup the leading queue */
2782 rc = bnx2x_setup_leading(bp);
2784 BNX2X_ERR("Setup leading failed!\n");
2785 LOAD_ERROR_EXIT(bp, load_error3);
2788 /* set up the rest of the queues */
2789 for_each_nondefault_eth_queue(bp, i) {
2791 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2793 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2795 BNX2X_ERR("Queue %d setup failed\n", i);
2796 LOAD_ERROR_EXIT(bp, load_error3);
2801 rc = bnx2x_init_rss(bp);
2803 BNX2X_ERR("PF RSS init failed\n");
2804 LOAD_ERROR_EXIT(bp, load_error3);
2807 /* Now that clients are configured we are ready to work */
2808 bp->state = BNX2X_STATE_OPEN;
2810 /* Configure a ucast MAC */
2812 rc = bnx2x_set_eth_mac(bp, true);
2814 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2817 BNX2X_ERR("Setting Ethernet MAC failed\n");
2818 LOAD_ERROR_EXIT(bp, load_error3);
2821 if (IS_PF(bp) && bp->pending_max) {
2822 bnx2x_update_max_mf_config(bp, bp->pending_max);
2823 bp->pending_max = 0;
2826 bp->force_link_down = false;
2828 rc = bnx2x_initial_phy_init(bp, load_mode);
2830 LOAD_ERROR_EXIT(bp, load_error3);
2832 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2834 /* Start fast path */
2836 /* Re-configure vlan filters */
2837 rc = bnx2x_vlan_reconfigure_vid(bp);
2839 LOAD_ERROR_EXIT(bp, load_error3);
2841 /* Initialize Rx filter. */
2842 bnx2x_set_rx_mode_inner(bp);
2844 if (bp->flags & PTP_SUPPORTED) {
2845 bnx2x_register_phc(bp);
2847 bnx2x_configure_ptp_filters(bp);
2850 switch (load_mode) {
2852 /* Tx queues should only be re-enabled */
2853 netif_tx_wake_all_queues(bp->dev);
2857 netif_tx_start_all_queues(bp->dev);
2858 smp_mb__after_atomic();
2862 case LOAD_LOOPBACK_EXT:
2863 bp->state = BNX2X_STATE_DIAG;
2871 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2873 bnx2x__link_status_update(bp);
2875 /* start the timer */
2876 mod_timer(&bp->timer, jiffies + bp->current_interval);
2878 if (CNIC_ENABLED(bp))
2879 bnx2x_load_cnic(bp);
2882 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2884 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2885 /* mark driver is loaded in shmem2 */
2887 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2888 val &= ~DRV_FLAGS_MTU_MASK;
2889 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2890 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2891 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2892 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2895 /* Wait for all pending SP commands to complete */
2896 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2897 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2898 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2902 /* Update driver data for On-Chip MFW dump. */
2904 bnx2x_update_mfw_dump(bp);
2906 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2907 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2908 bnx2x_dcbx_init(bp, false);
2910 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2911 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2913 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2917 #ifndef BNX2X_STOP_ON_ERROR
2920 bnx2x_int_disable_sync(bp, 1);
2922 /* Clean queueable objects */
2923 bnx2x_squeeze_objects(bp);
2926 /* Free SKBs, SGEs, TPA pool and driver internals */
2927 bnx2x_free_skbs(bp);
2928 for_each_rx_queue(bp, i)
2929 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2934 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2935 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2936 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2941 bnx2x_napi_disable(bp);
2942 bnx2x_del_all_napi(bp);
2944 /* clear pf_load status, as it was already set */
2946 bnx2x_clear_pf_load(bp);
2948 bnx2x_free_fw_stats_mem(bp);
2949 bnx2x_free_fp_mem(bp);
2953 #endif /* ! BNX2X_STOP_ON_ERROR */
2956 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2960 /* Wait until tx fastpath tasks complete */
2961 for_each_tx_queue(bp, i) {
2962 struct bnx2x_fastpath *fp = &bp->fp[i];
2964 for_each_cos_in_tx_queue(fp, cos)
2965 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2972 /* must be called with rtnl_lock */
2973 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2976 bool global = false;
2978 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2980 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2981 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2983 /* mark driver is unloaded in shmem2 */
2984 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2986 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2987 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2988 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2991 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2992 (bp->state == BNX2X_STATE_CLOSED ||
2993 bp->state == BNX2X_STATE_ERROR)) {
2994 /* We can get here if the driver has been unloaded
2995 * during parity error recovery and is either waiting for a
2996 * leader to complete or for other functions to unload and
2997 * then ifdown has been issued. In this case we want to
2998 * unload and let other functions to complete a recovery
3001 bp->recovery_state = BNX2X_RECOVERY_DONE;
3003 bnx2x_release_leader_lock(bp);
3006 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3007 BNX2X_ERR("Can't unload in closed or error state\n");
3011 /* Nothing to do during unload if previous bnx2x_nic_load()
3012 * has not completed successfully - all resources are released.
3014 * We can get here only after an unsuccessful ndo_* callback, during which
3015 * dev->IFF_UP flag is still on.
3017 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3020 /* It's important to set the bp->state to the value different from
3021 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3022 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3024 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3027 /* indicate to VFs that the PF is going down */
3028 bnx2x_iov_channel_down(bp);
3030 if (CNIC_LOADED(bp))
3031 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3034 bnx2x_tx_disable(bp);
3035 netdev_reset_tc(bp->dev);
3037 bp->rx_mode = BNX2X_RX_MODE_NONE;
3039 del_timer_sync(&bp->timer);
3041 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3042 /* Set ALWAYS_ALIVE bit in shmem */
3043 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3044 bnx2x_drv_pulse(bp);
3045 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3046 bnx2x_save_statistics(bp);
3049 /* wait until consumers catch up with producers in all queues.
3050 * If we're recovering, FW can't write to the host, so there is no reason
3051 * to wait for the queues to complete all Tx.
3053 if (unload_mode != UNLOAD_RECOVERY)
3054 bnx2x_drain_tx_queues(bp);
3056 /* If this is a VF, indicate to the PF that this function is going down
3057 * (the PF will delete sp elements and clear initializations)
3060 bnx2x_vfpf_close_vf(bp);
3061 else if (unload_mode != UNLOAD_RECOVERY)
3062 /* if this is a normal/close unload need to clean up chip*/
3063 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3065 /* Send the UNLOAD_REQUEST to the MCP */
3066 bnx2x_send_unload_req(bp, unload_mode);
3068 /* Prevent transactions to host from the functions on the
3069 * engine that doesn't reset global blocks in case of global
3070 * attention once global blocks are reset and gates are opened
3071 * (the engine which leader will perform the recovery
3074 if (!CHIP_IS_E1x(bp))
3075 bnx2x_pf_disable(bp);
3077 /* Disable HW interrupts, NAPI */
3078 bnx2x_netif_stop(bp, 1);
3079 /* Delete all NAPI objects */
3080 bnx2x_del_all_napi(bp);
3081 if (CNIC_LOADED(bp))
3082 bnx2x_del_all_napi_cnic(bp);
3086 /* Report UNLOAD_DONE to MCP */
3087 bnx2x_send_unload_done(bp, false);
3091 * At this stage no more interrupts will arrive so we may safely clean
3092 * the queueable objects here in case they failed to get cleaned so far.
3095 bnx2x_squeeze_objects(bp);
3097 /* There should be no more pending SP commands at this stage */
3102 /* clear pending work in rtnl task */
3103 bp->sp_rtnl_state = 0;
3106 /* Free SKBs, SGEs, TPA pool and driver internals */
3107 bnx2x_free_skbs(bp);
3108 if (CNIC_LOADED(bp))
3109 bnx2x_free_skbs_cnic(bp);
3110 for_each_rx_queue(bp, i)
3111 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3113 bnx2x_free_fp_mem(bp);
3114 if (CNIC_LOADED(bp))
3115 bnx2x_free_fp_mem_cnic(bp);
3118 if (CNIC_LOADED(bp))
3119 bnx2x_free_mem_cnic(bp);
3123 bp->state = BNX2X_STATE_CLOSED;
3124 bp->cnic_loaded = false;
3126 /* Clear driver version indication in shmem */
3127 if (IS_PF(bp) && !BP_NOMCP(bp))
3128 bnx2x_update_mng_version(bp);
3130 /* Check if there are pending parity attentions. If there are - set
3131 * RECOVERY_IN_PROGRESS.
3133 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3134 bnx2x_set_reset_in_progress(bp);
3136 /* Set RESET_IS_GLOBAL if needed */
3138 bnx2x_set_reset_global(bp);
3141 /* The last driver must disable a "close the gate" if there is no
3142 * parity attention or "process kill" pending.
3145 !bnx2x_clear_pf_load(bp) &&
3146 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3147 bnx2x_disable_close_the_gate(bp);
3149 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3154 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3158 /* If there is no power capability, silently succeed */
3159 if (!bp->pdev->pm_cap) {
3160 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3164 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3168 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3169 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3170 PCI_PM_CTRL_PME_STATUS));
3172 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3173 /* delay required during transition out of D3hot */
3178 /* If there are other clients above don't
3179 shut down the power */
3180 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3182 /* Don't shut down the power for emulation and FPGA */
3183 if (CHIP_REV_IS_SLOW(bp))
3186 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3190 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3192 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3195 /* No more memory access after this point until
3196 * device is brought back to D0.
3201 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3208 * net_device service functions
3210 static int bnx2x_poll(struct napi_struct *napi, int budget)
3212 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3214 struct bnx2x *bp = fp->bp;
3218 #ifdef BNX2X_STOP_ON_ERROR
3219 if (unlikely(bp->panic)) {
3220 napi_complete(napi);
3224 for_each_cos_in_tx_queue(fp, cos)
3225 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3226 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3228 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3230 if (rx_work_done < budget) {
3231 /* No need to update SB for FCoE L2 ring as long as
3232 * it's connected to the default SB and the SB
3233 * has been updated when NAPI was scheduled.
3235 if (IS_FCOE_FP(fp)) {
3236 napi_complete_done(napi, rx_work_done);
3238 bnx2x_update_fpsb_idx(fp);
3239 /* bnx2x_has_rx_work() reads the status block,
3240 * thus we need to ensure that status block indices
3241 * have been actually read (bnx2x_update_fpsb_idx)
3242 * prior to this check (bnx2x_has_rx_work) so that
3243 * we won't write the "newer" value of the status block
3244 * to IGU (if there was a DMA right after
3245 * bnx2x_has_rx_work and if there is no rmb, the memory
3246 * reading (bnx2x_update_fpsb_idx) may be postponed
3247 * to right before bnx2x_ack_sb). In this case there
3248 * will never be another interrupt until there is
3249 * another update of the status block, while there
3250 * is still unhandled work.
3254 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3255 if (napi_complete_done(napi, rx_work_done)) {
3256 /* Re-enable interrupts */
3257 DP(NETIF_MSG_RX_STATUS,
3258 "Update index to %d\n", fp->fp_hc_idx);
3259 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3260 le16_to_cpu(fp->fp_hc_idx),
3264 rx_work_done = budget;
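/* More work was detected after the SB update; returning the full budget
 * keeps this queue in NAPI polling mode instead of re-enabling the
 * interrupt.
 */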
3269 return rx_work_done;
3272 /* we split the first BD into headers and data BDs
3273 * to ease the pain of our fellow microcode engineers
3274 * we use one mapping for both BDs
3276 static u16 bnx2x_tx_split(struct bnx2x *bp,
3277 struct bnx2x_fp_txdata *txdata,
3278 struct sw_tx_bd *tx_buf,
3279 struct eth_tx_start_bd **tx_bd, u16 hlen,
3282 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3283 struct eth_tx_bd *d_tx_bd;
3285 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3287 /* first fix first BD */
3288 h_tx_bd->nbytes = cpu_to_le16(hlen);
3290 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3291 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3293 /* now get a new data BD
3294 * (after the pbd) and fill it */
3295 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3296 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3298 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3299 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3301 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3302 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3303 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
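/* Illustrative example (hypothetical sizes): for a TSO packet whose linear
 * part is 1514 bytes with a 66 byte header, the start BD is trimmed to
 * nbytes = 66 and this new data BD covers the remaining 1448 bytes at
 * mapping + 66, reusing the same DMA mapping.
 */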
3305 /* this marks the BD as one that has no individual mapping */
3306 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3308 DP(NETIF_MSG_TX_QUEUED,
3309 "TSO split data size is %d (%x:%x)\n",
3310 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3313 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3318 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3319 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
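/* Fix up a TCP/UDP pseudo checksum when the checksum start used by the
 * stack is offset from the transport header by 'fix' bytes (SKB_CS_OFF):
 * the partial checksum of those bytes is folded out of (fix > 0) or back
 * into (fix < 0) 'csum', and the result is byte-swapped for the parse BD.
 */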
3320 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3322 __sum16 tsum = (__force __sum16) csum;
3325 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3326 csum_partial(t_header - fix, fix, 0)));
3329 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3330 csum_partial(t_header, -fix, 0)));
3332 return bswab16(tsum);
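/* Build a bitmask of XMIT_* flags describing the skb: which L3/L4 checksums
 * are needed (outer and, on newer non-E1x chips, inner/encapsulated) and
 * which flavour of GSO is requested, so the BD setup code below can key off
 * a single value.
 */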
3335 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3341 if (skb->ip_summed != CHECKSUM_PARTIAL)
3344 protocol = vlan_get_protocol(skb);
3345 if (protocol == htons(ETH_P_IPV6)) {
3347 prot = ipv6_hdr(skb)->nexthdr;
3350 prot = ip_hdr(skb)->protocol;
3353 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3354 if (inner_ip_hdr(skb)->version == 6) {
3355 rc |= XMIT_CSUM_ENC_V6;
3356 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3357 rc |= XMIT_CSUM_TCP;
3359 rc |= XMIT_CSUM_ENC_V4;
3360 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3361 rc |= XMIT_CSUM_TCP;
3364 if (prot == IPPROTO_TCP)
3365 rc |= XMIT_CSUM_TCP;
3367 if (skb_is_gso(skb)) {
3368 if (skb_is_gso_v6(skb)) {
3369 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3370 if (rc & XMIT_CSUM_ENC)
3371 rc |= XMIT_GSO_ENC_V6;
3373 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3374 if (rc & XMIT_CSUM_ENC)
3375 rc |= XMIT_GSO_ENC_V4;
3382 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3383 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3385 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3386 #define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3388 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3389 /* check if packet requires linearization (packet is too fragmented)
3390 no need to check fragmentation if page size > 8K (there will be no
3391 violation to FW restrictions) */
3392 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3395 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3396 int to_copy = 0, hlen = 0;
3398 if (xmit_type & XMIT_GSO_ENC)
3399 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3401 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3402 if (xmit_type & XMIT_GSO) {
3403 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3404 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3405 /* Number of windows to check */
3406 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
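/* Sliding-window check (sketch): every run of wnd_size consecutive BDs -
 * the linear part plus the following frags - must carry at least gso_size
 * (lso_mss) bytes, otherwise FW could not build a segment from one fetch
 * and the skb has to be linearized.
 */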
3411 /* Headers length */
3412 if (xmit_type & XMIT_GSO_ENC)
3413 hlen = (int)(skb_inner_transport_header(skb) -
3415 inner_tcp_hdrlen(skb);
3417 hlen = (int)(skb_transport_header(skb) -
3418 skb->data) + tcp_hdrlen(skb);
3420 /* Amount of data (w/o headers) on linear part of SKB*/
3421 first_bd_sz = skb_headlen(skb) - hlen;
3423 wnd_sum = first_bd_sz;
3425 /* Calculate the first sum - it's special */
3426 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3428 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3430 /* If there was data on linear skb data - check it */
3431 if (first_bd_sz > 0) {
3432 if (unlikely(wnd_sum < lso_mss)) {
3437 wnd_sum -= first_bd_sz;
3440 /* Others are easier: run through the frag list and
3441 check all windows */
3442 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3444 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3446 if (unlikely(wnd_sum < lso_mss)) {
3451 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3454 /* in the non-LSO case, a too fragmented packet should always
 * be linearized
 */
3461 if (unlikely(to_copy))
3462 DP(NETIF_MSG_TX_QUEUED,
3463 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3464 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3465 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3472 * bnx2x_set_pbd_gso - update PBD in GSO case.
3476 * @xmit_type: xmit flags
3478 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3479 struct eth_tx_parse_bd_e1x *pbd,
3482 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3483 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3484 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3486 if (xmit_type & XMIT_GSO_V4) {
3487 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3488 pbd->tcp_pseudo_csum =
3489 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3491 0, IPPROTO_TCP, 0));
3493 pbd->tcp_pseudo_csum =
3494 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3495 &ipv6_hdr(skb)->daddr,
3496 0, IPPROTO_TCP, 0));
3500 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3504 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3506 * @bp: driver handle
3508 * @parsing_data: data to be updated
3509 * @xmit_type: xmit flags
3511 * 57712/578xx related, when skb has encapsulation
3513 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3514 u32 *parsing_data, u32 xmit_type)
3517 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3518 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3519 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3521 if (xmit_type & XMIT_CSUM_TCP) {
3522 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3523 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3524 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3526 return skb_inner_transport_header(skb) +
3527 inner_tcp_hdrlen(skb) - skb->data;
3530 /* We support checksum offload for TCP and UDP only.
3531 * No need to pass the UDP header length - it's a constant.
3533 return skb_inner_transport_header(skb) +
3534 sizeof(struct udphdr) - skb->data;
3538 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3540 * @bp: driver handle
3542 * @parsing_data: data to be updated
3543 * @xmit_type: xmit flags
3545 * 57712/578xx related
3547 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3548 u32 *parsing_data, u32 xmit_type)
3551 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3552 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3553 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3555 if (xmit_type & XMIT_CSUM_TCP) {
3556 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3557 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3558 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3560 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3562 /* We support checksum offload for TCP and UDP only.
3563 * No need to pass the UDP header length - it's a constant.
3565 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3568 /* set FW indication according to inner or outer protocols if tunneled */
3569 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3570 struct eth_tx_start_bd *tx_start_bd,
3573 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3575 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3576 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3578 if (!(xmit_type & XMIT_CSUM_TCP))
3579 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3583 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3585 * @bp: driver handle
3587 * @pbd: parse BD to be updated
3588 * @xmit_type: xmit flags
3590 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3591 struct eth_tx_parse_bd_e1x *pbd,
3594 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3596 /* for now NS flag is not used in Linux */
3599 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3600 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3602 pbd->ip_hlen_w = (skb_transport_header(skb) -
3603 skb_network_header(skb)) >> 1;
3605 hlen += pbd->ip_hlen_w;
3607 /* We support checksum offload for TCP and UDP only */
3608 if (xmit_type & XMIT_CSUM_TCP)
3609 hlen += tcp_hdrlen(skb) / 2;
3611 hlen += sizeof(struct udphdr) / 2;
3613 pbd->total_hlen_w = cpu_to_le16(hlen);
3616 if (xmit_type & XMIT_CSUM_TCP) {
3617 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3620 s8 fix = SKB_CS_OFF(skb); /* signed! */
3622 DP(NETIF_MSG_TX_QUEUED,
3623 "hlen %d fix %d csum before fix %x\n",
3624 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3626 /* HW bug: fixup the CSUM */
3627 pbd->tcp_pseudo_csum =
3628 bnx2x_csum_fix(skb_transport_header(skb),
3631 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3632 pbd->tcp_pseudo_csum);
3638 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3639 struct eth_tx_parse_bd_e2 *pbd_e2,
3640 struct eth_tx_parse_2nd_bd *pbd2,
3645 u8 outerip_off, outerip_len = 0;
3647 /* from outer IP to transport */
3648 hlen_w = (skb_inner_transport_header(skb) -
3649 skb_network_header(skb)) >> 1;
3652 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3654 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3656 /* outer IP header info */
3657 if (xmit_type & XMIT_CSUM_V4) {
3658 struct iphdr *iph = ip_hdr(skb);
3659 u32 csum = (__force u32)(~iph->check) -
3660 (__force u32)iph->tot_len -
3661 (__force u32)iph->frag_off;
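/* The field set below is the outer IP header checksum with tot_len and
 * frag_off backed out (hence "wo_len_flags_frag"); those fields change per
 * generated segment, so FW folds the per-segment values back in itself.
 */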
3663 outerip_len = iph->ihl << 1;
3665 pbd2->fw_ip_csum_wo_len_flags_frag =
3666 bswab16(csum_fold((__force __wsum)csum));
3668 pbd2->fw_ip_hdr_to_payload_w =
3669 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3670 pbd_e2->data.tunnel_data.flags |=
3671 ETH_TUNNEL_DATA_IPV6_OUTER;
3674 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3676 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3678 /* inner IP header info */
3679 if (xmit_type & XMIT_CSUM_ENC_V4) {
3680 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3682 pbd_e2->data.tunnel_data.pseudo_csum =
3683 bswab16(~csum_tcpudp_magic(
3684 inner_ip_hdr(skb)->saddr,
3685 inner_ip_hdr(skb)->daddr,
3686 0, IPPROTO_TCP, 0));
3688 pbd_e2->data.tunnel_data.pseudo_csum =
3689 bswab16(~csum_ipv6_magic(
3690 &inner_ipv6_hdr(skb)->saddr,
3691 &inner_ipv6_hdr(skb)->daddr,
3692 0, IPPROTO_TCP, 0));
3695 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3700 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3701 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3702 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3704 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3705 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3706 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3710 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3713 struct ipv6hdr *ipv6;
3715 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3718 if (xmit_type & XMIT_GSO_ENC_V6)
3719 ipv6 = inner_ipv6_hdr(skb);
3720 else /* XMIT_GSO_V6 */
3721 ipv6 = ipv6_hdr(skb);
3723 if (ipv6->nexthdr == NEXTHDR_IPV6)
3724 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3727 /* called with netif_tx_lock
3728 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3729 * netif_wake_queue()
3731 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3733 struct bnx2x *bp = netdev_priv(dev);
3735 struct netdev_queue *txq;
3736 struct bnx2x_fp_txdata *txdata;
3737 struct sw_tx_bd *tx_buf;
3738 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3739 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3740 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3741 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3742 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3743 u32 pbd_e2_parsing_data = 0;
3744 u16 pkt_prod, bd_prod;
3747 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3750 __le16 pkt_size = 0;
3752 u8 mac_type = UNICAST_ADDRESS;
3754 #ifdef BNX2X_STOP_ON_ERROR
3755 if (unlikely(bp->panic))
3756 return NETDEV_TX_BUSY;
3759 txq_index = skb_get_queue_mapping(skb);
3760 txq = netdev_get_tx_queue(dev, txq_index);
3762 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3764 txdata = &bp->bnx2x_txq[txq_index];
3766 /* enable this debug print to view the transmission queue being used
3767 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3768 txq_index, fp_index, txdata_index); */
3770 /* enable this debug print to view the transmission details
3771 DP(NETIF_MSG_TX_QUEUED,
3772 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3773 txdata->cid, fp_index, txdata_index, txdata, fp); */
3775 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3776 skb_shinfo(skb)->nr_frags +
3778 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3779 /* Handle special storage cases separately */
3780 if (txdata->tx_ring_size == 0) {
3781 struct bnx2x_eth_q_stats *q_stats =
3782 bnx2x_fp_qstats(bp, txdata->parent_fp);
3783 q_stats->driver_filtered_tx_pkt++;
3785 return NETDEV_TX_OK;
3787 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3788 netif_tx_stop_queue(txq);
3789 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3791 return NETDEV_TX_BUSY;
3794 DP(NETIF_MSG_TX_QUEUED,
3795 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3796 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3797 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3800 eth = (struct ethhdr *)skb->data;
3802 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3803 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3804 if (is_broadcast_ether_addr(eth->h_dest))
3805 mac_type = BROADCAST_ADDRESS;
3807 mac_type = MULTICAST_ADDRESS;
3810 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3811 /* First, check if we need to linearize the skb (due to FW
3812 restrictions). No need to check fragmentation if page size > 8K
3813 (there will be no violation to FW restrictions) */
3814 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3815 /* Statistics of linearization */
3817 if (skb_linearize(skb) != 0) {
3818 DP(NETIF_MSG_TX_QUEUED,
3819 "SKB linearization failed - silently dropping this SKB\n");
3820 dev_kfree_skb_any(skb);
3821 return NETDEV_TX_OK;
3825 /* Map skb linear data for DMA */
3826 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3827 skb_headlen(skb), DMA_TO_DEVICE);
3828 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3829 DP(NETIF_MSG_TX_QUEUED,
3830 "SKB mapping failed - silently dropping this SKB\n");
3831 dev_kfree_skb_any(skb);
3832 return NETDEV_TX_OK;
3835 Please read carefully. First we use one BD which we mark as start,
3836 then we have a parsing info BD (used for TSO or xsum),
3837 and only then we have the rest of the TSO BDs.
3838 (don't forget to mark the last one as last,
3839 and to unmap only AFTER you write to the BD ...)
3840 And above all, all pbd sizes are in words - NOT DWORDS!
3843 /* get current pkt produced now - advance it just before sending packet
3844 * since mapping of pages may fail and cause packet to be dropped
3846 pkt_prod = txdata->tx_pkt_prod;
3847 bd_prod = TX_BD(txdata->tx_bd_prod);
3849 /* get a tx_buf and first BD
3850 * tx_start_bd may be changed during SPLIT,
3851 * but first_bd will always stay first
3853 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3854 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3855 first_bd = tx_start_bd;
3857 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3859 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3860 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3861 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3862 } else if (bp->ptp_tx_skb) {
3863 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3865 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3866 /* schedule check for Tx timestamp */
3867 bp->ptp_tx_skb = skb_get(skb);
3868 bp->ptp_tx_start = jiffies;
3869 schedule_work(&bp->ptp_task);
3873 /* header nbd: indirectly zero other flags! */
3874 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3876 /* remember the first BD of the packet */
3877 tx_buf->first_bd = txdata->tx_bd_prod;
3881 DP(NETIF_MSG_TX_QUEUED,
3882 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3883 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3885 if (skb_vlan_tag_present(skb)) {
3886 tx_start_bd->vlan_or_ethertype =
3887 cpu_to_le16(skb_vlan_tag_get(skb));
3888 tx_start_bd->bd_flags.as_bitfield |=
3889 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3891 /* when transmitting in a vf, start bd must hold the ethertype
3892 * for fw to enforce it
3895 #ifndef BNX2X_STOP_ON_ERROR
3898 /* Still need to consider inband vlan for enforced */
3899 if (__vlan_get_tag(skb, &vlan_tci)) {
3900 tx_start_bd->vlan_or_ethertype =
3901 cpu_to_le16(ntohs(eth->h_proto));
3903 tx_start_bd->bd_flags.as_bitfield |=
3904 (X_ETH_INBAND_VLAN <<
3905 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3906 tx_start_bd->vlan_or_ethertype =
3907 cpu_to_le16(vlan_tci);
3909 #ifndef BNX2X_STOP_ON_ERROR
3911 /* used by FW for packet accounting */
3912 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3917 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3919 /* turn on parsing and get a BD */
3920 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3922 if (xmit_type & XMIT_CSUM)
3923 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3925 if (!CHIP_IS_E1x(bp)) {
3926 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3927 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3929 if (xmit_type & XMIT_CSUM_ENC) {
3930 u16 global_data = 0;
3932 /* Set PBD in enc checksum offload case */
3933 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3934 &pbd_e2_parsing_data,
3937 /* turn on 2nd parsing and get a BD */
3938 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3940 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3942 memset(pbd2, 0, sizeof(*pbd2));
3944 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3945 (skb_inner_network_header(skb) -
3948 if (xmit_type & XMIT_GSO_ENC)
3949 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3953 pbd2->global_data = cpu_to_le16(global_data);
3955 /* add an additional parsing BD indication to the start BD */
3956 SET_FLAG(tx_start_bd->general_data,
3957 ETH_TX_START_BD_PARSE_NBDS, 1);
3958 /* set encapsulation flag in start BD */
3959 SET_FLAG(tx_start_bd->general_data,
3960 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3962 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3965 } else if (xmit_type & XMIT_CSUM) {
3966 /* Set PBD in checksum offload case w/o encapsulation */
3967 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3968 &pbd_e2_parsing_data,
3972 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3973 /* Add the macs to the parsing BD if this is a vf or if
3974 * Tx Switching is enabled.
3977 /* override GRE parameters in BD */
3978 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3979 &pbd_e2->data.mac_addr.src_mid,
3980 &pbd_e2->data.mac_addr.src_lo,
3983 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3984 &pbd_e2->data.mac_addr.dst_mid,
3985 &pbd_e2->data.mac_addr.dst_lo,
3988 if (bp->flags & TX_SWITCHING)
3989 bnx2x_set_fw_mac_addr(
3990 &pbd_e2->data.mac_addr.dst_hi,
3991 &pbd_e2->data.mac_addr.dst_mid,
3992 &pbd_e2->data.mac_addr.dst_lo,
3994 #ifdef BNX2X_STOP_ON_ERROR
3995 /* Enforce security is always set in Stop on Error -
3996 * source mac should be present in the parsing BD
3998 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3999 &pbd_e2->data.mac_addr.src_mid,
4000 &pbd_e2->data.mac_addr.src_lo,
4005 SET_FLAG(pbd_e2_parsing_data,
4006 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4008 u16 global_data = 0;
4009 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4010 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4011 /* Set PBD in checksum offload case */
4012 if (xmit_type & XMIT_CSUM)
4013 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4015 SET_FLAG(global_data,
4016 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4017 pbd_e1x->global_data |= cpu_to_le16(global_data);
4020 /* Setup the data pointer of the first BD of the packet */
4021 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4022 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4023 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4024 pkt_size = tx_start_bd->nbytes;
4026 DP(NETIF_MSG_TX_QUEUED,
4027 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4028 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4029 le16_to_cpu(tx_start_bd->nbytes),
4030 tx_start_bd->bd_flags.as_bitfield,
4031 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4033 if (xmit_type & XMIT_GSO) {
4035 DP(NETIF_MSG_TX_QUEUED,
4036 "TSO packet len %d hlen %d total len %d tso size %d\n",
4037 skb->len, hlen, skb_headlen(skb),
4038 skb_shinfo(skb)->gso_size);
4040 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4042 if (unlikely(skb_headlen(skb) > hlen)) {
4044 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4048 if (!CHIP_IS_E1x(bp))
4049 pbd_e2_parsing_data |=
4050 (skb_shinfo(skb)->gso_size <<
4051 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4052 ETH_TX_PARSE_BD_E2_LSO_MSS;
4054 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4057 /* Set the PBD's parsing_data field if not zero
4058 * (for the chips newer than 57711).
4060 if (pbd_e2_parsing_data)
4061 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4063 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4065 /* Handle fragmented skb */
4066 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4067 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4069 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4070 skb_frag_size(frag), DMA_TO_DEVICE);
4071 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4072 unsigned int pkts_compl = 0, bytes_compl = 0;
4074 DP(NETIF_MSG_TX_QUEUED,
4075 "Unable to map page - dropping packet...\n");
4077 /* we need to unmap all buffers already mapped
4079 * first_bd->nbd needs to be properly updated
4080 * before the call to bnx2x_free_tx_pkt
4082 first_bd->nbd = cpu_to_le16(nbd);
4083 bnx2x_free_tx_pkt(bp, txdata,
4084 TX_BD(txdata->tx_pkt_prod),
4085 &pkts_compl, &bytes_compl);
4086 return NETDEV_TX_OK;
4089 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4090 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4091 if (total_pkt_bd == NULL)
4092 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4094 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4095 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4096 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4097 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4100 DP(NETIF_MSG_TX_QUEUED,
4101 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4102 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4103 le16_to_cpu(tx_data_bd->nbytes));
4106 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4108 /* update with actual num BDs */
4109 first_bd->nbd = cpu_to_le16(nbd);
4111 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4113 /* now send a tx doorbell, counting the next BD
4114 * if the packet contains or ends with it
4116 if (TX_BD_POFF(bd_prod) < nbd)
4119 /* total_pkt_bytes should be set on the first data BD if
4120 * it's not an LSO packet and there is more than one
4121 * data BD. In this case pkt_size is limited by an MTU value.
4122 * However we prefer to set it for an LSO packet (while we don't
4123 * have to) in order to save some CPU cycles in a non-LSO
4124 * case, when we care much more about them.
4126 if (total_pkt_bd != NULL)
4127 total_pkt_bd->total_pkt_bytes = pkt_size;
4130 DP(NETIF_MSG_TX_QUEUED,
4131 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4132 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4133 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4134 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4135 le16_to_cpu(pbd_e1x->total_hlen_w));
4137 DP(NETIF_MSG_TX_QUEUED,
4138 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4140 pbd_e2->data.mac_addr.dst_hi,
4141 pbd_e2->data.mac_addr.dst_mid,
4142 pbd_e2->data.mac_addr.dst_lo,
4143 pbd_e2->data.mac_addr.src_hi,
4144 pbd_e2->data.mac_addr.src_mid,
4145 pbd_e2->data.mac_addr.src_lo,
4146 pbd_e2->parsing_data);
4147 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4149 netdev_tx_sent_queue(txq, skb->len);
4151 skb_tx_timestamp(skb);
4153 txdata->tx_pkt_prod++;
4155 * Make sure that the BD data is updated before updating the producer
4156 * since FW might read the BD right after the producer is updated.
4157 * This is only applicable for weak-ordered memory model archs such
4158 * as IA-64. The following barrier is also mandatory since FW will
4159 * assume packets must have BDs.
4163 txdata->tx_db.data.prod += nbd;
4164 /* make sure descriptor update is observed by HW */
4167 DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4171 txdata->tx_bd_prod += nbd;
4173 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4174 netif_tx_stop_queue(txq);
4176 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4177 * ordering of set_bit() in netif_tx_stop_queue() and read of
4181 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4182 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4183 netif_tx_wake_queue(txq);
4187 return NETDEV_TX_OK;
4190 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4192 int mfw_vn = BP_FW_MB_IDX(bp);
4195 /* If the shmem shouldn't affect configuration, reflect */
4196 if (!IS_MF_BD(bp)) {
4199 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4206 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4207 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4208 c2s_map[0] = tmp & 0xff;
4209 c2s_map[1] = (tmp >> 8) & 0xff;
4210 c2s_map[2] = (tmp >> 16) & 0xff;
4211 c2s_map[3] = (tmp >> 24) & 0xff;
4213 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4214 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4215 c2s_map[4] = tmp & 0xff;
4216 c2s_map[5] = (tmp >> 8) & 0xff;
4217 c2s_map[6] = (tmp >> 16) & 0xff;
4218 c2s_map[7] = (tmp >> 24) & 0xff;
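/* Illustrative example: if the lower shmem word byte-swaps to 0x03020100,
 * priorities 0-3 map to outer CoS 0-3; the upper word covers priorities
 * 4-7 the same way, one byte per priority.
 */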
4220 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4221 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4222 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4226 * bnx2x_setup_tc - routine to configure net_device for multi tc
4228 * @netdev: net device to configure
4229 * @tc: number of traffic classes to enable
4231 * callback connected to the ndo_setup_tc function pointer
4233 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4235 struct bnx2x *bp = netdev_priv(dev);
4236 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4237 int cos, prio, count, offset;
4239 /* setup tc must be called under rtnl lock */
4242 /* no traffic classes requested. Aborting */
4244 netdev_reset_tc(dev);
4248 /* requested to support too many traffic classes */
4249 if (num_tc > bp->max_cos) {
4250 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4251 num_tc, bp->max_cos);
4255 /* declare amount of supported traffic classes */
4256 if (netdev_set_num_tc(dev, num_tc)) {
4257 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4261 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4263 /* configure priority to traffic class mapping */
4264 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4265 int outer_prio = c2s_map[prio];
4267 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4268 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4269 "mapping priority %d to tc %d\n",
4270 outer_prio, bp->prio_to_cos[outer_prio]);
4273 /* Use this configuration to differentiate tc0 from other COSes
4274 This can be used for ets or pfc, and save the effort of setting
4275 up a multi class queue disc or negotiating DCBX with a switch
4276 netdev_set_prio_tc_map(dev, 0, 0);
4277 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4278 for (prio = 1; prio < 16; prio++) {
4279 netdev_set_prio_tc_map(dev, prio, 1);
4280 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4283 /* configure traffic class to transmission queue mapping */
4284 for (cos = 0; cos < bp->max_cos; cos++) {
4285 count = BNX2X_NUM_ETH_QUEUES(bp);
4286 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4287 netdev_set_tc_queue(dev, cos, count, offset);
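/* For illustration: with 8 queues per class, tc0 maps to txqs 0-7, tc1 to
 * 8-15 and tc2 to 16-23 - each traffic class gets its own contiguous block
 * of transmission queues.
 */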
4288 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4289 "mapping tc %d to offset %d count %d\n",
4290 cos, offset, count);
4296 int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4299 struct tc_mqprio_qopt *mqprio = type_data;
4301 if (type != TC_SETUP_QDISC_MQPRIO)
4304 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4306 return bnx2x_setup_tc(dev, mqprio->num_tc);
4309 /* called with rtnl_lock */
4310 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4312 struct sockaddr *addr = p;
4313 struct bnx2x *bp = netdev_priv(dev);
4316 if (!is_valid_ether_addr(addr->sa_data)) {
4317 BNX2X_ERR("Requested MAC address is not valid\n");
4321 if (IS_MF_STORAGE_ONLY(bp)) {
4322 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4326 if (netif_running(dev)) {
4327 rc = bnx2x_set_eth_mac(bp, false);
4332 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4334 if (netif_running(dev))
4335 rc = bnx2x_set_eth_mac(bp, true);
4337 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4338 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4343 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4345 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4346 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4351 if (IS_FCOE_IDX(fp_index)) {
4352 memset(sb, 0, sizeof(union host_hc_status_block));
4353 fp->status_blk_mapping = 0;
4356 if (!CHIP_IS_E1x(bp))
4357 BNX2X_PCI_FREE(sb->e2_sb,
4358 bnx2x_fp(bp, fp_index,
4359 status_blk_mapping),
4360 sizeof(struct host_hc_status_block_e2));
4362 BNX2X_PCI_FREE(sb->e1x_sb,
4363 bnx2x_fp(bp, fp_index,
4364 status_blk_mapping),
4365 sizeof(struct host_hc_status_block_e1x));
4369 if (!skip_rx_queue(bp, fp_index)) {
4370 bnx2x_free_rx_bds(fp);
4372 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4373 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4374 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4375 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4376 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4378 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4379 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4380 sizeof(struct eth_fast_path_rx_cqe) *
4384 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4385 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4386 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4387 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4391 if (!skip_tx_queue(bp, fp_index)) {
4392 /* fastpath tx rings: tx_buf tx_desc */
4393 for_each_cos_in_tx_queue(fp, cos) {
4394 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4396 DP(NETIF_MSG_IFDOWN,
4397 "freeing tx memory of fp %d cos %d cid %d\n",
4398 fp_index, cos, txdata->cid);
4400 BNX2X_FREE(txdata->tx_buf_ring);
4401 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4402 txdata->tx_desc_mapping,
4403 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4406 /* end of fastpath */
4409 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4410 {
4411 int i;
4412 for_each_cnic_queue(bp, i)
4413 bnx2x_free_fp_mem_at(bp, i);
4414 }
4416 void bnx2x_free_fp_mem(struct bnx2x *bp)
4417 {
4418 int i;
4419 for_each_eth_queue(bp, i)
4420 bnx2x_free_fp_mem_at(bp, i);
4421 }
4423 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4424 {
4425 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4426 if (!CHIP_IS_E1x(bp)) {
4427 bnx2x_fp(bp, index, sb_index_values) =
4428 (__le16 *)status_blk.e2_sb->sb.index_values;
4429 bnx2x_fp(bp, index, sb_running_index) =
4430 (__le16 *)status_blk.e2_sb->sb.running_index;
4431 } else {
4432 bnx2x_fp(bp, index, sb_index_values) =
4433 (__le16 *)status_blk.e1x_sb->sb.index_values;
4434 bnx2x_fp(bp, index, sb_running_index) =
4435 (__le16 *)status_blk.e1x_sb->sb.running_index;
4436 }
4437 }
4439 /* Returns the number of actually allocated BDs */
4440 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4441 int rx_ring_size)
4442 {
4443 struct bnx2x *bp = fp->bp;
4444 u16 ring_prod, cqe_ring_prod;
4445 int i, failure_cnt = 0;
4447 fp->rx_comp_cons = 0;
4448 cqe_ring_prod = ring_prod = 0;
4450 /* This routine is called only during fp init so
4451 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4452 */
4453 for (i = 0; i < rx_ring_size; i++) {
4454 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4455 failure_cnt++;
4456 continue;
4457 }
4458 ring_prod = NEXT_RX_IDX(ring_prod);
4459 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4460 WARN_ON(ring_prod <= (i - failure_cnt));
4461 }
4463 if (failure_cnt)
4464 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4465 i - failure_cnt, fp->index);
4467 fp->rx_bd_prod = ring_prod;
4468 /* Limit the CQE producer by the CQE ring size */
4469 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4470 cqe_ring_prod);
4472 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4474 return i - failure_cnt;
4475 }
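/* Illustration (not from the original source): the return value is the number
 * of RX buffers actually posted, i.e. i - failure_cnt. If the caller asked for
 * rx_ring_size = 4000 and three GFP_KERNEL allocations failed, 3997 is
 * returned; bnx2x_alloc_fp_mem_at() then compares this against the requested
 * size and falls into its low-memory handling when the shortfall is too large.
 */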
4477 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4478 {
4479 int i;
4481 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4482 struct eth_rx_cqe_next_page *nextpg;
4484 nextpg = (struct eth_rx_cqe_next_page *)
4485 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4486 nextpg->addr_hi =
4487 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4488 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4489 nextpg->addr_lo =
4490 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4491 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4492 }
4493 }
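/* Illustration (not from the original source): the RCQ is a chain of
 * NUM_RCQ_RINGS pages; the last of the RCQ_DESC_CNT entries in each page is
 * rewritten above as a "next page" pointer rather than a completion, and the
 * (i % NUM_RCQ_RINGS) term wraps the final page back to page 0. With, say,
 * 8 pages the hardware walks page 0 -> 1 -> ... -> 7 -> 0.
 */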
4495 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4496 {
4497 union host_hc_status_block *sb;
4498 struct bnx2x_fastpath *fp = &bp->fp[index];
4499 int ring_size = 0;
4500 u8 cos;
4501 int rx_ring_size = 0;
4503 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4504 rx_ring_size = MIN_RX_SIZE_NONTPA;
4505 bp->rx_ring_size = rx_ring_size;
4506 } else if (!bp->rx_ring_size) {
4507 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4509 if (CHIP_IS_E3(bp)) {
4510 u32 cfg = SHMEM_RD(bp,
4511 dev_info.port_hw_config[BP_PORT(bp)].
4512 default_cfg);
4514 /* Decrease ring size for 1G functions */
4515 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4516 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4517 rx_ring_size /= 10;
4518 }
4520 /* allocate at least number of buffers required by FW */
4521 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4522 MIN_RX_SIZE_TPA, rx_ring_size);
4524 bp->rx_ring_size = rx_ring_size;
4525 } else /* if rx_ring_size specified - use it */
4526 rx_ring_size = bp->rx_ring_size;
4528 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
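/* Illustration (not from the original source), with purely hypothetical
 * numbers: if MAX_RX_AVAIL were 32768 and the device ran 8 RX queues, the
 * initial estimate would be 32768 / 8 = 4096 buffers per queue, divided by 10
 * on a 1G SGMII E3 function, and finally clamped upward to the firmware
 * minimum (MIN_RX_SIZE_TPA or MIN_RX_SIZE_NONTPA, depending on TPA).
 */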
4531 sb = &bnx2x_fp(bp, index, status_blk);
4533 if (!IS_FCOE_IDX(index)) {
4534 /* status blocks */
4535 if (!CHIP_IS_E1x(bp)) {
4536 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4537 sizeof(struct host_hc_status_block_e2));
4538 if (!sb->e2_sb)
4539 goto alloc_mem_err;
4540 } else {
4541 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4542 sizeof(struct host_hc_status_block_e1x));
4543 if (!sb->e1x_sb)
4544 goto alloc_mem_err;
4545 }
4546 }
4548 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4549 * set shortcuts for it.
4550 */
4551 if (!IS_FCOE_IDX(index))
4552 set_sb_shortcuts(bp, index);
4555 if (!skip_tx_queue(bp, index)) {
4556 /* fastpath tx rings: tx_buf tx_desc */
4557 for_each_cos_in_tx_queue(fp, cos) {
4558 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4560 DP(NETIF_MSG_IFUP,
4561 "allocating tx memory of fp %d cos %d\n",
4562 index, cos);
4564 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4565 sizeof(struct sw_tx_bd),
4566 GFP_KERNEL);
4567 if (!txdata->tx_buf_ring)
4568 goto alloc_mem_err;
4569 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4570 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4571 if (!txdata->tx_desc_ring)
4572 goto alloc_mem_err;
4573 }
4574 }
4577 if (!skip_rx_queue(bp, index)) {
4578 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4579 bnx2x_fp(bp, index, rx_buf_ring) =
4580 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4581 if (!bnx2x_fp(bp, index, rx_buf_ring))
4582 goto alloc_mem_err;
4583 bnx2x_fp(bp, index, rx_desc_ring) =
4584 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4585 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4586 if (!bnx2x_fp(bp, index, rx_desc_ring))
4587 goto alloc_mem_err;
4589 /* Seed all CQEs by 1s */
4590 bnx2x_fp(bp, index, rx_comp_ring) =
4591 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4592 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4593 if (!bnx2x_fp(bp, index, rx_comp_ring))
4594 goto alloc_mem_err;
4596 /* SGE ring */
4597 bnx2x_fp(bp, index, rx_page_ring) =
4598 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4600 if (!bnx2x_fp(bp, index, rx_page_ring))
4601 goto alloc_mem_err;
4602 bnx2x_fp(bp, index, rx_sge_ring) =
4603 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4604 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4605 if (!bnx2x_fp(bp, index, rx_sge_ring))
4606 goto alloc_mem_err;
4608 bnx2x_set_next_page_rx_bd(fp);
4611 bnx2x_set_next_page_rx_cq(fp);
4612 }
4614 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4615 if (ring_size < rx_ring_size)
4616 goto alloc_mem_err;
4618 return 0;
4621 /* handles low memory cases */
4622 alloc_mem_err:
4623 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4624 index, ring_size);
4625 /* FW will drop all packets if the queue is not big enough.
4626 * In these cases we disable the queue.
4627 * Min size is different for OOO, TPA and non-TPA queues
4628 */
4629 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4630 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4631 /* release memory allocated for this queue */
4632 bnx2x_free_fp_mem_at(bp, index);
4633 return -ENOMEM;
4634 }
4635 return 0;
4636 }
4638 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4639 {
4640 if (!NO_FCOE(bp))
4641 /* FCoE */
4642 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4643 /* we will fail load process instead of mark
4644 * NO_FCOE_FLAG
4645 */
4646 return -ENOMEM;
4648 return 0;
4649 }
4651 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4652 {
4653 int i;
4655 /* 1. Allocate FP for leading - fatal if error
4656 * 2. Allocate RSS - fix number of queues if error
4657 */
4659 /* leading */
4660 if (bnx2x_alloc_fp_mem_at(bp, 0))
4661 return -ENOMEM;
4664 for_each_nondefault_eth_queue(bp, i)
4665 if (bnx2x_alloc_fp_mem_at(bp, i))
4666 break;
4668 /* handle memory failures */
4669 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4670 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4672 WARN_ON(delta < 0);
4673 bnx2x_shrink_eth_fp(bp, delta);
4674 if (CNIC_SUPPORT(bp))
4675 /* move non eth FPs next to last eth FP
4676 * must be done in that order
4677 * FCOE_IDX < FWD_IDX < OOO_IDX
4678 */
4680 /* move FCoE fp even if NO_FCOE_FLAG is on */
4681 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4682 bp->num_ethernet_queues -= delta;
4683 bp->num_queues = bp->num_ethernet_queues +
4684 bp->num_cnic_queues;
4685 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4686 bp->num_queues + delta, bp->num_queues);
4687 }
4689 return 0;
4690 }
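/* Illustration (not from the original source): if 8 ETH queues were requested
 * but allocation failed at i == 5, delta == 3. The ETH fastpaths are shrunk by
 * 3, the FCoE fastpath is moved from index 8 down to index 5 so it stays
 * adjacent to the last ETH queue, and the queue count is reported as adjusted
 * from 9 to 6 (assuming one CNIC queue).
 */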
4692 void bnx2x_free_mem_bp(struct bnx2x *bp)
4693 {
4694 int i;
4696 for (i = 0; i < bp->fp_array_size; i++)
4697 kfree(bp->fp[i].tpa_info);
4698 kfree(bp->fp);
4699 kfree(bp->sp_objs);
4700 kfree(bp->fp_stats);
4701 kfree(bp->bnx2x_txq);
4702 kfree(bp->msix_table);
4703 kfree(bp->ilt);
4704 }
4706 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4707 {
4708 struct bnx2x_fastpath *fp;
4709 struct msix_entry *tbl;
4710 struct bnx2x_ilt *ilt;
4711 int msix_table_size = 0;
4712 int fp_array_size, txq_array_size;
4713 int i;
4715 /*
4716 * The biggest MSI-X table we might need is as a maximum number of fast
4717 * path IGU SBs plus default SB (for PF only).
4718 */
4719 msix_table_size = bp->igu_sb_cnt;
4720 if (IS_PF(bp))
4721 msix_table_size++;
4722 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4724 /* fp array: RSS plus CNIC related L2 queues */
4725 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4726 bp->fp_array_size = fp_array_size;
4727 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4729 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4730 if (!fp)
4731 goto alloc_err;
4732 for (i = 0; i < bp->fp_array_size; i++) {
4733 fp[i].tpa_info =
4734 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4735 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4736 if (!(fp[i].tpa_info))
4737 goto alloc_err;
4738 }
4740 bp->fp = fp;
4742 /* allocate sp objs */
4743 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4744 GFP_KERNEL);
4745 if (!bp->sp_objs)
4746 goto alloc_err;
4748 /* allocate fp_stats */
4749 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4750 GFP_KERNEL);
4751 if (!bp->fp_stats)
4752 goto alloc_err;
4754 /* Allocate memory for the transmission queues array */
4755 txq_array_size =
4756 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4757 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4759 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4760 GFP_KERNEL);
4761 if (!bp->bnx2x_txq)
4762 goto alloc_err;
4764 /* msix table */
4765 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4766 if (!tbl)
4767 goto alloc_err;
4768 bp->msix_table = tbl;
4770 /* ilt */
4771 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4772 if (!ilt)
4773 goto alloc_err;
4774 bp->ilt = ilt;
4776 return 0;
4777 alloc_err:
4778 bnx2x_free_mem_bp(bp);
4779 return -ENOMEM;
4780 }
4782 int bnx2x_reload_if_running(struct net_device *dev)
4783 {
4784 struct bnx2x *bp = netdev_priv(dev);
4786 if (unlikely(!netif_running(dev)))
4787 return 0;
4789 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4790 return bnx2x_nic_load(bp, LOAD_NORMAL);
4791 }
4793 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4794 {
4795 u32 sel_phy_idx = 0;
4796 if (bp->link_params.num_phys <= 1)
4797 return INT_PHY;
4799 if (bp->link_vars.link_up) {
4800 sel_phy_idx = EXT_PHY1;
4801 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4802 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4803 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4804 sel_phy_idx = EXT_PHY2;
4805 } else {
4807 switch (bnx2x_phy_selection(&bp->link_params)) {
4808 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4809 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4810 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4811 sel_phy_idx = EXT_PHY1;
4812 break;
4813 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4814 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4815 sel_phy_idx = EXT_PHY2;
4816 break;
4817 }
4818 }
4820 return sel_phy_idx;
4821 }
4822 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4823 {
4824 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4825 /*
4826 * The selected activated PHY is always after swapping (in case PHY
4827 * swapping is enabled). So when swapping is enabled, we need to reverse
4828 * the configuration
4829 */
4831 if (bp->link_params.multi_phy_config &
4832 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4833 if (sel_phy_idx == EXT_PHY1)
4834 sel_phy_idx = EXT_PHY2;
4835 else if (sel_phy_idx == EXT_PHY2)
4836 sel_phy_idx = EXT_PHY1;
4837 }
4838 return LINK_CONFIG_IDX(sel_phy_idx);
4839 }
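/* Illustration (not from the original source): with PHY swapping enabled in
 * multi_phy_config, a currently active EXT_PHY2 is reported through the
 * configuration slot of EXT_PHY1 (and vice versa), because the stored link
 * configuration is indexed by the pre-swap position of the PHY.
 */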
4841 #ifdef NETDEV_FCOE_WWNN
4842 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4843 {
4844 struct bnx2x *bp = netdev_priv(dev);
4845 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4847 switch (type) {
4848 case NETDEV_FCOE_WWNN:
4849 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4850 cp->fcoe_wwn_node_name_lo);
4851 break;
4852 case NETDEV_FCOE_WWPN:
4853 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4854 cp->fcoe_wwn_port_name_lo);
4855 break;
4856 default:
4857 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4858 return -EINVAL;
4859 }
4861 return 0;
4862 }
4863 #endif
4865 /* called with rtnl_lock */
4866 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4867 {
4868 struct bnx2x *bp = netdev_priv(dev);
4870 if (pci_num_vf(bp->pdev)) {
4871 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4872 return -EPERM;
4873 }
4875 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4876 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4877 return -EAGAIN;
4878 }
4880 /* This does not race with packet allocation
4881 * because the actual alloc size is
4882 * only updated as part of load
4883 */
4884 dev->mtu = new_mtu;
4886 if (!bnx2x_mtu_allows_gro(new_mtu))
4887 dev->features &= ~NETIF_F_GRO_HW;
4889 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4890 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4892 return bnx2x_reload_if_running(dev);
4893 }
4895 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4896 netdev_features_t features)
4897 {
4898 struct bnx2x *bp = netdev_priv(dev);
4900 if (pci_num_vf(bp->pdev)) {
4901 netdev_features_t changed = dev->features ^ features;
4903 /* Revert the requested changes in features if they
4904 * would require internal reload of PF in bnx2x_set_features().
4905 */
4906 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4907 features &= ~NETIF_F_RXCSUM;
4908 features |= dev->features & NETIF_F_RXCSUM;
4909 }
4911 if (changed & NETIF_F_LOOPBACK) {
4912 features &= ~NETIF_F_LOOPBACK;
4913 features |= dev->features & NETIF_F_LOOPBACK;
4914 }
4915 }
4917 /* TPA requires Rx CSUM offloading */
4918 if (!(features & NETIF_F_RXCSUM))
4919 features &= ~NETIF_F_LRO;
4921 if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4922 features &= ~NETIF_F_GRO_HW;
4923 if (features & NETIF_F_GRO_HW)
4924 features &= ~NETIF_F_LRO;
4926 return features;
4927 }
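/* Illustration (not from the original source) of the dependencies enforced
 * above: clearing NETIF_F_RXCSUM implicitly clears NETIF_F_LRO; NETIF_F_GRO_HW
 * stays set only while software GRO is enabled and the MTU still permits
 * hardware GRO; and whenever NETIF_F_GRO_HW is active, NETIF_F_LRO is dropped
 * so the two aggregation schemes are never enabled at the same time.
 */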
4929 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4930 {
4931 struct bnx2x *bp = netdev_priv(dev);
4932 netdev_features_t changes = features ^ dev->features;
4933 bool bnx2x_reload = false;
4934 int rc;
4936 /* VFs or non SRIOV PFs should be able to change loopback feature */
4937 if (!pci_num_vf(bp->pdev)) {
4938 if (features & NETIF_F_LOOPBACK) {
4939 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4940 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4941 bnx2x_reload = true;
4942 }
4943 } else {
4944 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4945 bp->link_params.loopback_mode = LOOPBACK_NONE;
4946 bnx2x_reload = true;
4947 }
4948 }
4949 }
4951 /* Don't care about GRO changes */
4952 changes &= ~NETIF_F_GRO;
4954 if (changes)
4955 bnx2x_reload = true;
4957 if (bnx2x_reload) {
4958 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4959 dev->features = features;
4960 rc = bnx2x_reload_if_running(dev);
4961 if (rc)
4962 return rc;
4963 /* else: bnx2x_nic_load() will be called at end of recovery */
4964 }
4965 }
4966 return 0;
4967 }
4969 void bnx2x_tx_timeout(struct net_device *dev)
4970 {
4971 struct bnx2x *bp = netdev_priv(dev);
4973 /* We want the information of the dump logged,
4974 * but calling bnx2x_panic() would kill all chances of recovery.
4975 */
4977 #ifndef BNX2X_STOP_ON_ERROR
4978 bnx2x_panic_dump(bp, false);
4979 #else
4980 bnx2x_panic();
4981 #endif
4983 /* This allows the netif to be shutdown gracefully before resetting */
4984 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4985 }
4987 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4988 {
4989 struct net_device *dev = pci_get_drvdata(pdev);
4990 struct bnx2x *bp;
4992 if (!dev) {
4993 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4994 return -ENODEV;
4995 }
4996 bp = netdev_priv(dev);
4998 rtnl_lock();
5000 pci_save_state(pdev);
5002 if (!netif_running(dev)) {
5003 rtnl_unlock();
5004 return 0;
5005 }
5007 netif_device_detach(dev);
5009 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5011 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5013 rtnl_unlock();
5015 return 0;
5016 }
5018 int bnx2x_resume(struct pci_dev *pdev)
5019 {
5020 struct net_device *dev = pci_get_drvdata(pdev);
5021 struct bnx2x *bp;
5022 int rc;
5024 if (!dev) {
5025 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5026 return -ENODEV;
5027 }
5028 bp = netdev_priv(dev);
5030 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5031 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5032 return -EAGAIN;
5033 }
5035 rtnl_lock();
5037 pci_restore_state(pdev);
5039 if (!netif_running(dev)) {
5040 rtnl_unlock();
5041 return 0;
5042 }
5044 bnx2x_set_power_state(bp, PCI_D0);
5045 netif_device_attach(dev);
5047 rc = bnx2x_nic_load(bp, LOAD_OPEN);
5049 rtnl_unlock();
5051 return rc;
5052 }
5054 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5055 u32 cid)
5056 {
5057 if (!cxt) {
5058 BNX2X_ERR("bad context pointer %p\n", cxt);
5059 return;
5060 }
5062 /* ustorm cxt validation */
5063 cxt->ustorm_ag_context.cdu_usage =
5064 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5065 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5066 /* xcontext validation */
5067 cxt->xstorm_ag_context.cdu_reserved =
5068 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5069 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5070 }
5072 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5073 u8 fw_sb_id, u8 sb_index,
5074 u8 ticks)
5075 {
5076 u32 addr = BAR_CSTRORM_INTMEM +
5077 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5078 REG_WR8(bp, addr, ticks);
5079 DP(NETIF_MSG_IFUP,
5080 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5081 port, fw_sb_id, sb_index, ticks);
5082 }
5084 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5085 u16 fw_sb_id, u8 sb_index,
5086 u8 disable)
5087 {
5088 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5089 u32 addr = BAR_CSTRORM_INTMEM +
5090 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5091 u8 flags = REG_RD8(bp, addr);
5092 /* clear and set */
5093 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5094 flags |= enable_flag;
5095 REG_WR8(bp, addr, flags);
5096 DP(NETIF_MSG_IFUP,
5097 "port %x fw_sb_id %d sb_index %d disable %d\n",
5098 port, fw_sb_id, sb_index, disable);
5099 }
5101 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5102 u8 sb_index, u8 disable, u16 usec)
5103 {
5104 int port = BP_PORT(bp);
5105 u8 ticks = usec / BNX2X_BTR;
5107 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5109 disable = disable ? 1 : (usec ? 0 : 1);
5110 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5111 }
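/* Illustration (not from the original source): the coalescing timeout is
 * programmed in units of BNX2X_BTR microseconds (assumed here to be 4). A
 * request of usec = 48 therefore becomes ticks = 12, while usec = 0 forces
 * disable to 1 regardless of the caller's flag, so a zero interval always
 * turns host coalescing off for that status-block index.
 */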
5113 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5114 u32 verbose)
5115 {
5116 smp_mb__before_atomic();
5117 set_bit(flag, &bp->sp_rtnl_state);
5118 smp_mb__after_atomic();
5119 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5120 flag);
5121 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5122 }
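/* Note added for clarity (not from the original source): the
 * smp_mb__before_atomic()/smp_mb__after_atomic() pair around set_bit() makes
 * the flag visible before the delayed work runs, so the sp_rtnl worker
 * reading sp_rtnl_state on another CPU observes the bit that was just
 * scheduled.
 */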