linux.git: drivers/net/ethernet/broadcom/bnxt/bnxt.c (blob 2ad007e5ee7fd20588eefa7f8919b84585a1899c)
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/udp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <net/page_pool.h>
58
59 #include "bnxt_hsi.h"
60 #include "bnxt.h"
61 #include "bnxt_ulp.h"
62 #include "bnxt_sriov.h"
63 #include "bnxt_ethtool.h"
64 #include "bnxt_dcb.h"
65 #include "bnxt_xdp.h"
66 #include "bnxt_vfr.h"
67 #include "bnxt_tc.h"
68 #include "bnxt_devlink.h"
69 #include "bnxt_debugfs.h"
70
71 #define BNXT_TX_TIMEOUT         (5 * HZ)
72
73 static const char version[] =
74         "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
75
76 MODULE_LICENSE("GPL");
77 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
78 MODULE_VERSION(DRV_MODULE_VERSION);
79
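/* BNXT_RX_COPY_THRESH: RX packets up to this many bytes are copied into a
 * freshly allocated skb instead of handing the DMA buffer to the stack.
 * BNXT_TX_PUSH_THRESH: largest packet that may be written inline ("pushed")
 * through the doorbell BAR on the TX fast path.
 */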
80 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
81 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
82 #define BNXT_RX_COPY_THRESH 256
83
84 #define BNXT_TX_PUSH_THRESH 164
85
86 enum board_idx {
87         BCM57301,
88         BCM57302,
89         BCM57304,
90         BCM57417_NPAR,
91         BCM58700,
92         BCM57311,
93         BCM57312,
94         BCM57402,
95         BCM57404,
96         BCM57406,
97         BCM57402_NPAR,
98         BCM57407,
99         BCM57412,
100         BCM57414,
101         BCM57416,
102         BCM57417,
103         BCM57412_NPAR,
104         BCM57314,
105         BCM57417_SFP,
106         BCM57416_SFP,
107         BCM57404_NPAR,
108         BCM57406_NPAR,
109         BCM57407_SFP,
110         BCM57407_NPAR,
111         BCM57414_NPAR,
112         BCM57416_NPAR,
113         BCM57452,
114         BCM57454,
115         BCM5745x_NPAR,
116         BCM57508,
117         BCM57504,
118         BCM57502,
119         BCM57508_NPAR,
120         BCM57504_NPAR,
121         BCM57502_NPAR,
122         BCM58802,
123         BCM58804,
124         BCM58808,
125         NETXTREME_E_VF,
126         NETXTREME_C_VF,
127         NETXTREME_S_VF,
128         NETXTREME_E_P5_VF,
129 };
130
131 /* indexed by enum above */
132 static const struct {
133         char *name;
134 } board_info[] = {
135         [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
136         [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
137         [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
138         [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
139         [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
140         [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
141         [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
142         [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
143         [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
144         [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
145         [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
146         [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
147         [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
148         [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
149         [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
150         [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
151         [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
152         [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
153         [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
154         [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
155         [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
156         [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
157         [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
158         [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
159         [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
160         [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
161         [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
162         [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
163         [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
164         [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
165         [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
166         [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
167         [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
168         [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
169         [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
170         [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
171         [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
172         [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
173         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
174         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
175         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
176         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
177 };
178
179 static const struct pci_device_id bnxt_pci_tbl[] = {
180         { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
181         { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
182         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
183         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
184         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
185         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
186         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
187         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
188         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
189         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
190         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
191         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
192         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
193         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
194         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
195         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
196         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
197         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
198         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
199         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
200         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
201         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
202         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
203         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
204         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
205         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
206         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
207         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
208         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
209         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
210         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
211         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
212         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
213         { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
214         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
215         { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
216         { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
217         { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
218         { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
219         { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
220         { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
221         { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
222         { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
223         { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
224         { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
225         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
226 #ifdef CONFIG_BNXT_SRIOV
227         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
228         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
229         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
230         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
231         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
232         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
233         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
234         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
235         { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
236         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
237         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
238 #endif
239         { 0 }
240 };
241
242 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
243
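/* HWRM commands from VFs that the PF asks the firmware to forward to it
 * (via the vf_req_fwd bitmap when the driver registers) so the PF driver
 * can inspect and complete them.
 */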
244 static const u16 bnxt_vf_req_snif[] = {
245         HWRM_FUNC_CFG,
246         HWRM_FUNC_VF_CFG,
247         HWRM_PORT_PHY_QCFG,
248         HWRM_CFA_L2_FILTER_ALLOC,
249 };
250
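/* Firmware asynchronous event completions the driver subscribes to when it
 * registers with the firmware.
 */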
251 static const u16 bnxt_async_events_arr[] = {
252         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
253         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
254         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
255         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
256         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
257         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
258         ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
259         ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
260         ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
261 };
262
263 static struct workqueue_struct *bnxt_pf_wq;
264
265 static bool bnxt_vf_pciid(enum board_idx idx)
266 {
267         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
268                 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
269 }
270
271 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
272 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
273 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
274
275 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
276                 writel(DB_CP_IRQ_DIS_FLAGS, db)
277
278 #define BNXT_DB_CQ(db, idx)                                             \
279         writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
280
281 #define BNXT_DB_NQ_P5(db, idx)                                          \
282         writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
283
284 #define BNXT_DB_CQ_ARM(db, idx)                                         \
285         writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
286
287 #define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
288         writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
289
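/* Doorbell helpers: P5 (57500-series) chips use 64-bit NQ doorbells keyed by
 * db_key64, while older chips use the legacy 32-bit completion-ring doorbell.
 */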
290 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
291 {
292         if (bp->flags & BNXT_FLAG_CHIP_P5)
293                 BNXT_DB_NQ_P5(db, idx);
294         else
295                 BNXT_DB_CQ(db, idx);
296 }
297
298 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
299 {
300         if (bp->flags & BNXT_FLAG_CHIP_P5)
301                 BNXT_DB_NQ_ARM_P5(db, idx);
302         else
303                 BNXT_DB_CQ_ARM(db, idx);
304 }
305
306 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
307 {
308         if (bp->flags & BNXT_FLAG_CHIP_P5)
309                 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
310                        db->doorbell);
311         else
312                 BNXT_DB_CQ(db, idx);
313 }
314
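/* TX length hint for the hardware, indexed by packet length in 512-byte
 * units (length >> 9).
 */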
315 const u16 bnxt_lhint_arr[] = {
316         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
317         TX_BD_FLAGS_LHINT_512_TO_1023,
318         TX_BD_FLAGS_LHINT_1024_TO_2047,
319         TX_BD_FLAGS_LHINT_1024_TO_2047,
320         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
321         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
322         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
323         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
324         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
325         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
326         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
327         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
328         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
329         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
330         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
331         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
332         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
333         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
334         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
335 };
336
337 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
338 {
339         struct metadata_dst *md_dst = skb_metadata_dst(skb);
340
341         if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
342                 return 0;
343
344         return md_dst->u.port_info.port_id;
345 }
346
347 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
348 {
349         struct bnxt *bp = netdev_priv(dev);
350         struct tx_bd *txbd;
351         struct tx_bd_ext *txbd1;
352         struct netdev_queue *txq;
353         int i;
354         dma_addr_t mapping;
355         unsigned int length, pad = 0;
356         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
357         u16 prod, last_frag;
358         struct pci_dev *pdev = bp->pdev;
359         struct bnxt_tx_ring_info *txr;
360         struct bnxt_sw_tx_bd *tx_buf;
361
362         i = skb_get_queue_mapping(skb);
363         if (unlikely(i >= bp->tx_nr_rings)) {
364                 dev_kfree_skb_any(skb);
365                 return NETDEV_TX_OK;
366         }
367
368         txq = netdev_get_tx_queue(dev, i);
369         txr = &bp->tx_ring[bp->tx_ring_map[i]];
370         prod = txr->tx_prod;
371
372         free_size = bnxt_tx_avail(bp, txr);
373         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
374                 netif_tx_stop_queue(txq);
375                 return NETDEV_TX_BUSY;
376         }
377
378         length = skb->len;
379         len = skb_headlen(skb);
380         last_frag = skb_shinfo(skb)->nr_frags;
381
382         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
383
384         txbd->tx_bd_opaque = prod;
385
386         tx_buf = &txr->tx_buf_ring[prod];
387         tx_buf->skb = skb;
388         tx_buf->nr_frags = last_frag;
389
390         vlan_tag_flags = 0;
391         cfa_action = bnxt_xmit_get_cfa_action(skb);
392         if (skb_vlan_tag_present(skb)) {
393                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
394                                  skb_vlan_tag_get(skb);
395                 /* Currently supports 8021Q, 8021AD vlan offloads
396                  * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
397                  */
398                 if (skb->vlan_proto == htons(ETH_P_8021Q))
399                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
400         }
401
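        /* TX push: if the ring is completely empty and the packet is small
         * enough, write the BDs and packet data straight into the doorbell
         * BAR instead of having the NIC DMA the descriptors, saving latency.
         */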
402         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
403                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
404                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
405                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
406                 void __iomem *db = txr->tx_db.doorbell;
407                 void *pdata = tx_push_buf->data;
408                 u64 *end;
409                 int j, push_len;
410
411                 /* Set COAL_NOW to be ready quickly for the next push */
412                 tx_push->tx_bd_len_flags_type =
413                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
414                                         TX_BD_TYPE_LONG_TX_BD |
415                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
416                                         TX_BD_FLAGS_COAL_NOW |
417                                         TX_BD_FLAGS_PACKET_END |
418                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
419
420                 if (skb->ip_summed == CHECKSUM_PARTIAL)
421                         tx_push1->tx_bd_hsize_lflags =
422                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
423                 else
424                         tx_push1->tx_bd_hsize_lflags = 0;
425
426                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
427                 tx_push1->tx_bd_cfa_action =
428                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
429
430                 end = pdata + length;
431                 end = PTR_ALIGN(end, 8) - 1;
432                 *end = 0;
433
434                 skb_copy_from_linear_data(skb, pdata, len);
435                 pdata += len;
436                 for (j = 0; j < last_frag; j++) {
437                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
438                         void *fptr;
439
440                         fptr = skb_frag_address_safe(frag);
441                         if (!fptr)
442                                 goto normal_tx;
443
444                         memcpy(pdata, fptr, skb_frag_size(frag));
445                         pdata += skb_frag_size(frag);
446                 }
447
448                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
449                 txbd->tx_bd_haddr = txr->data_mapping;
450                 prod = NEXT_TX(prod);
451                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
452                 memcpy(txbd, tx_push1, sizeof(*txbd));
453                 prod = NEXT_TX(prod);
454                 tx_push->doorbell =
455                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
456                 txr->tx_prod = prod;
457
458                 tx_buf->is_push = 1;
459                 netdev_tx_sent_queue(txq, skb->len);
460                 wmb();  /* Sync is_push and byte queue before pushing data */
461
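                /* push_len is the push BD header plus packet data, rounded up
                 * to 8-byte words; longer pushes are split into a 64-bit copy
                 * of the first 16 words followed by 32-bit copies.
                 */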
462                 push_len = (length + sizeof(*tx_push) + 7) / 8;
463                 if (push_len > 16) {
464                         __iowrite64_copy(db, tx_push_buf, 16);
465                         __iowrite32_copy(db + 4, tx_push_buf + 1,
466                                          (push_len - 16) << 1);
467                 } else {
468                         __iowrite64_copy(db, tx_push_buf, push_len);
469                 }
470
471                 goto tx_done;
472         }
473
474 normal_tx:
475         if (length < BNXT_MIN_PKT_SIZE) {
476                 pad = BNXT_MIN_PKT_SIZE - length;
477                 if (skb_pad(skb, pad)) {
478                         /* SKB already freed. */
479                         tx_buf->skb = NULL;
480                         return NETDEV_TX_OK;
481                 }
482                 length = BNXT_MIN_PKT_SIZE;
483         }
484
485         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
486
487         if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
488                 dev_kfree_skb_any(skb);
489                 tx_buf->skb = NULL;
490                 return NETDEV_TX_OK;
491         }
492
493         dma_unmap_addr_set(tx_buf, mapping, mapping);
494         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
495                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
496
497         txbd->tx_bd_haddr = cpu_to_le64(mapping);
498
499         prod = NEXT_TX(prod);
500         txbd1 = (struct tx_bd_ext *)
501                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
502
503         txbd1->tx_bd_hsize_lflags = 0;
504         if (skb_is_gso(skb)) {
505                 u32 hdr_len;
506
507                 if (skb->encapsulation)
508                         hdr_len = skb_inner_network_offset(skb) +
509                                 skb_inner_network_header_len(skb) +
510                                 inner_tcp_hdrlen(skb);
511                 else
512                         hdr_len = skb_transport_offset(skb) +
513                                 tcp_hdrlen(skb);
514
515                 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
516                                         TX_BD_FLAGS_T_IPID |
517                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
518                 length = skb_shinfo(skb)->gso_size;
519                 txbd1->tx_bd_mss = cpu_to_le32(length);
520                 length += hdr_len;
521         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
522                 txbd1->tx_bd_hsize_lflags =
523                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
524                 txbd1->tx_bd_mss = 0;
525         }
526
527         length >>= 9;
528         if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
529                 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
530                                      skb->len);
531                 i = 0;
532                 goto tx_dma_error;
533         }
534         flags |= bnxt_lhint_arr[length];
535         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
536
537         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
538         txbd1->tx_bd_cfa_action =
539                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
540         for (i = 0; i < last_frag; i++) {
541                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
542
543                 prod = NEXT_TX(prod);
544                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
545
546                 len = skb_frag_size(frag);
547                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
548                                            DMA_TO_DEVICE);
549
550                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
551                         goto tx_dma_error;
552
553                 tx_buf = &txr->tx_buf_ring[prod];
554                 dma_unmap_addr_set(tx_buf, mapping, mapping);
555
556                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
557
558                 flags = len << TX_BD_LEN_SHIFT;
559                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
560         }
561
562         flags &= ~TX_BD_LEN;
563         txbd->tx_bd_len_flags_type =
564                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
565                             TX_BD_FLAGS_PACKET_END);
566
567         netdev_tx_sent_queue(txq, skb->len);
568
569         /* Sync BD data before updating doorbell */
570         wmb();
571
572         prod = NEXT_TX(prod);
573         txr->tx_prod = prod;
574
575         if (!netdev_xmit_more() || netif_xmit_stopped(txq))
576                 bnxt_db_write(bp, &txr->tx_db, prod);
577
578 tx_done:
579
580         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
581                 if (netdev_xmit_more() && !tx_buf->is_push)
582                         bnxt_db_write(bp, &txr->tx_db, prod);
583
584                 netif_tx_stop_queue(txq);
585
586                 /* netif_tx_stop_queue() must be done before checking
587                  * tx index in bnxt_tx_avail() below, because in
588                  * bnxt_tx_int(), we update tx index before checking for
589                  * netif_tx_queue_stopped().
590                  */
591                 smp_mb();
592                 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
593                         netif_tx_wake_queue(txq);
594         }
595         return NETDEV_TX_OK;
596
597 tx_dma_error:
598         last_frag = i;
599
600         /* start back at beginning and unmap skb */
601         prod = txr->tx_prod;
602         tx_buf = &txr->tx_buf_ring[prod];
603         tx_buf->skb = NULL;
604         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
605                          skb_headlen(skb), PCI_DMA_TODEVICE);
606         prod = NEXT_TX(prod);
607
608         /* unmap remaining mapped pages */
609         for (i = 0; i < last_frag; i++) {
610                 prod = NEXT_TX(prod);
611                 tx_buf = &txr->tx_buf_ring[prod];
612                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
613                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
614                                PCI_DMA_TODEVICE);
615         }
616
617         dev_kfree_skb_any(skb);
618         return NETDEV_TX_OK;
619 }
620
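/* Reclaim nr_pkts completed TX packets: unmap the buffers, free the skbs,
 * advance tx_cons, and re-wake the queue if it was stopped and enough
 * descriptors are free again.
 */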
621 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
622 {
623         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
624         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
625         u16 cons = txr->tx_cons;
626         struct pci_dev *pdev = bp->pdev;
627         int i;
628         unsigned int tx_bytes = 0;
629
630         for (i = 0; i < nr_pkts; i++) {
631                 struct bnxt_sw_tx_bd *tx_buf;
632                 struct sk_buff *skb;
633                 int j, last;
634
635                 tx_buf = &txr->tx_buf_ring[cons];
636                 cons = NEXT_TX(cons);
637                 skb = tx_buf->skb;
638                 tx_buf->skb = NULL;
639
640                 if (tx_buf->is_push) {
641                         tx_buf->is_push = 0;
642                         goto next_tx_int;
643                 }
644
645                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
646                                  skb_headlen(skb), PCI_DMA_TODEVICE);
647                 last = tx_buf->nr_frags;
648
649                 for (j = 0; j < last; j++) {
650                         cons = NEXT_TX(cons);
651                         tx_buf = &txr->tx_buf_ring[cons];
652                         dma_unmap_page(
653                                 &pdev->dev,
654                                 dma_unmap_addr(tx_buf, mapping),
655                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
656                                 PCI_DMA_TODEVICE);
657                 }
658
659 next_tx_int:
660                 cons = NEXT_TX(cons);
661
662                 tx_bytes += skb->len;
663                 dev_kfree_skb_any(skb);
664         }
665
666         netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
667         txr->tx_cons = cons;
668
669         /* Need to make the tx_cons update visible to bnxt_start_xmit()
670          * before checking for netif_tx_queue_stopped().  Without the
671          * memory barrier, there is a small possibility that bnxt_start_xmit()
672          * will miss it and cause the queue to be stopped forever.
673          */
674         smp_mb();
675
676         if (unlikely(netif_tx_queue_stopped(txq)) &&
677             (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
678                 __netif_tx_lock(txq, smp_processor_id());
679                 if (netif_tx_queue_stopped(txq) &&
680                     bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
681                     txr->dev_state != BNXT_DEV_STATE_CLOSING)
682                         netif_tx_wake_queue(txq);
683                 __netif_tx_unlock(txq);
684         }
685 }
686
687 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
688                                          struct bnxt_rx_ring_info *rxr,
689                                          gfp_t gfp)
690 {
691         struct device *dev = &bp->pdev->dev;
692         struct page *page;
693
694         page = page_pool_dev_alloc_pages(rxr->page_pool);
695         if (!page)
696                 return NULL;
697
698         *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
699                                       DMA_ATTR_WEAK_ORDERING);
700         if (dma_mapping_error(dev, *mapping)) {
701                 page_pool_recycle_direct(rxr->page_pool, page);
702                 return NULL;
703         }
704         *mapping += bp->rx_dma_offset;
705         return page;
706 }
707
708 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
709                                        gfp_t gfp)
710 {
711         u8 *data;
712         struct pci_dev *pdev = bp->pdev;
713
714         data = kmalloc(bp->rx_buf_size, gfp);
715         if (!data)
716                 return NULL;
717
718         *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
719                                         bp->rx_buf_use_size, bp->rx_dir,
720                                         DMA_ATTR_WEAK_ORDERING);
721
722         if (dma_mapping_error(&pdev->dev, *mapping)) {
723                 kfree(data);
724                 data = NULL;
725         }
726         return data;
727 }
728
729 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
730                        u16 prod, gfp_t gfp)
731 {
732         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
733         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
734         dma_addr_t mapping;
735
736         if (BNXT_RX_PAGE_MODE(bp)) {
737                 struct page *page =
738                         __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
739
740                 if (!page)
741                         return -ENOMEM;
742
743                 rx_buf->data = page;
744                 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
745         } else {
746                 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
747
748                 if (!data)
749                         return -ENOMEM;
750
751                 rx_buf->data = data;
752                 rx_buf->data_ptr = data + bp->rx_offset;
753         }
754         rx_buf->mapping = mapping;
755
756         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
757         return 0;
758 }
759
760 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
761 {
762         u16 prod = rxr->rx_prod;
763         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
764         struct rx_bd *cons_bd, *prod_bd;
765
766         prod_rx_buf = &rxr->rx_buf_ring[prod];
767         cons_rx_buf = &rxr->rx_buf_ring[cons];
768
769         prod_rx_buf->data = data;
770         prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
771
772         prod_rx_buf->mapping = cons_rx_buf->mapping;
773
774         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
775         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
776
777         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
778 }
779
780 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
781 {
782         u16 next, max = rxr->rx_agg_bmap_size;
783
784         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
785         if (next >= max)
786                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
787         return next;
788 }
789
790 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
791                                      struct bnxt_rx_ring_info *rxr,
792                                      u16 prod, gfp_t gfp)
793 {
794         struct rx_bd *rxbd =
795                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
796         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
797         struct pci_dev *pdev = bp->pdev;
798         struct page *page;
799         dma_addr_t mapping;
800         u16 sw_prod = rxr->rx_sw_agg_prod;
801         unsigned int offset = 0;
802
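        /* When the system page size is larger than BNXT_RX_PAGE_SIZE, carve
         * the page into BNXT_RX_PAGE_SIZE slices, taking an extra page
         * reference for all but the last slice.
         */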
803         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
804                 page = rxr->rx_page;
805                 if (!page) {
806                         page = alloc_page(gfp);
807                         if (!page)
808                                 return -ENOMEM;
809                         rxr->rx_page = page;
810                         rxr->rx_page_offset = 0;
811                 }
812                 offset = rxr->rx_page_offset;
813                 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
814                 if (rxr->rx_page_offset == PAGE_SIZE)
815                         rxr->rx_page = NULL;
816                 else
817                         get_page(page);
818         } else {
819                 page = alloc_page(gfp);
820                 if (!page)
821                         return -ENOMEM;
822         }
823
824         mapping = dma_map_page_attrs(&pdev->dev, page, offset,
825                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
826                                      DMA_ATTR_WEAK_ORDERING);
827         if (dma_mapping_error(&pdev->dev, mapping)) {
828                 __free_page(page);
829                 return -EIO;
830         }
831
832         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
833                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
834
835         __set_bit(sw_prod, rxr->rx_agg_bmap);
836         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
837         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
838
839         rx_agg_buf->page = page;
840         rx_agg_buf->offset = offset;
841         rx_agg_buf->mapping = mapping;
842         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
843         rxbd->rx_bd_opaque = sw_prod;
844         return 0;
845 }
846
847 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
848                                        struct bnxt_cp_ring_info *cpr,
849                                        u16 cp_cons, u16 curr)
850 {
851         struct rx_agg_cmp *agg;
852
853         cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
854         agg = (struct rx_agg_cmp *)
855                 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
856         return agg;
857 }
858
859 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
860                                               struct bnxt_rx_ring_info *rxr,
861                                               u16 agg_id, u16 curr)
862 {
863         struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
864
865         return &tpa_info->agg_arr[curr];
866 }
867
868 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
869                                    u16 start, u32 agg_bufs, bool tpa)
870 {
871         struct bnxt_napi *bnapi = cpr->bnapi;
872         struct bnxt *bp = bnapi->bp;
873         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
874         u16 prod = rxr->rx_agg_prod;
875         u16 sw_prod = rxr->rx_sw_agg_prod;
876         bool p5_tpa = false;
877         u32 i;
878
879         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
880                 p5_tpa = true;
881
882         for (i = 0; i < agg_bufs; i++) {
883                 u16 cons;
884                 struct rx_agg_cmp *agg;
885                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
886                 struct rx_bd *prod_bd;
887                 struct page *page;
888
889                 if (p5_tpa)
890                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
891                 else
892                         agg = bnxt_get_agg(bp, cpr, idx, start + i);
893                 cons = agg->rx_agg_cmp_opaque;
894                 __clear_bit(cons, rxr->rx_agg_bmap);
895
896                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
897                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
898
899                 __set_bit(sw_prod, rxr->rx_agg_bmap);
900                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
901                 cons_rx_buf = &rxr->rx_agg_ring[cons];
902
903                 /* It is possible for sw_prod to be equal to cons, so
904                  * set cons_rx_buf->page to NULL first.
905                  */
906                 page = cons_rx_buf->page;
907                 cons_rx_buf->page = NULL;
908                 prod_rx_buf->page = page;
909                 prod_rx_buf->offset = cons_rx_buf->offset;
910
911                 prod_rx_buf->mapping = cons_rx_buf->mapping;
912
913                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
914
915                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
916                 prod_bd->rx_bd_opaque = sw_prod;
917
918                 prod = NEXT_RX_AGG(prod);
919                 sw_prod = NEXT_RX_AGG(sw_prod);
920         }
921         rxr->rx_agg_prod = prod;
922         rxr->rx_sw_agg_prod = sw_prod;
923 }
924
925 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
926                                         struct bnxt_rx_ring_info *rxr,
927                                         u16 cons, void *data, u8 *data_ptr,
928                                         dma_addr_t dma_addr,
929                                         unsigned int offset_and_len)
930 {
931         unsigned int payload = offset_and_len >> 16;
932         unsigned int len = offset_and_len & 0xffff;
933         skb_frag_t *frag;
934         struct page *page = data;
935         u16 prod = rxr->rx_prod;
936         struct sk_buff *skb;
937         int off, err;
938
939         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
940         if (unlikely(err)) {
941                 bnxt_reuse_rx_data(rxr, cons, data);
942                 return NULL;
943         }
944         dma_addr -= bp->rx_dma_offset;
945         dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
946                              DMA_ATTR_WEAK_ORDERING);
947         page_pool_release_page(rxr->page_pool, page);
948
949         if (unlikely(!payload))
950                 payload = eth_get_headlen(bp->dev, data_ptr, len);
951
952         skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
953         if (!skb) {
954                 __free_page(page);
955                 return NULL;
956         }
957
958         off = (void *)data_ptr - page_address(page);
959         skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
960         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
961                payload + NET_IP_ALIGN);
962
963         frag = &skb_shinfo(skb)->frags[0];
964         skb_frag_size_sub(frag, payload);
965         skb_frag_off_add(frag, payload);
966         skb->data_len -= payload;
967         skb->tail += payload;
968
969         return skb;
970 }
971
972 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
973                                    struct bnxt_rx_ring_info *rxr, u16 cons,
974                                    void *data, u8 *data_ptr,
975                                    dma_addr_t dma_addr,
976                                    unsigned int offset_and_len)
977 {
978         u16 prod = rxr->rx_prod;
979         struct sk_buff *skb;
980         int err;
981
982         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
983         if (unlikely(err)) {
984                 bnxt_reuse_rx_data(rxr, cons, data);
985                 return NULL;
986         }
987
988         skb = build_skb(data, 0);
989         dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
990                                bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
991         if (!skb) {
992                 kfree(data);
993                 return NULL;
994         }
995
996         skb_reserve(skb, bp->rx_offset);
997         skb_put(skb, offset_and_len & 0xffff);
998         return skb;
999 }
1000
1001 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1002                                      struct bnxt_cp_ring_info *cpr,
1003                                      struct sk_buff *skb, u16 idx,
1004                                      u32 agg_bufs, bool tpa)
1005 {
1006         struct bnxt_napi *bnapi = cpr->bnapi;
1007         struct pci_dev *pdev = bp->pdev;
1008         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1009         u16 prod = rxr->rx_agg_prod;
1010         bool p5_tpa = false;
1011         u32 i;
1012
1013         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1014                 p5_tpa = true;
1015
1016         for (i = 0; i < agg_bufs; i++) {
1017                 u16 cons, frag_len;
1018                 struct rx_agg_cmp *agg;
1019                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1020                 struct page *page;
1021                 dma_addr_t mapping;
1022
1023                 if (p5_tpa)
1024                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1025                 else
1026                         agg = bnxt_get_agg(bp, cpr, idx, i);
1027                 cons = agg->rx_agg_cmp_opaque;
1028                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1029                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1030
1031                 cons_rx_buf = &rxr->rx_agg_ring[cons];
1032                 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1033                                    cons_rx_buf->offset, frag_len);
1034                 __clear_bit(cons, rxr->rx_agg_bmap);
1035
1036                 /* It is possible for bnxt_alloc_rx_page() to allocate
1037                  * a sw_prod index that equals the cons index, so we
1038                  * need to clear the cons entry now.
1039                  */
1040                 mapping = cons_rx_buf->mapping;
1041                 page = cons_rx_buf->page;
1042                 cons_rx_buf->page = NULL;
1043
1044                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1045                         struct skb_shared_info *shinfo;
1046                         unsigned int nr_frags;
1047
1048                         shinfo = skb_shinfo(skb);
1049                         nr_frags = --shinfo->nr_frags;
1050                         __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1051
1052                         dev_kfree_skb(skb);
1053
1054                         cons_rx_buf->page = page;
1055
1056                         /* Update prod since possibly some pages have been
1057                          * allocated already.
1058                          */
1059                         rxr->rx_agg_prod = prod;
1060                         bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1061                         return NULL;
1062                 }
1063
1064                 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1065                                      PCI_DMA_FROMDEVICE,
1066                                      DMA_ATTR_WEAK_ORDERING);
1067
1068                 skb->data_len += frag_len;
1069                 skb->len += frag_len;
1070                 skb->truesize += PAGE_SIZE;
1071
1072                 prod = NEXT_RX_AGG(prod);
1073         }
1074         rxr->rx_agg_prod = prod;
1075         return skb;
1076 }
1077
1078 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1079                                u8 agg_bufs, u32 *raw_cons)
1080 {
1081         u16 last;
1082         struct rx_agg_cmp *agg;
1083
1084         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1085         last = RING_CMP(*raw_cons);
1086         agg = (struct rx_agg_cmp *)
1087                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1088         return RX_AGG_CMP_VALID(agg, *raw_cons);
1089 }
1090
1091 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1092                                             unsigned int len,
1093                                             dma_addr_t mapping)
1094 {
1095         struct bnxt *bp = bnapi->bp;
1096         struct pci_dev *pdev = bp->pdev;
1097         struct sk_buff *skb;
1098
1099         skb = napi_alloc_skb(&bnapi->napi, len);
1100         if (!skb)
1101                 return NULL;
1102
1103         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1104                                 bp->rx_dir);
1105
1106         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1107                len + NET_IP_ALIGN);
1108
1109         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1110                                    bp->rx_dir);
1111
1112         skb_put(skb, len);
1113         return skb;
1114 }
1115
1116 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1117                            u32 *raw_cons, void *cmp)
1118 {
1119         struct rx_cmp *rxcmp = cmp;
1120         u32 tmp_raw_cons = *raw_cons;
1121         u8 cmp_type, agg_bufs = 0;
1122
1123         cmp_type = RX_CMP_TYPE(rxcmp);
1124
1125         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1126                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1127                             RX_CMP_AGG_BUFS) >>
1128                            RX_CMP_AGG_BUFS_SHIFT;
1129         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1130                 struct rx_tpa_end_cmp *tpa_end = cmp;
1131
1132                 if (bp->flags & BNXT_FLAG_CHIP_P5)
1133                         return 0;
1134
1135                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1136         }
1137
1138         if (agg_bufs) {
1139                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1140                         return -EBUSY;
1141         }
1142         *raw_cons = tmp_raw_cons;
1143         return 0;
1144 }
1145
1146 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1147 {
1148         if (BNXT_PF(bp))
1149                 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1150         else
1151                 schedule_delayed_work(&bp->fw_reset_task, delay);
1152 }
1153
1154 static void bnxt_queue_sp_work(struct bnxt *bp)
1155 {
1156         if (BNXT_PF(bp))
1157                 queue_work(bnxt_pf_wq, &bp->sp_task);
1158         else
1159                 schedule_work(&bp->sp_task);
1160 }
1161
1162 static void bnxt_cancel_sp_work(struct bnxt *bp)
1163 {
1164         if (BNXT_PF(bp))
1165                 flush_workqueue(bnxt_pf_wq);
1166         else
1167                 cancel_work_sync(&bp->sp_task);
1168 }
1169
1170 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1171 {
1172         if (!rxr->bnapi->in_reset) {
1173                 rxr->bnapi->in_reset = true;
1174                 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1175                 bnxt_queue_sp_work(bp);
1176         }
1177         rxr->rx_next_cons = 0xffff;
1178 }
1179
1180 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1181 {
1182         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1183         u16 idx = agg_id & MAX_TPA_P5_MASK;
1184
1185         if (test_bit(idx, map->agg_idx_bmap))
1186                 idx = find_first_zero_bit(map->agg_idx_bmap,
1187                                           BNXT_AGG_IDX_BMAP_SIZE);
1188         __set_bit(idx, map->agg_idx_bmap);
1189         map->agg_id_tbl[agg_id] = idx;
1190         return idx;
1191 }
1192
1193 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1194 {
1195         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1196
1197         __clear_bit(idx, map->agg_idx_bmap);
1198 }
1199
1200 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1201 {
1202         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1203
1204         return map->agg_id_tbl[agg_id];
1205 }
1206
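/* Handle a TPA (hardware receive aggregation) start completion: stash the
 * buffer the aggregation began on in tpa_info and give the RX ring a
 * replacement buffer so normal receives can continue.
 */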
1207 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1208                            struct rx_tpa_start_cmp *tpa_start,
1209                            struct rx_tpa_start_cmp_ext *tpa_start1)
1210 {
1211         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1212         struct bnxt_tpa_info *tpa_info;
1213         u16 cons, prod, agg_id;
1214         struct rx_bd *prod_bd;
1215         dma_addr_t mapping;
1216
1217         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1218                 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1219                 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1220         } else {
1221                 agg_id = TPA_START_AGG_ID(tpa_start);
1222         }
1223         cons = tpa_start->rx_tpa_start_cmp_opaque;
1224         prod = rxr->rx_prod;
1225         cons_rx_buf = &rxr->rx_buf_ring[cons];
1226         prod_rx_buf = &rxr->rx_buf_ring[prod];
1227         tpa_info = &rxr->rx_tpa[agg_id];
1228
1229         if (unlikely(cons != rxr->rx_next_cons ||
1230                      TPA_START_ERROR(tpa_start))) {
1231                 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1232                             cons, rxr->rx_next_cons,
1233                             TPA_START_ERROR_CODE(tpa_start1));
1234                 bnxt_sched_reset(bp, rxr);
1235                 return;
1236         }
1237         /* Store cfa_code in tpa_info to use in tpa_end
1238          * completion processing.
1239          */
1240         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1241         prod_rx_buf->data = tpa_info->data;
1242         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1243
1244         mapping = tpa_info->mapping;
1245         prod_rx_buf->mapping = mapping;
1246
1247         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1248
1249         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1250
1251         tpa_info->data = cons_rx_buf->data;
1252         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1253         cons_rx_buf->data = NULL;
1254         tpa_info->mapping = cons_rx_buf->mapping;
1255
1256         tpa_info->len =
1257                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1258                                 RX_TPA_START_CMP_LEN_SHIFT;
1259         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1260                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1261
1262                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1263                 tpa_info->gso_type = SKB_GSO_TCPV4;
1264                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1265                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1266                         tpa_info->gso_type = SKB_GSO_TCPV6;
1267                 tpa_info->rss_hash =
1268                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1269         } else {
1270                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1271                 tpa_info->gso_type = 0;
1272                 if (netif_msg_rx_err(bp))
1273                         netdev_warn(bp->dev, "TPA packet without valid hash\n");
1274         }
1275         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1276         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1277         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1278         tpa_info->agg_count = 0;
1279
1280         rxr->rx_prod = NEXT_RX(prod);
1281         cons = NEXT_RX(cons);
1282         rxr->rx_next_cons = NEXT_RX(cons);
1283         cons_rx_buf = &rxr->rx_buf_ring[cons];
1284
1285         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1286         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1287         cons_rx_buf->data = NULL;
1288 }
1289
1290 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1291 {
1292         if (agg_bufs)
1293                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1294 }
1295
1296 #ifdef CONFIG_INET
1297 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1298 {
1299         struct udphdr *uh = NULL;
1300
1301         if (ip_proto == htons(ETH_P_IP)) {
1302                 struct iphdr *iph = (struct iphdr *)skb->data;
1303
1304                 if (iph->protocol == IPPROTO_UDP)
1305                         uh = (struct udphdr *)(iph + 1);
1306         } else {
1307                 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1308
1309                 if (iph->nexthdr == IPPROTO_UDP)
1310                         uh = (struct udphdr *)(iph + 1);
1311         }
1312         if (uh) {
1313                 if (uh->check)
1314                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1315                 else
1316                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1317         }
1318 }
1319 #endif
1320
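/* Per-chip-family GRO fixup helpers: each variant rebuilds the network and
 * transport header offsets (and, where needed, the TCP pseudo-header
 * checksum) from the TPA metadata before the aggregated skb is passed to GRO.
 */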
1321 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1322                                            int payload_off, int tcp_ts,
1323                                            struct sk_buff *skb)
1324 {
1325 #ifdef CONFIG_INET
1326         struct tcphdr *th;
1327         int len, nw_off;
1328         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1329         u32 hdr_info = tpa_info->hdr_info;
1330         bool loopback = false;
1331
1332         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1333         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1334         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1335
1336         /* If the packet is an internal loopback packet, the offsets will
1337          * have an extra 4 bytes.
1338          */
1339         if (inner_mac_off == 4) {
1340                 loopback = true;
1341         } else if (inner_mac_off > 4) {
1342                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1343                                             ETH_HLEN - 2));
1344
1345         /* We only support inner IPv4/IPv6.  If we don't see the
1346                  * correct protocol ID, it must be a loopback packet where
1347                  * the offsets are off by 4.
1348                  */
1349                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1350                         loopback = true;
1351         }
1352         if (loopback) {
1353                 /* internal loopback packet, subtract all offsets by 4 */
1354                 inner_ip_off -= 4;
1355                 inner_mac_off -= 4;
1356                 outer_ip_off -= 4;
1357         }
1358
1359         nw_off = inner_ip_off - ETH_HLEN;
1360         skb_set_network_header(skb, nw_off);
1361         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1362                 struct ipv6hdr *iph = ipv6_hdr(skb);
1363
1364                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1365                 len = skb->len - skb_transport_offset(skb);
1366                 th = tcp_hdr(skb);
1367                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1368         } else {
1369                 struct iphdr *iph = ip_hdr(skb);
1370
1371                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1372                 len = skb->len - skb_transport_offset(skb);
1373                 th = tcp_hdr(skb);
1374                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1375         }
1376
1377         if (inner_mac_off) { /* tunnel */
1378                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1379                                             ETH_HLEN - 2));
1380
1381                 bnxt_gro_tunnel(skb, proto);
1382         }
1383 #endif
1384         return skb;
1385 }
1386
1387 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1388                                            int payload_off, int tcp_ts,
1389                                            struct sk_buff *skb)
1390 {
1391 #ifdef CONFIG_INET
1392         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1393         u32 hdr_info = tpa_info->hdr_info;
1394         int iphdr_len, nw_off;
1395
1396         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1397         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1398         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1399
1400         nw_off = inner_ip_off - ETH_HLEN;
1401         skb_set_network_header(skb, nw_off);
1402         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1403                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1404         skb_set_transport_header(skb, nw_off + iphdr_len);
1405
1406         if (inner_mac_off) { /* tunnel */
1407                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1408                                             ETH_HLEN - 2));
1409
1410                 bnxt_gro_tunnel(skb, proto);
1411         }
1412 #endif
1413         return skb;
1414 }
1415
1416 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1417 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1418
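/* GRO fixup that derives the inner header offsets from the hardware
 * payload offset and the fixed IPv4/IPv6 + TCP header sizes (plus the
 * 12-byte TCP timestamp option when present), then primes the TCP
 * pseudo-header checksum for tcp_gro_complete().
 */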
1419 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1420                                            int payload_off, int tcp_ts,
1421                                            struct sk_buff *skb)
1422 {
1423 #ifdef CONFIG_INET
1424         struct tcphdr *th;
1425         int len, nw_off, tcp_opt_len = 0;
1426
1427         if (tcp_ts)
1428                 tcp_opt_len = 12;
1429
1430         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1431                 struct iphdr *iph;
1432
1433                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1434                          ETH_HLEN;
1435                 skb_set_network_header(skb, nw_off);
1436                 iph = ip_hdr(skb);
1437                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1438                 len = skb->len - skb_transport_offset(skb);
1439                 th = tcp_hdr(skb);
1440                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1441         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1442                 struct ipv6hdr *iph;
1443
1444                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1445                          ETH_HLEN;
1446                 skb_set_network_header(skb, nw_off);
1447                 iph = ipv6_hdr(skb);
1448                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1449                 len = skb->len - skb_transport_offset(skb);
1450                 th = tcp_hdr(skb);
1451                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1452         } else {
1453                 dev_kfree_skb_any(skb);
1454                 return NULL;
1455         }
1456
1457         if (nw_off) /* tunnel */
1458                 bnxt_gro_tunnel(skb, skb->protocol);
1459 #endif
1460         return skb;
1461 }
1462
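/* Finish GRO setup for a completed TPA aggregation: fill in the segment
 * count and gso_size from the TPA end completion, let the chip-specific
 * gro_func fix up the headers, then hand the skb to tcp_gro_complete().
 */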
1463 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1464                                            struct bnxt_tpa_info *tpa_info,
1465                                            struct rx_tpa_end_cmp *tpa_end,
1466                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1467                                            struct sk_buff *skb)
1468 {
1469 #ifdef CONFIG_INET
1470         int payload_off;
1471         u16 segs;
1472
1473         segs = TPA_END_TPA_SEGS(tpa_end);
1474         if (segs == 1)
1475                 return skb;
1476
1477         NAPI_GRO_CB(skb)->count = segs;
1478         skb_shinfo(skb)->gso_size =
1479                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1480         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1481         if (bp->flags & BNXT_FLAG_CHIP_P5)
1482                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1483         else
1484                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1485         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1486         if (likely(skb))
1487                 tcp_gro_complete(skb);
1488 #endif
1489         return skb;
1490 }
1491
1492 /* Given the cfa_code of a received packet, determine which
1493  * netdev (vf-rep or PF) the packet is destined to.
1494  */
1495 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1496 {
1497         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1498
1499         /* if vf-rep dev is NULL, the packet must belong to the PF */
1500         return dev ? dev : bp->dev;
1501 }
1502
1503 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1504                                            struct bnxt_cp_ring_info *cpr,
1505                                            u32 *raw_cons,
1506                                            struct rx_tpa_end_cmp *tpa_end,
1507                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1508                                            u8 *event)
1509 {
1510         struct bnxt_napi *bnapi = cpr->bnapi;
1511         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1512         u8 *data_ptr, agg_bufs;
1513         unsigned int len;
1514         struct bnxt_tpa_info *tpa_info;
1515         dma_addr_t mapping;
1516         struct sk_buff *skb;
1517         u16 idx = 0, agg_id;
1518         void *data;
1519         bool gro;
1520
1521         if (unlikely(bnapi->in_reset)) {
1522                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1523
1524                 if (rc < 0)
1525                         return ERR_PTR(-EBUSY);
1526                 return NULL;
1527         }
1528
1529         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1530                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1531                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1532                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1533                 tpa_info = &rxr->rx_tpa[agg_id];
1534                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1535                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1536                                     agg_bufs, tpa_info->agg_count);
1537                         agg_bufs = tpa_info->agg_count;
1538                 }
1539                 tpa_info->agg_count = 0;
1540                 *event |= BNXT_AGG_EVENT;
1541                 bnxt_free_agg_idx(rxr, agg_id);
1542                 idx = agg_id;
1543                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1544         } else {
1545                 agg_id = TPA_END_AGG_ID(tpa_end);
1546                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1547                 tpa_info = &rxr->rx_tpa[agg_id];
1548                 idx = RING_CMP(*raw_cons);
1549                 if (agg_bufs) {
1550                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1551                                 return ERR_PTR(-EBUSY);
1552
1553                         *event |= BNXT_AGG_EVENT;
1554                         idx = NEXT_CMP(idx);
1555                 }
1556                 gro = !!TPA_END_GRO(tpa_end);
1557         }
1558         data = tpa_info->data;
1559         data_ptr = tpa_info->data_ptr;
1560         prefetch(data_ptr);
1561         len = tpa_info->len;
1562         mapping = tpa_info->mapping;
1563
1564         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1565                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1566                 if (agg_bufs > MAX_SKB_FRAGS)
1567                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1568                                     agg_bufs, (int)MAX_SKB_FRAGS);
1569                 return NULL;
1570         }
1571
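        /* Small aggregations are copied into a freshly allocated skb so the
         * TPA buffer can be reused in place; larger ones swap in a new data
         * buffer and wrap the old one with build_skb() to avoid the copy.
         */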
1572         if (len <= bp->rx_copy_thresh) {
1573                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1574                 if (!skb) {
1575                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1576                         return NULL;
1577                 }
1578         } else {
1579                 u8 *new_data;
1580                 dma_addr_t new_mapping;
1581
1582                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1583                 if (!new_data) {
1584                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1585                         return NULL;
1586                 }
1587
1588                 tpa_info->data = new_data;
1589                 tpa_info->data_ptr = new_data + bp->rx_offset;
1590                 tpa_info->mapping = new_mapping;
1591
1592                 skb = build_skb(data, 0);
1593                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1594                                        bp->rx_buf_use_size, bp->rx_dir,
1595                                        DMA_ATTR_WEAK_ORDERING);
1596
1597                 if (!skb) {
1598                         kfree(data);
1599                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1600                         return NULL;
1601                 }
1602                 skb_reserve(skb, bp->rx_offset);
1603                 skb_put(skb, len);
1604         }
1605
1606         if (agg_bufs) {
1607                 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1608                 if (!skb) {
1609                         /* Page reuse already handled by bnxt_rx_pages(). */
1610                         return NULL;
1611                 }
1612         }
1613
1614         skb->protocol =
1615                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1616
1617         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1618                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1619
1620         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1621             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1622                 u16 vlan_proto = tpa_info->metadata >>
1623                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1624                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1625
1626                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1627         }
1628
1629         skb_checksum_none_assert(skb);
1630         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1631                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1632                 skb->csum_level =
1633                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1634         }
1635
1636         if (gro)
1637                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1638
1639         return skb;
1640 }
1641
1642 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1643                          struct rx_agg_cmp *rx_agg)
1644 {
1645         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1646         struct bnxt_tpa_info *tpa_info;
1647
1648         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1649         tpa_info = &rxr->rx_tpa[agg_id];
1650         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1651         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1652 }
1653
1654 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1655                              struct sk_buff *skb)
1656 {
1657         if (skb->dev != bp->dev) {
1658                 /* this packet belongs to a vf-rep */
1659                 bnxt_vf_rep_rx(bp, skb);
1660                 return;
1661         }
1662         skb_record_rx_queue(skb, bnapi->index);
1663         napi_gro_receive(&bnapi->napi, skb);
1664 }
1665
1666 /* returns the following:
1667  * 1       - 1 packet successfully received
1668  * 0       - successful TPA_START, packet not completed yet
1669  * -EBUSY  - completion ring does not have all the agg buffers yet
1670  * -ENOMEM - packet aborted due to out of memory
1671  * -EIO    - packet aborted due to hw error indicated in BD
1672  */
1673 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1674                        u32 *raw_cons, u8 *event)
1675 {
1676         struct bnxt_napi *bnapi = cpr->bnapi;
1677         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1678         struct net_device *dev = bp->dev;
1679         struct rx_cmp *rxcmp;
1680         struct rx_cmp_ext *rxcmp1;
1681         u32 tmp_raw_cons = *raw_cons;
1682         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1683         struct bnxt_sw_rx_bd *rx_buf;
1684         unsigned int len;
1685         u8 *data_ptr, agg_bufs, cmp_type;
1686         dma_addr_t dma_addr;
1687         struct sk_buff *skb;
1688         void *data;
1689         int rc = 0;
1690         u32 misc;
1691
1692         rxcmp = (struct rx_cmp *)
1693                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1694
1695         cmp_type = RX_CMP_TYPE(rxcmp);
1696
1697         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1698                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1699                 goto next_rx_no_prod_no_len;
1700         }
1701
1702         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1703         cp_cons = RING_CMP(tmp_raw_cons);
1704         rxcmp1 = (struct rx_cmp_ext *)
1705                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1706
1707         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1708                 return -EBUSY;
1709
1710         prod = rxr->rx_prod;
1711
1712         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1713                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1714                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1715
1716                 *event |= BNXT_RX_EVENT;
1717                 goto next_rx_no_prod_no_len;
1718
1719         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1720                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1721                                    (struct rx_tpa_end_cmp *)rxcmp,
1722                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1723
1724                 if (IS_ERR(skb))
1725                         return -EBUSY;
1726
1727                 rc = -ENOMEM;
1728                 if (likely(skb)) {
1729                         bnxt_deliver_skb(bp, bnapi, skb);
1730                         rc = 1;
1731                 }
1732                 *event |= BNXT_RX_EVENT;
1733                 goto next_rx_no_prod_no_len;
1734         }
1735
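        /* The completion's opaque value must match the expected consumer
         * index; a mismatch means the RX ring is out of sync, so drop the
         * completion and schedule a ring reset.
         */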
1736         cons = rxcmp->rx_cmp_opaque;
1737         if (unlikely(cons != rxr->rx_next_cons)) {
1738                 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1739
1740                 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1741                             cons, rxr->rx_next_cons);
1742                 bnxt_sched_reset(bp, rxr);
1743                 return rc1;
1744         }
1745         rx_buf = &rxr->rx_buf_ring[cons];
1746         data = rx_buf->data;
1747         data_ptr = rx_buf->data_ptr;
1748         prefetch(data_ptr);
1749
1750         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1751         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1752
1753         if (agg_bufs) {
1754                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1755                         return -EBUSY;
1756
1757                 cp_cons = NEXT_CMP(cp_cons);
1758                 *event |= BNXT_AGG_EVENT;
1759         }
1760         *event |= BNXT_RX_EVENT;
1761
1762         rx_buf->data = NULL;
1763         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1764                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1765
1766                 bnxt_reuse_rx_data(rxr, cons, data);
1767                 if (agg_bufs)
1768                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1769                                                false);
1770
1771                 rc = -EIO;
1772                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1773                         bnapi->cp_ring.rx_buf_errors++;
1774                         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
1775                                 netdev_warn(bp->dev, "RX buffer error %x\n",
1776                                             rx_err);
1777                                 bnxt_sched_reset(bp, rxr);
1778                         }
1779                 }
1780                 goto next_rx_no_len;
1781         }
1782
1783         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1784         dma_addr = rx_buf->mapping;
1785
1786         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1787                 rc = 1;
1788                 goto next_rx;
1789         }
1790
1791         if (len <= bp->rx_copy_thresh) {
1792                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1793                 bnxt_reuse_rx_data(rxr, cons, data);
1794                 if (!skb) {
1795                         if (agg_bufs)
1796                                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1797                                                        agg_bufs, false);
1798                         rc = -ENOMEM;
1799                         goto next_rx;
1800                 }
1801         } else {
1802                 u32 payload;
1803
1804                 if (rx_buf->data_ptr == data_ptr)
1805                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1806                 else
1807                         payload = 0;
1808                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1809                                       payload | len);
1810                 if (!skb) {
1811                         rc = -ENOMEM;
1812                         goto next_rx;
1813                 }
1814         }
1815
1816         if (agg_bufs) {
1817                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1818                 if (!skb) {
1819                         rc = -ENOMEM;
1820                         goto next_rx;
1821                 }
1822         }
1823
1824         if (RX_CMP_HASH_VALID(rxcmp)) {
1825                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1826                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1827
1828                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1829                 if (hash_type != 1 && hash_type != 3)
1830                         type = PKT_HASH_TYPE_L3;
1831                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1832         }
1833
1834         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1835         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1836
1837         if ((rxcmp1->rx_cmp_flags2 &
1838              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1839             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1840                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1841                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1842                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1843
1844                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1845         }
1846
1847         skb_checksum_none_assert(skb);
1848         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1849                 if (dev->features & NETIF_F_RXCSUM) {
1850                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1851                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1852                 }
1853         } else {
1854                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1855                         if (dev->features & NETIF_F_RXCSUM)
1856                                 bnapi->cp_ring.rx_l4_csum_errors++;
1857                 }
1858         }
1859
1860         bnxt_deliver_skb(bp, bnapi, skb);
1861         rc = 1;
1862
1863 next_rx:
1864         cpr->rx_packets += 1;
1865         cpr->rx_bytes += len;
1866
1867 next_rx_no_len:
1868         rxr->rx_prod = NEXT_RX(prod);
1869         rxr->rx_next_cons = NEXT_RX(cons);
1870
1871 next_rx_no_prod_no_len:
1872         *raw_cons = tmp_raw_cons;
1873
1874         return rc;
1875 }
1876
1877 /* In netpoll mode, if we are using a combined completion ring, we need to
1878  * discard the rx packets and recycle the buffers.
1879  */
1880 static int bnxt_force_rx_discard(struct bnxt *bp,
1881                                  struct bnxt_cp_ring_info *cpr,
1882                                  u32 *raw_cons, u8 *event)
1883 {
1884         u32 tmp_raw_cons = *raw_cons;
1885         struct rx_cmp_ext *rxcmp1;
1886         struct rx_cmp *rxcmp;
1887         u16 cp_cons;
1888         u8 cmp_type;
1889
1890         cp_cons = RING_CMP(tmp_raw_cons);
1891         rxcmp = (struct rx_cmp *)
1892                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1893
1894         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1895         cp_cons = RING_CMP(tmp_raw_cons);
1896         rxcmp1 = (struct rx_cmp_ext *)
1897                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1898
1899         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1900                 return -EBUSY;
1901
1902         cmp_type = RX_CMP_TYPE(rxcmp);
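        /* Mark the completion as errored so that bnxt_rx_pkt() below recycles
         * the buffers instead of building and delivering an skb.
         */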
1903         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1904                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1905                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1906         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1907                 struct rx_tpa_end_cmp_ext *tpa_end1;
1908
1909                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1910                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1911                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1912         }
1913         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1914 }
1915
1916 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1917 {
1918         struct bnxt_fw_health *fw_health = bp->fw_health;
1919         u32 reg = fw_health->regs[reg_idx];
1920         u32 reg_type, reg_off, val = 0;
1921
1922         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1923         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
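        /* The register descriptor encodes both the address space (PCI config,
         * GRC window, BAR0 or BAR1) and the offset within that space.
         */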
1924         switch (reg_type) {
1925         case BNXT_FW_HEALTH_REG_TYPE_CFG:
1926                 pci_read_config_dword(bp->pdev, reg_off, &val);
1927                 break;
1928         case BNXT_FW_HEALTH_REG_TYPE_GRC:
1929                 reg_off = fw_health->mapped_regs[reg_idx];
1930                 /* fall through */
1931         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1932                 val = readl(bp->bar0 + reg_off);
1933                 break;
1934         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1935                 val = readl(bp->bar1 + reg_off);
1936                 break;
1937         }
1938         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1939                 val &= fw_health->fw_reset_inprog_reg_mask;
1940         return val;
1941 }
1942
1943 #define BNXT_GET_EVENT_PORT(data)       \
1944         ((data) &                       \
1945          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1946
1947 static int bnxt_async_event_process(struct bnxt *bp,
1948                                     struct hwrm_async_event_cmpl *cmpl)
1949 {
1950         u16 event_id = le16_to_cpu(cmpl->event_id);
1951
1952         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1953         switch (event_id) {
1954         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1955                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1956                 struct bnxt_link_info *link_info = &bp->link_info;
1957
1958                 if (BNXT_VF(bp))
1959                         goto async_event_process_exit;
1960
1961                 /* print unsupported speed warning in forced speed mode only */
1962                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1963                     (data1 & 0x20000)) {
1964                         u16 fw_speed = link_info->force_link_speed;
1965                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1966
1967                         if (speed != SPEED_UNKNOWN)
1968                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1969                                             speed);
1970                 }
1971                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1972         }
1973         /* fall through */
1974         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
1975         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
1976                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
1977                 /* fall through */
1978         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1979                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1980                 break;
1981         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1982                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1983                 break;
1984         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1985                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1986                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1987
1988                 if (BNXT_VF(bp))
1989                         break;
1990
1991                 if (bp->pf.port_id != port_id)
1992                         break;
1993
1994                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1995                 break;
1996         }
1997         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1998                 if (BNXT_PF(bp))
1999                         goto async_event_process_exit;
2000                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2001                 break;
2002         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2003                 u32 data1 = le32_to_cpu(cmpl->event_data1);
2004
2005                 if (!bp->fw_health)
2006                         goto async_event_process_exit;
2007
2008                 bp->fw_reset_timestamp = jiffies;
2009                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2010                 if (!bp->fw_reset_min_dsecs)
2011                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2012                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2013                 if (!bp->fw_reset_max_dsecs)
2014                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2015                 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2016                         netdev_warn(bp->dev, "Firmware fatal reset event received\n");
2017                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2018                 } else {
2019                         netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
2020                                     bp->fw_reset_max_dsecs * 100);
2021                 }
2022                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2023                 break;
2024         }
2025         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2026                 struct bnxt_fw_health *fw_health = bp->fw_health;
2027                 u32 data1 = le32_to_cpu(cmpl->event_data1);
2028
2029                 if (!fw_health)
2030                         goto async_event_process_exit;
2031
2032                 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2033                 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2034                 if (!fw_health->enabled)
2035                         break;
2036
2037                 if (netif_msg_drv(bp))
2038                         netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
2039                                     fw_health->enabled, fw_health->master,
2040                                     bnxt_fw_health_readl(bp,
2041                                                          BNXT_FW_RESET_CNT_REG),
2042                                     bnxt_fw_health_readl(bp,
2043                                                          BNXT_FW_HEALTH_REG));
2044                 fw_health->tmr_multiplier =
2045                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2046                                      bp->current_interval * 10);
2047                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2048                 fw_health->last_fw_heartbeat =
2049                         bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2050                 fw_health->last_fw_reset_cnt =
2051                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2052                 goto async_event_process_exit;
2053         }
2054         default:
2055                 goto async_event_process_exit;
2056         }
2057         bnxt_queue_sp_work(bp);
2058 async_event_process_exit:
2059         bnxt_ulp_async_events(bp, cmpl);
2060         return 0;
2061 }
2062
2063 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2064 {
2065         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2066         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2067         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2068                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2069
2070         switch (cmpl_type) {
2071         case CMPL_BASE_TYPE_HWRM_DONE:
2072                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2073                 if (seq_id == bp->hwrm_intr_seq_id)
2074                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2075                 else
2076                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2077                 break;
2078
2079         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2080                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2081
2082                 if ((vf_id < bp->pf.first_vf_id) ||
2083                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2084                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2085                                    vf_id);
2086                         return -EINVAL;
2087                 }
2088
2089                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2090                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2091                 bnxt_queue_sp_work(bp);
2092                 break;
2093
2094         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2095                 bnxt_async_event_process(bp,
2096                                          (struct hwrm_async_event_cmpl *)txcmp);
2097
2098         default:
2099                 break;
2100         }
2101
2102         return 0;
2103 }
2104
2105 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2106 {
2107         struct bnxt_napi *bnapi = dev_instance;
2108         struct bnxt *bp = bnapi->bp;
2109         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2110         u32 cons = RING_CMP(cpr->cp_raw_cons);
2111
2112         cpr->event_ctr++;
2113         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2114         napi_schedule(&bnapi->napi);
2115         return IRQ_HANDLED;
2116 }
2117
2118 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2119 {
2120         u32 raw_cons = cpr->cp_raw_cons;
2121         u16 cons = RING_CMP(raw_cons);
2122         struct tx_cmp *txcmp;
2123
2124         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2125
2126         return TX_CMP_VALID(txcmp, raw_cons);
2127 }
2128
2129 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2130 {
2131         struct bnxt_napi *bnapi = dev_instance;
2132         struct bnxt *bp = bnapi->bp;
2133         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2134         u32 cons = RING_CMP(cpr->cp_raw_cons);
2135         u32 int_status;
2136
2137         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2138
2139         if (!bnxt_has_work(bp, cpr)) {
2140                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2141                 /* return if erroneous interrupt */
2142                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2143                         return IRQ_NONE;
2144         }
2145
2146         /* disable ring IRQ */
2147         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2148
2149         /* Return here if interrupt is shared and is disabled. */
2150         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2151                 return IRQ_HANDLED;
2152
2153         napi_schedule(&bnapi->napi);
2154         return IRQ_HANDLED;
2155 }
2156
2157 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2158                             int budget)
2159 {
2160         struct bnxt_napi *bnapi = cpr->bnapi;
2161         u32 raw_cons = cpr->cp_raw_cons;
2162         u32 cons;
2163         int tx_pkts = 0;
2164         int rx_pkts = 0;
2165         u8 event = 0;
2166         struct tx_cmp *txcmp;
2167
2168         cpr->has_more_work = 0;
2169         while (1) {
2170                 int rc;
2171
2172                 cons = RING_CMP(raw_cons);
2173                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2174
2175                 if (!TX_CMP_VALID(txcmp, raw_cons))
2176                         break;
2177
2178                 /* The validity test of the entry must be done before
2179                  * reading any further.
2180                  */
2181                 dma_rmb();
2182                 cpr->had_work_done = 1;
2183                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2184                         tx_pkts++;
2185                         /* return full budget so NAPI will complete. */
2186                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2187                                 rx_pkts = budget;
2188                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2189                                 if (budget)
2190                                         cpr->has_more_work = 1;
2191                                 break;
2192                         }
2193                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
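                        /* RX-type completion (type 0x10-0x1f): process it
                         * normally, or force a discard when polling without
                         * a budget (netpoll).
                         */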
2194                         if (likely(budget))
2195                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2196                         else
2197                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2198                                                            &event);
2199                         if (likely(rc >= 0))
2200                                 rx_pkts += rc;
2201                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2202                          * the NAPI budget.  Otherwise, we may potentially loop
2203                          * here forever if we consistently cannot allocate
2204                          * buffers.
2205                          */
2206                         else if (rc == -ENOMEM && budget)
2207                                 rx_pkts++;
2208                         else if (rc == -EBUSY)  /* partial completion */
2209                                 break;
2210                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2211                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2212                                     (TX_CMP_TYPE(txcmp) ==
2213                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2214                                     (TX_CMP_TYPE(txcmp) ==
2215                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2216                         bnxt_hwrm_handler(bp, txcmp);
2217                 }
2218                 raw_cons = NEXT_RAW_CMP(raw_cons);
2219
2220                 if (rx_pkts && rx_pkts == budget) {
2221                         cpr->has_more_work = 1;
2222                         break;
2223                 }
2224         }
2225
2226         if (event & BNXT_REDIRECT_EVENT)
2227                 xdp_do_flush_map();
2228
2229         if (event & BNXT_TX_EVENT) {
2230                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2231                 u16 prod = txr->tx_prod;
2232
2233                 /* Sync BD data before updating doorbell */
2234                 wmb();
2235
2236                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2237         }
2238
2239         cpr->cp_raw_cons = raw_cons;
2240         bnapi->tx_pkts += tx_pkts;
2241         bnapi->events |= event;
2242         return rx_pkts;
2243 }
2244
2245 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2246 {
2247         if (bnapi->tx_pkts) {
2248                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2249                 bnapi->tx_pkts = 0;
2250         }
2251
2252         if (bnapi->events & BNXT_RX_EVENT) {
2253                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2254
2255                 if (bnapi->events & BNXT_AGG_EVENT)
2256                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2257                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2258         }
2259         bnapi->events = 0;
2260 }
2261
2262 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2263                           int budget)
2264 {
2265         struct bnxt_napi *bnapi = cpr->bnapi;
2266         int rx_pkts;
2267
2268         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2269
2270         /* ACK completion ring before freeing tx ring and producing new
2271          * buffers in rx/agg rings to prevent overflowing the completion
2272          * ring.
2273          */
2274         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2275
2276         __bnxt_poll_work_done(bp, bnapi);
2277         return rx_pkts;
2278 }
2279
2280 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2281 {
2282         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2283         struct bnxt *bp = bnapi->bp;
2284         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2285         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2286         struct tx_cmp *txcmp;
2287         struct rx_cmp_ext *rxcmp1;
2288         u32 cp_cons, tmp_raw_cons;
2289         u32 raw_cons = cpr->cp_raw_cons;
2290         u32 rx_pkts = 0;
2291         u8 event = 0;
2292
2293         while (1) {
2294                 int rc;
2295
2296                 cp_cons = RING_CMP(raw_cons);
2297                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2298
2299                 if (!TX_CMP_VALID(txcmp, raw_cons))
2300                         break;
2301
2302                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2303                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2304                         cp_cons = RING_CMP(tmp_raw_cons);
2305                         rxcmp1 = (struct rx_cmp_ext *)
2306                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2307
2308                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2309                                 break;
2310
2311                         /* force an error to recycle the buffer */
2312                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2313                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2314
2315                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2316                         if (likely(rc == -EIO) && budget)
2317                                 rx_pkts++;
2318                         else if (rc == -EBUSY)  /* partial completion */
2319                                 break;
2320                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2321                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2322                         bnxt_hwrm_handler(bp, txcmp);
2323                 } else {
2324                         netdev_err(bp->dev,
2325                                    "Invalid completion received on special ring\n");
2326                 }
2327                 raw_cons = NEXT_RAW_CMP(raw_cons);
2328
2329                 if (rx_pkts == budget)
2330                         break;
2331         }
2332
2333         cpr->cp_raw_cons = raw_cons;
2334         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2335         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2336
2337         if (event & BNXT_AGG_EVENT)
2338                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2339
2340         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2341                 napi_complete_done(napi, rx_pkts);
2342                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2343         }
2344         return rx_pkts;
2345 }
2346
2347 static int bnxt_poll(struct napi_struct *napi, int budget)
2348 {
2349         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2350         struct bnxt *bp = bnapi->bp;
2351         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2352         int work_done = 0;
2353
2354         while (1) {
2355                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2356
2357                 if (work_done >= budget) {
2358                         if (!budget)
2359                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2360                         break;
2361                 }
2362
2363                 if (!bnxt_has_work(bp, cpr)) {
2364                         if (napi_complete_done(napi, work_done))
2365                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2366                         break;
2367                 }
2368         }
2369         if (bp->flags & BNXT_FLAG_DIM) {
2370                 struct dim_sample dim_sample = {};
2371
2372                 dim_update_sample(cpr->event_ctr,
2373                                   cpr->rx_packets,
2374                                   cpr->rx_bytes,
2375                                   &dim_sample);
2376                 net_dim(&cpr->dim, dim_sample);
2377         }
2378         return work_done;
2379 }
2380
2381 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2382 {
2383         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2384         int i, work_done = 0;
2385
2386         for (i = 0; i < 2; i++) {
2387                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2388
2389                 if (cpr2) {
2390                         work_done += __bnxt_poll_work(bp, cpr2,
2391                                                       budget - work_done);
2392                         cpr->has_more_work |= cpr2->has_more_work;
2393                 }
2394         }
2395         return work_done;
2396 }
2397
2398 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2399                                  u64 dbr_type, bool all)
2400 {
2401         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2402         int i;
2403
2404         for (i = 0; i < 2; i++) {
2405                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2406                 struct bnxt_db_info *db;
2407
2408                 if (cpr2 && (all || cpr2->had_work_done)) {
2409                         db = &cpr2->cp_db;
2410                         writeq(db->db_key64 | dbr_type |
2411                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2412                         cpr2->had_work_done = 0;
2413                 }
2414         }
2415         __bnxt_poll_work_done(bp, bnapi);
2416 }
2417
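/* NAPI poll handler for P5 chips: the NAPI instance services a notification
 * queue (NQ) whose entries each reference a completion ring, and each
 * referenced completion ring is drained through __bnxt_poll_work().
 */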
2418 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2419 {
2420         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2421         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2422         u32 raw_cons = cpr->cp_raw_cons;
2423         struct bnxt *bp = bnapi->bp;
2424         struct nqe_cn *nqcmp;
2425         int work_done = 0;
2426         u32 cons;
2427
2428         if (cpr->has_more_work) {
2429                 cpr->has_more_work = 0;
2430                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2431                 if (cpr->has_more_work) {
2432                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2433                         return work_done;
2434                 }
2435                 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2436                 if (napi_complete_done(napi, work_done))
2437                         BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2438                 return work_done;
2439         }
2440         while (1) {
2441                 cons = RING_CMP(raw_cons);
2442                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2443
2444                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2445                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2446                                              false);
2447                         cpr->cp_raw_cons = raw_cons;
2448                         if (napi_complete_done(napi, work_done))
2449                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2450                                                   cpr->cp_raw_cons);
2451                         return work_done;
2452                 }
2453
2454                 /* The validity test of the entry must be done before
2455                  * reading any further.
2456                  */
2457                 dma_rmb();
2458
2459                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2460                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2461                         struct bnxt_cp_ring_info *cpr2;
2462
2463                         cpr2 = cpr->cp_ring_arr[idx];
2464                         work_done += __bnxt_poll_work(bp, cpr2,
2465                                                       budget - work_done);
2466                         cpr->has_more_work = cpr2->has_more_work;
2467                 } else {
2468                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2469                 }
2470                 raw_cons = NEXT_RAW_CMP(raw_cons);
2471                 if (cpr->has_more_work)
2472                         break;
2473         }
2474         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2475         cpr->cp_raw_cons = raw_cons;
2476         return work_done;
2477 }
2478
2479 static void bnxt_free_tx_skbs(struct bnxt *bp)
2480 {
2481         int i, max_idx;
2482         struct pci_dev *pdev = bp->pdev;
2483
2484         if (!bp->tx_ring)
2485                 return;
2486
2487         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2488         for (i = 0; i < bp->tx_nr_rings; i++) {
2489                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2490                 int j;
2491
2492                 for (j = 0; j < max_idx;) {
2493                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2494                         struct sk_buff *skb;
2495                         int k, last;
2496
2497                         if (i < bp->tx_nr_rings_xdp &&
2498                             tx_buf->action == XDP_REDIRECT) {
2499                                 dma_unmap_single(&pdev->dev,
2500                                         dma_unmap_addr(tx_buf, mapping),
2501                                         dma_unmap_len(tx_buf, len),
2502                                         PCI_DMA_TODEVICE);
2503                                 xdp_return_frame(tx_buf->xdpf);
2504                                 tx_buf->action = 0;
2505                                 tx_buf->xdpf = NULL;
2506                                 j++;
2507                                 continue;
2508                         }
2509
2510                         skb = tx_buf->skb;
2511                         if (!skb) {
2512                                 j++;
2513                                 continue;
2514                         }
2515
2516                         tx_buf->skb = NULL;
2517
2518                         if (tx_buf->is_push) {
2519                                 dev_kfree_skb(skb);
2520                                 j += 2;
2521                                 continue;
2522                         }
2523
2524                         dma_unmap_single(&pdev->dev,
2525                                          dma_unmap_addr(tx_buf, mapping),
2526                                          skb_headlen(skb),
2527                                          PCI_DMA_TODEVICE);
2528
2529                         last = tx_buf->nr_frags;
2530                         j += 2;
2531                         for (k = 0; k < last; k++, j++) {
2532                                 int ring_idx = j & bp->tx_ring_mask;
2533                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2534
2535                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2536                                 dma_unmap_page(
2537                                         &pdev->dev,
2538                                         dma_unmap_addr(tx_buf, mapping),
2539                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2540                         }
2541                         dev_kfree_skb(skb);
2542                 }
2543                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2544         }
2545 }
2546
2547 static void bnxt_free_rx_skbs(struct bnxt *bp)
2548 {
2549         int i, max_idx, max_agg_idx;
2550         struct pci_dev *pdev = bp->pdev;
2551
2552         if (!bp->rx_ring)
2553                 return;
2554
2555         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2556         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2557         for (i = 0; i < bp->rx_nr_rings; i++) {
2558                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2559                 struct bnxt_tpa_idx_map *map;
2560                 int j;
2561
2562                 if (rxr->rx_tpa) {
2563                         for (j = 0; j < bp->max_tpa; j++) {
2564                                 struct bnxt_tpa_info *tpa_info =
2565                                                         &rxr->rx_tpa[j];
2566                                 u8 *data = tpa_info->data;
2567
2568                                 if (!data)
2569                                         continue;
2570
2571                                 dma_unmap_single_attrs(&pdev->dev,
2572                                                        tpa_info->mapping,
2573                                                        bp->rx_buf_use_size,
2574                                                        bp->rx_dir,
2575                                                        DMA_ATTR_WEAK_ORDERING);
2576
2577                                 tpa_info->data = NULL;
2578
2579                                 kfree(data);
2580                         }
2581                 }
2582
2583                 for (j = 0; j < max_idx; j++) {
2584                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2585                         dma_addr_t mapping = rx_buf->mapping;
2586                         void *data = rx_buf->data;
2587
2588                         if (!data)
2589                                 continue;
2590
2591                         rx_buf->data = NULL;
2592
2593                         if (BNXT_RX_PAGE_MODE(bp)) {
2594                                 mapping -= bp->rx_dma_offset;
2595                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2596                                                      PAGE_SIZE, bp->rx_dir,
2597                                                      DMA_ATTR_WEAK_ORDERING);
2598                                 page_pool_recycle_direct(rxr->page_pool, data);
2599                         } else {
2600                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2601                                                        bp->rx_buf_use_size,
2602                                                        bp->rx_dir,
2603                                                        DMA_ATTR_WEAK_ORDERING);
2604                                 kfree(data);
2605                         }
2606                 }
2607
2608                 for (j = 0; j < max_agg_idx; j++) {
2609                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2610                                 &rxr->rx_agg_ring[j];
2611                         struct page *page = rx_agg_buf->page;
2612
2613                         if (!page)
2614                                 continue;
2615
2616                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2617                                              BNXT_RX_PAGE_SIZE,
2618                                              PCI_DMA_FROMDEVICE,
2619                                              DMA_ATTR_WEAK_ORDERING);
2620
2621                         rx_agg_buf->page = NULL;
2622                         __clear_bit(j, rxr->rx_agg_bmap);
2623
2624                         __free_page(page);
2625                 }
2626                 if (rxr->rx_page) {
2627                         __free_page(rxr->rx_page);
2628                         rxr->rx_page = NULL;
2629                 }
2630                 map = rxr->rx_tpa_idx_map;
2631                 if (map)
2632                         memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2633         }
2634 }
2635
2636 static void bnxt_free_skbs(struct bnxt *bp)
2637 {
2638         bnxt_free_tx_skbs(bp);
2639         bnxt_free_rx_skbs(bp);
2640 }
2641
2642 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2643 {
2644         struct pci_dev *pdev = bp->pdev;
2645         int i;
2646
2647         for (i = 0; i < rmem->nr_pages; i++) {
2648                 if (!rmem->pg_arr[i])
2649                         continue;
2650
2651                 dma_free_coherent(&pdev->dev, rmem->page_size,
2652                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2653
2654                 rmem->pg_arr[i] = NULL;
2655         }
2656         if (rmem->pg_tbl) {
2657                 size_t pg_tbl_size = rmem->nr_pages * 8;
2658
2659                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2660                         pg_tbl_size = rmem->page_size;
2661                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2662                                   rmem->pg_tbl, rmem->pg_tbl_map);
2663                 rmem->pg_tbl = NULL;
2664         }
2665         if (rmem->vmem_size && *rmem->vmem) {
2666                 vfree(*rmem->vmem);
2667                 *rmem->vmem = NULL;
2668         }
2669 }
2670
2671 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2672 {
2673         struct pci_dev *pdev = bp->pdev;
2674         u64 valid_bit = 0;
2675         int i;
2676
2677         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2678                 valid_bit = PTU_PTE_VALID;
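        /* Rings spanning multiple pages (or using an indirection level) need
         * a page table: one 64-bit DMA address per page, tagged with the PTU
         * PTE valid/next-to-last/last bits so the hardware can walk the ring.
         */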
2679         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2680                 size_t pg_tbl_size = rmem->nr_pages * 8;
2681
2682                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2683                         pg_tbl_size = rmem->page_size;
2684                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2685                                                   &rmem->pg_tbl_map,
2686                                                   GFP_KERNEL);
2687                 if (!rmem->pg_tbl)
2688                         return -ENOMEM;
2689         }
2690
2691         for (i = 0; i < rmem->nr_pages; i++) {
2692                 u64 extra_bits = valid_bit;
2693
2694                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2695                                                      rmem->page_size,
2696                                                      &rmem->dma_arr[i],
2697                                                      GFP_KERNEL);
2698                 if (!rmem->pg_arr[i])
2699                         return -ENOMEM;
2700
2701                 if (rmem->init_val)
2702                         memset(rmem->pg_arr[i], rmem->init_val,
2703                                rmem->page_size);
2704                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2705                         if (i == rmem->nr_pages - 2 &&
2706                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2707                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2708                         else if (i == rmem->nr_pages - 1 &&
2709                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2710                                 extra_bits |= PTU_PTE_LAST;
2711                         rmem->pg_tbl[i] =
2712                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2713                 }
2714         }
2715
2716         if (rmem->vmem_size) {
2717                 *rmem->vmem = vzalloc(rmem->vmem_size);
2718                 if (!(*rmem->vmem))
2719                         return -ENOMEM;
2720         }
2721         return 0;
2722 }
2723
2724 static void bnxt_free_tpa_info(struct bnxt *bp)
2725 {
2726         int i;
2727
2728         for (i = 0; i < bp->rx_nr_rings; i++) {
2729                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2730
2731                 kfree(rxr->rx_tpa_idx_map);
2732                 rxr->rx_tpa_idx_map = NULL;
2733                 if (rxr->rx_tpa) {
2734                         kfree(rxr->rx_tpa[0].agg_arr);
2735                         rxr->rx_tpa[0].agg_arr = NULL;
2736                 }
2737                 kfree(rxr->rx_tpa);
2738                 rxr->rx_tpa = NULL;
2739         }
2740 }
2741
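/* Allocate the per-RX-ring TPA (LRO/GRO_HW) tracking array.  On P5 chips
 * the TPA limit is derived from the firmware-reported max_tpa_v2 value,
 * and an aggregation completion array plus an agg-index bitmap are also
 * set up for each ring.
 */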
2742 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2743 {
2744         int i, j, total_aggs = 0;
2745
2746         bp->max_tpa = MAX_TPA;
2747         if (bp->flags & BNXT_FLAG_CHIP_P5) {
2748                 if (!bp->max_tpa_v2)
2749                         return 0;
2750                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2751                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2752         }
2753
2754         for (i = 0; i < bp->rx_nr_rings; i++) {
2755                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2756                 struct rx_agg_cmp *agg;
2757
2758                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2759                                       GFP_KERNEL);
2760                 if (!rxr->rx_tpa)
2761                         return -ENOMEM;
2762
2763                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2764                         continue;
2765                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2766                 rxr->rx_tpa[0].agg_arr = agg;
2767                 if (!agg)
2768                         return -ENOMEM;
2769                 for (j = 1; j < bp->max_tpa; j++)
2770                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2771                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2772                                               GFP_KERNEL);
2773                 if (!rxr->rx_tpa_idx_map)
2774                         return -ENOMEM;
2775         }
2776         return 0;
2777 }
2778
2779 static void bnxt_free_rx_rings(struct bnxt *bp)
2780 {
2781         int i;
2782
2783         if (!bp->rx_ring)
2784                 return;
2785
2786         bnxt_free_tpa_info(bp);
2787         for (i = 0; i < bp->rx_nr_rings; i++) {
2788                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2789                 struct bnxt_ring_struct *ring;
2790
2791                 if (rxr->xdp_prog)
2792                         bpf_prog_put(rxr->xdp_prog);
2793
2794                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2795                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2796
2797                 page_pool_destroy(rxr->page_pool);
2798                 rxr->page_pool = NULL;
2799
2800                 kfree(rxr->rx_agg_bmap);
2801                 rxr->rx_agg_bmap = NULL;
2802
2803                 ring = &rxr->rx_ring_struct;
2804                 bnxt_free_ring(bp, &ring->ring_mem);
2805
2806                 ring = &rxr->rx_agg_ring_struct;
2807                 bnxt_free_ring(bp, &ring->ring_mem);
2808         }
2809 }
2810
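/* Create the page_pool backing one RX ring, sized to match the RX ring
 * and bound to the PCI device's NUMA node, mapped bidirectionally for DMA.
 */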
2811 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2812                                    struct bnxt_rx_ring_info *rxr)
2813 {
2814         struct page_pool_params pp = { 0 };
2815
2816         pp.pool_size = bp->rx_ring_size;
2817         pp.nid = dev_to_node(&bp->pdev->dev);
2818         pp.dev = &bp->pdev->dev;
2819         pp.dma_dir = DMA_BIDIRECTIONAL;
2820
2821         rxr->page_pool = page_pool_create(&pp);
2822         if (IS_ERR(rxr->page_pool)) {
2823                 int err = PTR_ERR(rxr->page_pool);
2824
2825                 rxr->page_pool = NULL;
2826                 return err;
2827         }
2828         return 0;
2829 }
2830
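/* Allocate the resources for every RX ring: the page_pool, the XDP rxq
 * registration, the RX descriptor ring and, when aggregation rings are in
 * use, the aggregation ring plus its buffer bitmap.  TPA state is
 * allocated last if the TPA flag is set.
 */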
2831 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2832 {
2833         int i, rc = 0, agg_rings = 0;
2834
2835         if (!bp->rx_ring)
2836                 return -ENOMEM;
2837
2838         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2839                 agg_rings = 1;
2840
2841         for (i = 0; i < bp->rx_nr_rings; i++) {
2842                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2843                 struct bnxt_ring_struct *ring;
2844
2845                 ring = &rxr->rx_ring_struct;
2846
2847                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2848                 if (rc)
2849                         return rc;
2850
2851                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2852                 if (rc < 0)
2853                         return rc;
2854
2855                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2856                                                 MEM_TYPE_PAGE_POOL,
2857                                                 rxr->page_pool);
2858                 if (rc) {
2859                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2860                         return rc;
2861                 }
2862
2863                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2864                 if (rc)
2865                         return rc;
2866
2867                 ring->grp_idx = i;
2868                 if (agg_rings) {
2869                         u16 mem_size;
2870
2871                         ring = &rxr->rx_agg_ring_struct;
2872                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2873                         if (rc)
2874                                 return rc;
2875
2876                         ring->grp_idx = i;
2877                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2878                         mem_size = rxr->rx_agg_bmap_size / 8;
2879                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2880                         if (!rxr->rx_agg_bmap)
2881                                 return -ENOMEM;
2882                 }
2883         }
2884         if (bp->flags & BNXT_FLAG_TPA)
2885                 rc = bnxt_alloc_tpa_info(bp);
2886         return rc;
2887 }
2888
2889 static void bnxt_free_tx_rings(struct bnxt *bp)
2890 {
2891         int i;
2892         struct pci_dev *pdev = bp->pdev;
2893
2894         if (!bp->tx_ring)
2895                 return;
2896
2897         for (i = 0; i < bp->tx_nr_rings; i++) {
2898                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2899                 struct bnxt_ring_struct *ring;
2900
2901                 if (txr->tx_push) {
2902                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2903                                           txr->tx_push, txr->tx_push_mapping);
2904                         txr->tx_push = NULL;
2905                 }
2906
2907                 ring = &txr->tx_ring_struct;
2908
2909                 bnxt_free_ring(bp, &ring->ring_mem);
2910         }
2911 }
2912
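/* Allocate the resources for every TX ring.  TX push is enabled only when
 * the push buffer fits in 256 bytes; when it does, a DMA-coherent push
 * buffer is allocated per ring.  Each ring is then mapped to a hardware
 * queue through the TC-to-queue-index table.
 */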
2913 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2914 {
2915         int i, j, rc;
2916         struct pci_dev *pdev = bp->pdev;
2917
2918         bp->tx_push_size = 0;
2919         if (bp->tx_push_thresh) {
2920                 int push_size;
2921
2922                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2923                                         bp->tx_push_thresh);
2924
2925                 if (push_size > 256) {
2926                         push_size = 0;
2927                         bp->tx_push_thresh = 0;
2928                 }
2929
2930                 bp->tx_push_size = push_size;
2931         }
2932
2933         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2934                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2935                 struct bnxt_ring_struct *ring;
2936                 u8 qidx;
2937
2938                 ring = &txr->tx_ring_struct;
2939
2940                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2941                 if (rc)
2942                         return rc;
2943
2944                 ring->grp_idx = txr->bnapi->index;
2945                 if (bp->tx_push_size) {
2946                         dma_addr_t mapping;
2947
2948                         /* One pre-allocated DMA buffer to back up the
2949                          * TX push operation
2950                          */
2951                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2952                                                 bp->tx_push_size,
2953                                                 &txr->tx_push_mapping,
2954                                                 GFP_KERNEL);
2955
2956                         if (!txr->tx_push)
2957                                 return -ENOMEM;
2958
2959                         mapping = txr->tx_push_mapping +
2960                                 sizeof(struct tx_push_bd);
2961                         txr->data_mapping = cpu_to_le64(mapping);
2962                 }
2963                 qidx = bp->tc_to_qidx[j];
2964                 ring->queue_id = bp->q_info[qidx].queue_id;
2965                 if (i < bp->tx_nr_rings_xdp)
2966                         continue;
2967                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2968                         j++;
2969         }
2970         return 0;
2971 }
2972
2973 static void bnxt_free_cp_rings(struct bnxt *bp)
2974 {
2975         int i;
2976
2977         if (!bp->bnapi)
2978                 return;
2979
2980         for (i = 0; i < bp->cp_nr_rings; i++) {
2981                 struct bnxt_napi *bnapi = bp->bnapi[i];
2982                 struct bnxt_cp_ring_info *cpr;
2983                 struct bnxt_ring_struct *ring;
2984                 int j;
2985
2986                 if (!bnapi)
2987                         continue;
2988
2989                 cpr = &bnapi->cp_ring;
2990                 ring = &cpr->cp_ring_struct;
2991
2992                 bnxt_free_ring(bp, &ring->ring_mem);
2993
2994                 for (j = 0; j < 2; j++) {
2995                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2996
2997                         if (cpr2) {
2998                                 ring = &cpr2->cp_ring_struct;
2999                                 bnxt_free_ring(bp, &ring->ring_mem);
3000                                 kfree(cpr2);
3001                                 cpr->cp_ring_arr[j] = NULL;
3002                         }
3003                 }
3004         }
3005 }
3006
3007 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3008 {
3009         struct bnxt_ring_mem_info *rmem;
3010         struct bnxt_ring_struct *ring;
3011         struct bnxt_cp_ring_info *cpr;
3012         int rc;
3013
3014         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3015         if (!cpr)
3016                 return NULL;
3017
3018         ring = &cpr->cp_ring_struct;
3019         rmem = &ring->ring_mem;
3020         rmem->nr_pages = bp->cp_nr_pages;
3021         rmem->page_size = HW_CMPD_RING_SIZE;
3022         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3023         rmem->dma_arr = cpr->cp_desc_mapping;
3024         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3025         rc = bnxt_alloc_ring(bp, rmem);
3026         if (rc) {
3027                 bnxt_free_ring(bp, rmem);
3028                 kfree(cpr);
3029                 cpr = NULL;
3030         }
3031         return cpr;
3032 }
3033
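/* Allocate the completion ring for each NAPI instance and map it to its
 * MSI-X vector, stepping over vectors reserved for the ULP driver.  On P5
 * chips, dedicated RX and TX completion sub-rings are also allocated and
 * attached through cp_ring_arr.
 */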
3034 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3035 {
3036         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3037         int i, rc, ulp_base_vec, ulp_msix;
3038
3039         ulp_msix = bnxt_get_ulp_msix_num(bp);
3040         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3041         for (i = 0; i < bp->cp_nr_rings; i++) {
3042                 struct bnxt_napi *bnapi = bp->bnapi[i];
3043                 struct bnxt_cp_ring_info *cpr;
3044                 struct bnxt_ring_struct *ring;
3045
3046                 if (!bnapi)
3047                         continue;
3048
3049                 cpr = &bnapi->cp_ring;
3050                 cpr->bnapi = bnapi;
3051                 ring = &cpr->cp_ring_struct;
3052
3053                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3054                 if (rc)
3055                         return rc;
3056
3057                 if (ulp_msix && i >= ulp_base_vec)
3058                         ring->map_idx = i + ulp_msix;
3059                 else
3060                         ring->map_idx = i;
3061
3062                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3063                         continue;
3064
3065                 if (i < bp->rx_nr_rings) {
3066                         struct bnxt_cp_ring_info *cpr2 =
3067                                 bnxt_alloc_cp_sub_ring(bp);
3068
3069                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3070                         if (!cpr2)
3071                                 return -ENOMEM;
3072                         cpr2->bnapi = bnapi;
3073                 }
3074                 if ((sh && i < bp->tx_nr_rings) ||
3075                     (!sh && i >= bp->rx_nr_rings)) {
3076                         struct bnxt_cp_ring_info *cpr2 =
3077                                 bnxt_alloc_cp_sub_ring(bp);
3078
3079                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3080                         if (!cpr2)
3081                                 return -ENOMEM;
3082                         cpr2->bnapi = bnapi;
3083                 }
3084         }
3085         return 0;
3086 }
3087
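/* Fill in the ring_mem descriptors (page counts, page sizes, descriptor
 * and software-ring arrays) for every completion, RX, RX aggregation and
 * TX ring prior to allocating their memory.
 */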
3088 static void bnxt_init_ring_struct(struct bnxt *bp)
3089 {
3090         int i;
3091
3092         for (i = 0; i < bp->cp_nr_rings; i++) {
3093                 struct bnxt_napi *bnapi = bp->bnapi[i];
3094                 struct bnxt_ring_mem_info *rmem;
3095                 struct bnxt_cp_ring_info *cpr;
3096                 struct bnxt_rx_ring_info *rxr;
3097                 struct bnxt_tx_ring_info *txr;
3098                 struct bnxt_ring_struct *ring;
3099
3100                 if (!bnapi)
3101                         continue;
3102
3103                 cpr = &bnapi->cp_ring;
3104                 ring = &cpr->cp_ring_struct;
3105                 rmem = &ring->ring_mem;
3106                 rmem->nr_pages = bp->cp_nr_pages;
3107                 rmem->page_size = HW_CMPD_RING_SIZE;
3108                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3109                 rmem->dma_arr = cpr->cp_desc_mapping;
3110                 rmem->vmem_size = 0;
3111
3112                 rxr = bnapi->rx_ring;
3113                 if (!rxr)
3114                         goto skip_rx;
3115
3116                 ring = &rxr->rx_ring_struct;
3117                 rmem = &ring->ring_mem;
3118                 rmem->nr_pages = bp->rx_nr_pages;
3119                 rmem->page_size = HW_RXBD_RING_SIZE;
3120                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3121                 rmem->dma_arr = rxr->rx_desc_mapping;
3122                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3123                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3124
3125                 ring = &rxr->rx_agg_ring_struct;
3126                 rmem = &ring->ring_mem;
3127                 rmem->nr_pages = bp->rx_agg_nr_pages;
3128                 rmem->page_size = HW_RXBD_RING_SIZE;
3129                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3130                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3131                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3132                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3133
3134 skip_rx:
3135                 txr = bnapi->tx_ring;
3136                 if (!txr)
3137                         continue;
3138
3139                 ring = &txr->tx_ring_struct;
3140                 rmem = &ring->ring_mem;
3141                 rmem->nr_pages = bp->tx_nr_pages;
3142                 rmem->page_size = HW_RXBD_RING_SIZE;
3143                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3144                 rmem->dma_arr = txr->tx_desc_mapping;
3145                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3146                 rmem->vmem = (void **)&txr->tx_buf_ring;
3147         }
3148 }
3149
3150 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3151 {
3152         int i;
3153         u32 prod;
3154         struct rx_bd **rx_buf_ring;
3155
3156         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3157         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3158                 int j;
3159                 struct rx_bd *rxbd;
3160
3161                 rxbd = rx_buf_ring[i];
3162                 if (!rxbd)
3163                         continue;
3164
3165                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3166                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3167                         rxbd->rx_bd_opaque = prod;
3168                 }
3169         }
3170 }
3171
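/* Initialize one RX ring: stamp its buffer descriptors, attach the XDP
 * program in page mode, fill the RX and aggregation rings with buffers,
 * and pre-allocate the TPA data buffers when TPA is enabled.
 */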
3172 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3173 {
3174         struct net_device *dev = bp->dev;
3175         struct bnxt_rx_ring_info *rxr;
3176         struct bnxt_ring_struct *ring;
3177         u32 prod, type;
3178         int i;
3179
3180         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3181                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3182
3183         if (NET_IP_ALIGN == 2)
3184                 type |= RX_BD_FLAGS_SOP;
3185
3186         rxr = &bp->rx_ring[ring_nr];
3187         ring = &rxr->rx_ring_struct;
3188         bnxt_init_rxbd_pages(ring, type);
3189
3190         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3191                 bpf_prog_add(bp->xdp_prog, 1);
3192                 rxr->xdp_prog = bp->xdp_prog;
3193         }
3194         prod = rxr->rx_prod;
3195         for (i = 0; i < bp->rx_ring_size; i++) {
3196                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3197                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3198                                     ring_nr, i, bp->rx_ring_size);
3199                         break;
3200                 }
3201                 prod = NEXT_RX(prod);
3202         }
3203         rxr->rx_prod = prod;
3204         ring->fw_ring_id = INVALID_HW_RING_ID;
3205
3206         ring = &rxr->rx_agg_ring_struct;
3207         ring->fw_ring_id = INVALID_HW_RING_ID;
3208
3209         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3210                 return 0;
3211
3212         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3213                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3214
3215         bnxt_init_rxbd_pages(ring, type);
3216
3217         prod = rxr->rx_agg_prod;
3218         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3219                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3220                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3221                                     ring_nr, i, bp->rx_agg_ring_size);
3222                         break;
3223                 }
3224                 prod = NEXT_RX_AGG(prod);
3225         }
3226         rxr->rx_agg_prod = prod;
3227
3228         if (bp->flags & BNXT_FLAG_TPA) {
3229                 if (rxr->rx_tpa) {
3230                         u8 *data;
3231                         dma_addr_t mapping;
3232
3233                         for (i = 0; i < bp->max_tpa; i++) {
3234                                 data = __bnxt_alloc_rx_data(bp, &mapping,
3235                                                             GFP_KERNEL);
3236                                 if (!data)
3237                                         return -ENOMEM;
3238
3239                                 rxr->rx_tpa[i].data = data;
3240                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3241                                 rxr->rx_tpa[i].mapping = mapping;
3242                         }
3243                 } else {
3244                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3245                         return -ENOMEM;
3246                 }
3247         }
3248
3249         return 0;
3250 }
3251
3252 static void bnxt_init_cp_rings(struct bnxt *bp)
3253 {
3254         int i, j;
3255
3256         for (i = 0; i < bp->cp_nr_rings; i++) {
3257                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3258                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3259
3260                 ring->fw_ring_id = INVALID_HW_RING_ID;
3261                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3262                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3263                 for (j = 0; j < 2; j++) {
3264                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3265
3266                         if (!cpr2)
3267                                 continue;
3268
3269                         ring = &cpr2->cp_ring_struct;
3270                         ring->fw_ring_id = INVALID_HW_RING_ID;
3271                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3272                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3273                 }
3274         }
3275 }
3276
3277 static int bnxt_init_rx_rings(struct bnxt *bp)
3278 {
3279         int i, rc = 0;
3280
3281         if (BNXT_RX_PAGE_MODE(bp)) {
3282                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3283                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3284         } else {
3285                 bp->rx_offset = BNXT_RX_OFFSET;
3286                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3287         }
3288
3289         for (i = 0; i < bp->rx_nr_rings; i++) {
3290                 rc = bnxt_init_one_rx_ring(bp, i);
3291                 if (rc)
3292                         break;
3293         }
3294
3295         return rc;
3296 }
3297
3298 static int bnxt_init_tx_rings(struct bnxt *bp)
3299 {
3300         u16 i;
3301
3302         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3303                                    MAX_SKB_FRAGS + 1);
3304
3305         for (i = 0; i < bp->tx_nr_rings; i++) {
3306                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3307                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3308
3309                 ring->fw_ring_id = INVALID_HW_RING_ID;
3310         }
3311
3312         return 0;
3313 }
3314
3315 static void bnxt_free_ring_grps(struct bnxt *bp)
3316 {
3317         kfree(bp->grp_info);
3318         bp->grp_info = NULL;
3319 }
3320
3321 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3322 {
3323         int i;
3324
3325         if (irq_re_init) {
3326                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3327                                        sizeof(struct bnxt_ring_grp_info),
3328                                        GFP_KERNEL);
3329                 if (!bp->grp_info)
3330                         return -ENOMEM;
3331         }
3332         for (i = 0; i < bp->cp_nr_rings; i++) {
3333                 if (irq_re_init)
3334                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3335                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3336                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3337                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3338                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3339         }
3340         return 0;
3341 }
3342
3343 static void bnxt_free_vnics(struct bnxt *bp)
3344 {
3345         kfree(bp->vnic_info);
3346         bp->vnic_info = NULL;
3347         bp->nr_vnics = 0;
3348 }
3349
3350 static int bnxt_alloc_vnics(struct bnxt *bp)
3351 {
3352         int num_vnics = 1;
3353
3354 #ifdef CONFIG_RFS_ACCEL
3355         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3356                 num_vnics += bp->rx_nr_rings;
3357 #endif
3358
3359         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3360                 num_vnics++;
3361
3362         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3363                                 GFP_KERNEL);
3364         if (!bp->vnic_info)
3365                 return -ENOMEM;
3366
3367         bp->nr_vnics = num_vnics;
3368         return 0;
3369 }
3370
3371 static void bnxt_init_vnics(struct bnxt *bp)
3372 {
3373         int i;
3374
3375         for (i = 0; i < bp->nr_vnics; i++) {
3376                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3377                 int j;
3378
3379                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3380                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3381                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3382
3383                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3384
3385                 if (bp->vnic_info[i].rss_hash_key) {
3386                         if (i == 0)
3387                                 prandom_bytes(vnic->rss_hash_key,
3388                                               HW_HASH_KEY_SIZE);
3389                         else
3390                                 memcpy(vnic->rss_hash_key,
3391                                        bp->vnic_info[0].rss_hash_key,
3392                                        HW_HASH_KEY_SIZE);
3393                 }
3394         }
3395 }
3396
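/* Return the number of descriptor pages to allocate for @ring_size
 * entries: ring_size / desc_per_pg plus one, rounded up to a power of
 * two, with a minimum of one page.
 */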
3397 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3398 {
3399         int pages;
3400
3401         pages = ring_size / desc_per_pg;
3402
3403         if (!pages)
3404                 return 1;
3405
3406         pages++;
3407
3408         while (pages & (pages - 1))
3409                 pages++;
3410
3411         return pages;
3412 }
3413
3414 void bnxt_set_tpa_flags(struct bnxt *bp)
3415 {
3416         bp->flags &= ~BNXT_FLAG_TPA;
3417         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3418                 return;
3419         if (bp->dev->features & NETIF_F_LRO)
3420                 bp->flags |= BNXT_FLAG_LRO;
3421         else if (bp->dev->features & NETIF_F_GRO_HW)
3422                 bp->flags |= BNXT_FLAG_GRO;
3423 }
3424
3425 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3426  * be set on entry.
3427  */
3428 void bnxt_set_ring_params(struct bnxt *bp)
3429 {
3430         u32 ring_size, rx_size, rx_space;
3431         u32 agg_factor = 0, agg_ring_size = 0;
3432
3433         /* 8 for CRC and VLAN */
3434         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3435
3436         rx_space = rx_size + NET_SKB_PAD +
3437                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3438
3439         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3440         ring_size = bp->rx_ring_size;
3441         bp->rx_agg_ring_size = 0;
3442         bp->rx_agg_nr_pages = 0;
3443
3444         if (bp->flags & BNXT_FLAG_TPA)
3445                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3446
3447         bp->flags &= ~BNXT_FLAG_JUMBO;
3448         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3449                 u32 jumbo_factor;
3450
3451                 bp->flags |= BNXT_FLAG_JUMBO;
3452                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3453                 if (jumbo_factor > agg_factor)
3454                         agg_factor = jumbo_factor;
3455         }
3456         agg_ring_size = ring_size * agg_factor;
3457
3458         if (agg_ring_size) {
3459                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3460                                                         RX_DESC_CNT);
3461                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3462                         u32 tmp = agg_ring_size;
3463
3464                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3465                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3466                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3467                                     tmp, agg_ring_size);
3468                 }
3469                 bp->rx_agg_ring_size = agg_ring_size;
3470                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3471                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3472                 rx_space = rx_size + NET_SKB_PAD +
3473                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3474         }
3475
3476         bp->rx_buf_use_size = rx_size;
3477         bp->rx_buf_size = rx_space;
3478
3479         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3480         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3481
3482         ring_size = bp->tx_ring_size;
3483         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3484         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3485
3486         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3487         bp->cp_ring_size = ring_size;
3488
3489         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3490         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3491                 bp->cp_nr_pages = MAX_CP_PAGES;
3492                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3493                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3494                             ring_size, bp->cp_ring_size);
3495         }
3496         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3497         bp->cp_ring_mask = bp->cp_bit - 1;
3498 }
3499
3500 /* Changing allocation mode of RX rings.
3501  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3502  */
3503 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3504 {
3505         if (page_mode) {
3506                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3507                         return -EOPNOTSUPP;
3508                 bp->dev->max_mtu =
3509                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3510                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3511                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3512                 bp->rx_dir = DMA_BIDIRECTIONAL;
3513                 bp->rx_skb_func = bnxt_rx_page_skb;
3514                 /* Disable LRO or GRO_HW */
3515                 netdev_update_features(bp->dev);
3516         } else {
3517                 bp->dev->max_mtu = bp->max_mtu;
3518                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3519                 bp->rx_dir = DMA_FROM_DEVICE;
3520                 bp->rx_skb_func = bnxt_rx_skb;
3521         }
3522         return 0;
3523 }
3524
3525 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3526 {
3527         int i;
3528         struct bnxt_vnic_info *vnic;
3529         struct pci_dev *pdev = bp->pdev;
3530
3531         if (!bp->vnic_info)
3532                 return;
3533
3534         for (i = 0; i < bp->nr_vnics; i++) {
3535                 vnic = &bp->vnic_info[i];
3536
3537                 kfree(vnic->fw_grp_ids);
3538                 vnic->fw_grp_ids = NULL;
3539
3540                 kfree(vnic->uc_list);
3541                 vnic->uc_list = NULL;
3542
3543                 if (vnic->mc_list) {
3544                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3545                                           vnic->mc_list, vnic->mc_list_mapping);
3546                         vnic->mc_list = NULL;
3547                 }
3548
3549                 if (vnic->rss_table) {
3550                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
3551                                           vnic->rss_table,
3552                                           vnic->rss_table_dma_addr);
3553                         vnic->rss_table = NULL;
3554                 }
3555
3556                 vnic->rss_hash_key = NULL;
3557                 vnic->flags = 0;
3558         }
3559 }
3560
3561 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3562 {
3563         int i, rc = 0, size;
3564         struct bnxt_vnic_info *vnic;
3565         struct pci_dev *pdev = bp->pdev;
3566         int max_rings;
3567
3568         for (i = 0; i < bp->nr_vnics; i++) {
3569                 vnic = &bp->vnic_info[i];
3570
3571                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3572                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3573
3574                         if (mem_size > 0) {
3575                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3576                                 if (!vnic->uc_list) {
3577                                         rc = -ENOMEM;
3578                                         goto out;
3579                                 }
3580                         }
3581                 }
3582
3583                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3584                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3585                         vnic->mc_list =
3586                                 dma_alloc_coherent(&pdev->dev,
3587                                                    vnic->mc_list_size,
3588                                                    &vnic->mc_list_mapping,
3589                                                    GFP_KERNEL);
3590                         if (!vnic->mc_list) {
3591                                 rc = -ENOMEM;
3592                                 goto out;
3593                         }
3594                 }
3595
3596                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3597                         goto vnic_skip_grps;
3598
3599                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3600                         max_rings = bp->rx_nr_rings;
3601                 else
3602                         max_rings = 1;
3603
3604                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3605                 if (!vnic->fw_grp_ids) {
3606                         rc = -ENOMEM;
3607                         goto out;
3608                 }
3609 vnic_skip_grps:
3610                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3611                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3612                         continue;
3613
3614                 /* Allocate rss table and hash key */
3615                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3616                                                      &vnic->rss_table_dma_addr,
3617                                                      GFP_KERNEL);
3618                 if (!vnic->rss_table) {
3619                         rc = -ENOMEM;
3620                         goto out;
3621                 }
3622
3623                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3624
3625                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3626                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3627         }
3628         return 0;
3629
3630 out:
3631         return rc;
3632 }
3633
3634 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3635 {
3636         struct pci_dev *pdev = bp->pdev;
3637
3638         if (bp->hwrm_cmd_resp_addr) {
3639                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3640                                   bp->hwrm_cmd_resp_dma_addr);
3641                 bp->hwrm_cmd_resp_addr = NULL;
3642         }
3643
3644         if (bp->hwrm_cmd_kong_resp_addr) {
3645                 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3646                                   bp->hwrm_cmd_kong_resp_addr,
3647                                   bp->hwrm_cmd_kong_resp_dma_addr);
3648                 bp->hwrm_cmd_kong_resp_addr = NULL;
3649         }
3650 }
3651
3652 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3653 {
3654         struct pci_dev *pdev = bp->pdev;
3655
3656         if (bp->hwrm_cmd_kong_resp_addr)
3657                 return 0;
3658
3659         bp->hwrm_cmd_kong_resp_addr =
3660                 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3661                                    &bp->hwrm_cmd_kong_resp_dma_addr,
3662                                    GFP_KERNEL);
3663         if (!bp->hwrm_cmd_kong_resp_addr)
3664                 return -ENOMEM;
3665
3666         return 0;
3667 }
3668
3669 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3670 {
3671         struct pci_dev *pdev = bp->pdev;
3672
3673         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3674                                                    &bp->hwrm_cmd_resp_dma_addr,
3675                                                    GFP_KERNEL);
3676         if (!bp->hwrm_cmd_resp_addr)
3677                 return -ENOMEM;
3678
3679         return 0;
3680 }
3681
3682 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3683 {
3684         if (bp->hwrm_short_cmd_req_addr) {
3685                 struct pci_dev *pdev = bp->pdev;
3686
3687                 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3688                                   bp->hwrm_short_cmd_req_addr,
3689                                   bp->hwrm_short_cmd_req_dma_addr);
3690                 bp->hwrm_short_cmd_req_addr = NULL;
3691         }
3692 }
3693
3694 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3695 {
3696         struct pci_dev *pdev = bp->pdev;
3697
3698         if (bp->hwrm_short_cmd_req_addr)
3699                 return 0;
3700
3701         bp->hwrm_short_cmd_req_addr =
3702                 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3703                                    &bp->hwrm_short_cmd_req_dma_addr,
3704                                    GFP_KERNEL);
3705         if (!bp->hwrm_short_cmd_req_addr)
3706                 return -ENOMEM;
3707
3708         return 0;
3709 }
3710
3711 static void bnxt_free_port_stats(struct bnxt *bp)
3712 {
3713         struct pci_dev *pdev = bp->pdev;
3714
3715         bp->flags &= ~BNXT_FLAG_PORT_STATS;
3716         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3717
3718         if (bp->hw_rx_port_stats) {
3719                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3720                                   bp->hw_rx_port_stats,
3721                                   bp->hw_rx_port_stats_map);
3722                 bp->hw_rx_port_stats = NULL;
3723         }
3724
3725         if (bp->hw_tx_port_stats_ext) {
3726                 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3727                                   bp->hw_tx_port_stats_ext,
3728                                   bp->hw_tx_port_stats_ext_map);
3729                 bp->hw_tx_port_stats_ext = NULL;
3730         }
3731
3732         if (bp->hw_rx_port_stats_ext) {
3733                 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3734                                   bp->hw_rx_port_stats_ext,
3735                                   bp->hw_rx_port_stats_ext_map);
3736                 bp->hw_rx_port_stats_ext = NULL;
3737         }
3738
3739         if (bp->hw_pcie_stats) {
3740                 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3741                                   bp->hw_pcie_stats, bp->hw_pcie_stats_map);
3742                 bp->hw_pcie_stats = NULL;
3743         }
3744 }
3745
3746 static void bnxt_free_ring_stats(struct bnxt *bp)
3747 {
3748         struct pci_dev *pdev = bp->pdev;
3749         int size, i;
3750
3751         if (!bp->bnapi)
3752                 return;
3753
3754         size = bp->hw_ring_stats_size;
3755
3756         for (i = 0; i < bp->cp_nr_rings; i++) {
3757                 struct bnxt_napi *bnapi = bp->bnapi[i];
3758                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3759
3760                 if (cpr->hw_stats) {
3761                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3762                                           cpr->hw_stats_map);
3763                         cpr->hw_stats = NULL;
3764                 }
3765         }
3766 }
3767
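/* Allocate the per-completion-ring hardware statistics blocks.  Except on
 * VFs and the 58700 chip, also allocate the DMA buffers for port stats,
 * extended port stats and PCIe stats when the firmware supports them.
 */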
3768 static int bnxt_alloc_stats(struct bnxt *bp)
3769 {
3770         u32 size, i;
3771         struct pci_dev *pdev = bp->pdev;
3772
3773         size = bp->hw_ring_stats_size;
3774
3775         for (i = 0; i < bp->cp_nr_rings; i++) {
3776                 struct bnxt_napi *bnapi = bp->bnapi[i];
3777                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3778
3779                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3780                                                    &cpr->hw_stats_map,
3781                                                    GFP_KERNEL);
3782                 if (!cpr->hw_stats)
3783                         return -ENOMEM;
3784
3785                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3786         }
3787
3788         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3789                 return 0;
3790
3791         if (bp->hw_rx_port_stats)
3792                 goto alloc_ext_stats;
3793
3794         bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3795                                  sizeof(struct tx_port_stats) + 1024;
3796
3797         bp->hw_rx_port_stats =
3798                 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3799                                    &bp->hw_rx_port_stats_map,
3800                                    GFP_KERNEL);
3801         if (!bp->hw_rx_port_stats)
3802                 return -ENOMEM;
3803
3804         bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
3805         bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3806                                    sizeof(struct rx_port_stats) + 512;
3807         bp->flags |= BNXT_FLAG_PORT_STATS;
3808
3809 alloc_ext_stats:
3810         /* Display extended statistics only if FW supports it */
3811         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
3812                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
3813                         return 0;
3814
3815         if (bp->hw_rx_port_stats_ext)
3816                 goto alloc_tx_ext_stats;
3817
3818         bp->hw_rx_port_stats_ext =
3819                 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3820                                    &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
3821         if (!bp->hw_rx_port_stats_ext)
3822                 return 0;
3823
3824 alloc_tx_ext_stats:
3825         if (bp->hw_tx_port_stats_ext)
3826                 goto alloc_pcie_stats;
3827
3828         if (bp->hwrm_spec_code >= 0x10902 ||
3829             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
3830                 bp->hw_tx_port_stats_ext =
3831                         dma_alloc_coherent(&pdev->dev,
3832                                            sizeof(struct tx_port_stats_ext),
3833                                            &bp->hw_tx_port_stats_ext_map,
3834                                            GFP_KERNEL);
3835         }
3836         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3837
3838 alloc_pcie_stats:
3839         if (bp->hw_pcie_stats ||
3840             !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
3841                 return 0;
3842
3843         bp->hw_pcie_stats =
3844                 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3845                                    &bp->hw_pcie_stats_map, GFP_KERNEL);
3846         if (!bp->hw_pcie_stats)
3847                 return 0;
3848
3849         bp->flags |= BNXT_FLAG_PCIE_STATS;
3850         return 0;
3851 }
3852
3853 static void bnxt_clear_ring_indices(struct bnxt *bp)
3854 {
3855         int i;
3856
3857         if (!bp->bnapi)
3858                 return;
3859
3860         for (i = 0; i < bp->cp_nr_rings; i++) {
3861                 struct bnxt_napi *bnapi = bp->bnapi[i];
3862                 struct bnxt_cp_ring_info *cpr;
3863                 struct bnxt_rx_ring_info *rxr;
3864                 struct bnxt_tx_ring_info *txr;
3865
3866                 if (!bnapi)
3867                         continue;
3868
3869                 cpr = &bnapi->cp_ring;
3870                 cpr->cp_raw_cons = 0;
3871
3872                 txr = bnapi->tx_ring;
3873                 if (txr) {
3874                         txr->tx_prod = 0;
3875                         txr->tx_cons = 0;
3876                 }
3877
3878                 rxr = bnapi->rx_ring;
3879                 if (rxr) {
3880                         rxr->rx_prod = 0;
3881                         rxr->rx_agg_prod = 0;
3882                         rxr->rx_sw_agg_prod = 0;
3883                         rxr->rx_next_cons = 0;
3884                 }
3885         }
3886 }
3887
3888 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3889 {
3890 #ifdef CONFIG_RFS_ACCEL
3891         int i;
3892
3893         /* We hold rtnl_lock and all our NAPIs have been disabled, so it
3894          * is safe to delete the hash table.
3895          */
3896         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3897                 struct hlist_head *head;
3898                 struct hlist_node *tmp;
3899                 struct bnxt_ntuple_filter *fltr;
3900
3901                 head = &bp->ntp_fltr_hash_tbl[i];
3902                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3903                         hlist_del(&fltr->hash);
3904                         kfree(fltr);
3905                 }
3906         }
3907         if (irq_reinit) {
3908                 kfree(bp->ntp_fltr_bmap);
3909                 bp->ntp_fltr_bmap = NULL;
3910         }
3911         bp->ntp_fltr_count = 0;
3912 #endif
3913 }
3914
3915 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3916 {
3917 #ifdef CONFIG_RFS_ACCEL
3918         int i, rc = 0;
3919
3920         if (!(bp->flags & BNXT_FLAG_RFS))
3921                 return 0;
3922
3923         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3924                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3925
3926         bp->ntp_fltr_count = 0;
3927         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3928                                     sizeof(long),
3929                                     GFP_KERNEL);
3930
3931         if (!bp->ntp_fltr_bmap)
3932                 rc = -ENOMEM;
3933
3934         return rc;
3935 #else
3936         return 0;
3937 #endif
3938 }
3939
3940 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3941 {
3942         bnxt_free_vnic_attributes(bp);
3943         bnxt_free_tx_rings(bp);
3944         bnxt_free_rx_rings(bp);
3945         bnxt_free_cp_rings(bp);
3946         bnxt_free_ntp_fltrs(bp, irq_re_init);
3947         if (irq_re_init) {
3948                 bnxt_free_ring_stats(bp);
3949                 bnxt_free_ring_grps(bp);
3950                 bnxt_free_vnics(bp);
3951                 kfree(bp->tx_ring_map);
3952                 bp->tx_ring_map = NULL;
3953                 kfree(bp->tx_ring);
3954                 bp->tx_ring = NULL;
3955                 kfree(bp->rx_ring);
3956                 bp->rx_ring = NULL;
3957                 kfree(bp->bnapi);
3958                 bp->bnapi = NULL;
3959         } else {
3960                 bnxt_clear_ring_indices(bp);
3961         }
3962 }
3963
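/* Allocate all memory for the current ring configuration.  With
 * irq_re_init, the NAPI, RX ring, TX ring, stats, ntuple filter and VNIC
 * arrays are rebuilt first; the ring structures, RX/TX/CP rings and VNIC
 * attributes are then allocated, and everything is freed on failure.
 */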
3964 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3965 {
3966         int i, j, rc, size, arr_size;
3967         void *bnapi;
3968
3969         if (irq_re_init) {
3970                 /* Allocate bnapi mem pointer array and mem block for
3971                  * all queues
3972                  */
3973                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3974                                 bp->cp_nr_rings);
3975                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3976                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3977                 if (!bnapi)
3978                         return -ENOMEM;
3979
3980                 bp->bnapi = bnapi;
3981                 bnapi += arr_size;
3982                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3983                         bp->bnapi[i] = bnapi;
3984                         bp->bnapi[i]->index = i;
3985                         bp->bnapi[i]->bp = bp;
3986                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3987                                 struct bnxt_cp_ring_info *cpr =
3988                                         &bp->bnapi[i]->cp_ring;
3989
3990                                 cpr->cp_ring_struct.ring_mem.flags =
3991                                         BNXT_RMEM_RING_PTE_FLAG;
3992                         }
3993                 }
3994
3995                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3996                                       sizeof(struct bnxt_rx_ring_info),
3997                                       GFP_KERNEL);
3998                 if (!bp->rx_ring)
3999                         return -ENOMEM;
4000
4001                 for (i = 0; i < bp->rx_nr_rings; i++) {
4002                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4003
4004                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4005                                 rxr->rx_ring_struct.ring_mem.flags =
4006                                         BNXT_RMEM_RING_PTE_FLAG;
4007                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4008                                         BNXT_RMEM_RING_PTE_FLAG;
4009                         }
4010                         rxr->bnapi = bp->bnapi[i];
4011                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4012                 }
4013
4014                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4015                                       sizeof(struct bnxt_tx_ring_info),
4016                                       GFP_KERNEL);
4017                 if (!bp->tx_ring)
4018                         return -ENOMEM;
4019
4020                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4021                                           GFP_KERNEL);
4022
4023                 if (!bp->tx_ring_map)
4024                         return -ENOMEM;
4025
4026                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4027                         j = 0;
4028                 else
4029                         j = bp->rx_nr_rings;
4030
4031                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4032                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4033
4034                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4035                                 txr->tx_ring_struct.ring_mem.flags =
4036                                         BNXT_RMEM_RING_PTE_FLAG;
4037                         txr->bnapi = bp->bnapi[j];
4038                         bp->bnapi[j]->tx_ring = txr;
4039                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4040                         if (i >= bp->tx_nr_rings_xdp) {
4041                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4042                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4043                         } else {
4044                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4045                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4046                         }
4047                 }
4048
4049                 rc = bnxt_alloc_stats(bp);
4050                 if (rc)
4051                         goto alloc_mem_err;
4052
4053                 rc = bnxt_alloc_ntp_fltrs(bp);
4054                 if (rc)
4055                         goto alloc_mem_err;
4056
4057                 rc = bnxt_alloc_vnics(bp);
4058                 if (rc)
4059                         goto alloc_mem_err;
4060         }
4061
4062         bnxt_init_ring_struct(bp);
4063
4064         rc = bnxt_alloc_rx_rings(bp);
4065         if (rc)
4066                 goto alloc_mem_err;
4067
4068         rc = bnxt_alloc_tx_rings(bp);
4069         if (rc)
4070                 goto alloc_mem_err;
4071
4072         rc = bnxt_alloc_cp_rings(bp);
4073         if (rc)
4074                 goto alloc_mem_err;
4075
4076         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4077                                   BNXT_VNIC_UCAST_FLAG;
4078         rc = bnxt_alloc_vnic_attributes(bp);
4079         if (rc)
4080                 goto alloc_mem_err;
4081         return 0;
4082
4083 alloc_mem_err:
4084         bnxt_free_mem(bp, true);
4085         return rc;
4086 }
4087
4088 static void bnxt_disable_int(struct bnxt *bp)
4089 {
4090         int i;
4091
4092         if (!bp->bnapi)
4093                 return;
4094
4095         for (i = 0; i < bp->cp_nr_rings; i++) {
4096                 struct bnxt_napi *bnapi = bp->bnapi[i];
4097                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4098                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4099
4100                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4101                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4102         }
4103 }
4104
4105 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4106 {
4107         struct bnxt_napi *bnapi = bp->bnapi[n];
4108         struct bnxt_cp_ring_info *cpr;
4109
4110         cpr = &bnapi->cp_ring;
4111         return cpr->cp_ring_struct.map_idx;
4112 }
4113
4114 static void bnxt_disable_int_sync(struct bnxt *bp)
4115 {
4116         int i;
4117
4118         atomic_inc(&bp->intr_sem);
4119
4120         bnxt_disable_int(bp);
4121         for (i = 0; i < bp->cp_nr_rings; i++) {
4122                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4123
4124                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4125         }
4126 }
4127
4128 static void bnxt_enable_int(struct bnxt *bp)
4129 {
4130         int i;
4131
4132         atomic_set(&bp->intr_sem, 0);
4133         for (i = 0; i < bp->cp_nr_rings; i++) {
4134                 struct bnxt_napi *bnapi = bp->bnapi[i];
4135                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4136
4137                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4138         }
4139 }
4140
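/* Fill in the common HWRM request header: request type, completion ring,
 * target ID and the DMA address of the response buffer for whichever
 * channel (KONG or CHIMP) will carry the message.
 */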
4141 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4142                             u16 cmpl_ring, u16 target_id)
4143 {
4144         struct input *req = request;
4145
4146         req->req_type = cpu_to_le16(req_type);
4147         req->cmpl_ring = cpu_to_le16(cmpl_ring);
4148         req->target_id = cpu_to_le16(target_id);
4149         if (bnxt_kong_hwrm_message(bp, req))
4150                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4151         else
4152                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4153 }
4154
4155 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4156 {
4157         switch (hwrm_err) {
4158         case HWRM_ERR_CODE_SUCCESS:
4159                 return 0;
4160         case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4161                 return -EACCES;
4162         case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4163                 return -ENOSPC;
4164         case HWRM_ERR_CODE_INVALID_PARAMS:
4165         case HWRM_ERR_CODE_INVALID_FLAGS:
4166         case HWRM_ERR_CODE_INVALID_ENABLES:
4167         case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4168         case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4169                 return -EINVAL;
4170         case HWRM_ERR_CODE_NO_BUFFER:
4171                 return -ENOMEM;
4172         case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4173                 return -EAGAIN;
4174         case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4175                 return -EOPNOTSUPP;
4176         default:
4177                 return -EIO;
4178         }
4179 }
4180
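/* Send one HWRM request to firmware and wait for its response.  The
 * request is copied into the communication area in BAR0 and the channel
 * doorbell is rung.  Completion is detected either through the HWRM
 * completion ring interrupt (when cmpl_ring is valid) or by polling the
 * response length and valid byte in the DMA response buffer.  Callers
 * must hold bp->hwrm_cmd_lock.
 */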
4181 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4182                                  int timeout, bool silent)
4183 {
4184         int i, intr_process, rc, tmo_count;
4185         struct input *req = msg;
4186         u32 *data = msg;
4187         __le32 *resp_len;
4188         u8 *valid;
4189         u16 cp_ring_id, len = 0;
4190         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4191         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4192         struct hwrm_short_input short_input = {0};
4193         u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4194         u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
4195         u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4196         u16 dst = BNXT_HWRM_CHNL_CHIMP;
4197
4198         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4199                 return -EBUSY;
4200
4201         if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4202                 if (msg_len > bp->hwrm_max_ext_req_len ||
4203                     !bp->hwrm_short_cmd_req_addr)
4204                         return -EINVAL;
4205         }
4206
4207         if (bnxt_hwrm_kong_chnl(bp, req)) {
4208                 dst = BNXT_HWRM_CHNL_KONG;
4209                 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4210                 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4211                 resp = bp->hwrm_cmd_kong_resp_addr;
4212                 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
4213         }
4214
4215         memset(resp, 0, PAGE_SIZE);
4216         cp_ring_id = le16_to_cpu(req->cmpl_ring);
4217         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4218
4219         req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4220         /* currently supports only one outstanding message */
4221         if (intr_process)
4222                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4223
4224         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4225             msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4226                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4227                 u16 max_msg_len;
4228
4229                 /* Set the boundary for the maximum extended request length for
4230                  * the short cmd format.  If passed up from the device, use the
4231                  * max supported internal req length.
4232                  */
4233                 max_msg_len = bp->hwrm_max_ext_req_len;
4234
4235                 memcpy(short_cmd_req, req, msg_len);
4236                 if (msg_len < max_msg_len)
4237                         memset(short_cmd_req + msg_len, 0,
4238                                max_msg_len - msg_len);
4239
4240                 short_input.req_type = req->req_type;
4241                 short_input.signature =
4242                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4243                 short_input.size = cpu_to_le16(msg_len);
4244                 short_input.req_addr =
4245                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4246
4247                 data = (u32 *)&short_input;
4248                 msg_len = sizeof(short_input);
4249
4250                 /* Sync memory write before updating doorbell */
4251                 wmb();
4252
4253                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4254         }
4255
4256         /* Write request msg to hwrm channel */
4257         __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4258
4259         for (i = msg_len; i < max_req_len; i += 4)
4260                 writel(0, bp->bar0 + bar_offset + i);
4261
4262         /* Ring channel doorbell */
4263         writel(1, bp->bar0 + doorbell_offset);
4264
4265         if (!pci_is_enabled(bp->pdev))
4266                 return 0;
4267
4268         if (!timeout)
4269                 timeout = DFLT_HWRM_CMD_TIMEOUT;
4270         /* convert timeout to usec */
4271         timeout *= 1000;
4272
4273         i = 0;
4274         /* Short timeout for the first few iterations:
4275          * number of loops = number of loops for short timeout +
4276          * number of loops for standard timeout.
4277          */
4278         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4279         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4280         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4281         resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
4282
4283         if (intr_process) {
4284                 u16 seq_id = bp->hwrm_intr_seq_id;
4285
4286                 /* Wait until hwrm response cmpl interrupt is processed */
4287                 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4288                        i++ < tmo_count) {
4289                         /* Abort the wait for completion if the FW health
4290                          * check has failed.
4291                          */
4292                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4293                                 return -EBUSY;
4294                         /* on first few passes, just barely sleep */
4295                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4296                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4297                                              HWRM_SHORT_MAX_TIMEOUT);
4298                         else
4299                                 usleep_range(HWRM_MIN_TIMEOUT,
4300                                              HWRM_MAX_TIMEOUT);
4301                 }
4302
4303                 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4304                         if (!silent)
4305                                 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4306                                            le16_to_cpu(req->req_type));
4307                         return -EBUSY;
4308                 }
4309                 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4310                       HWRM_RESP_LEN_SFT;
4311                 valid = resp_addr + len - 1;
4312         } else {
4313                 int j;
4314
4315                 /* Check if response len is updated */
4316                 for (i = 0; i < tmo_count; i++) {
4317                         /* Abort the wait for completion if the FW health
4318                          * check has failed.
4319                          */
4320                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4321                                 return -EBUSY;
4322                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4323                               HWRM_RESP_LEN_SFT;
4324                         if (len)
4325                                 break;
4326                         /* on first few passes, just barely sleep */
4327                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4328                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4329                                              HWRM_SHORT_MAX_TIMEOUT);
4330                         else
4331                                 usleep_range(HWRM_MIN_TIMEOUT,
4332                                              HWRM_MAX_TIMEOUT);
4333                 }
4334
4335                 if (i >= tmo_count) {
4336                         if (!silent)
4337                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4338                                            HWRM_TOTAL_TIMEOUT(i),
4339                                            le16_to_cpu(req->req_type),
4340                                            le16_to_cpu(req->seq_id), len);
4341                         return -EBUSY;
4342                 }
4343
4344                 /* Last byte of resp contains valid bit */
4345                 valid = resp_addr + len - 1;
4346                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4347                         /* make sure we read from updated DMA memory */
4348                         dma_rmb();
4349                         if (*valid)
4350                                 break;
4351                         usleep_range(1, 5);
4352                 }
4353
4354                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4355                         if (!silent)
4356                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4357                                            HWRM_TOTAL_TIMEOUT(i),
4358                                            le16_to_cpu(req->req_type),
4359                                            le16_to_cpu(req->seq_id), len,
4360                                            *valid);
4361                         return -EBUSY;
4362                 }
4363         }
4364
4365         /* Zero the valid bit for compatibility.  The valid bit in an older
4366          * spec may become a new field in a newer spec.  We must make sure
4367          * that a new field not implemented by the old spec reads as zero.
4368          */
4369         *valid = 0;
4370         rc = le16_to_cpu(resp->error_code);
4371         if (rc && !silent)
4372                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4373                            le16_to_cpu(resp->req_type),
4374                            le16_to_cpu(resp->seq_id), rc);
4375         return bnxt_hwrm_to_stderr(rc);
4376 }
4377
4378 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4379 {
4380         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4381 }
4382
4383 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4384                               int timeout)
4385 {
4386         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4387 }
4388
4389 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4390 {
4391         int rc;
4392
4393         mutex_lock(&bp->hwrm_cmd_lock);
4394         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4395         mutex_unlock(&bp->hwrm_cmd_lock);
4396         return rc;
4397 }
4398
4399 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4400                              int timeout)
4401 {
4402         int rc;
4403
4404         mutex_lock(&bp->hwrm_cmd_lock);
4405         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4406         mutex_unlock(&bp->hwrm_cmd_lock);
4407         return rc;
4408 }
4409
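/* Register the driver with firmware: report the OS type and driver
 * version, advertise hot reset / error recovery support, and tell
 * firmware which async events (plus any caller-supplied bitmap) to
 * forward to the driver.
 */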
4410 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4411                             bool async_only)
4412 {
4413         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4414         struct hwrm_func_drv_rgtr_input req = {0};
4415         DECLARE_BITMAP(async_events_bmap, 256);
4416         u32 *events = (u32 *)async_events_bmap;
4417         u32 flags;
4418         int rc, i;
4419
4420         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4421
4422         req.enables =
4423                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4424                             FUNC_DRV_RGTR_REQ_ENABLES_VER |
4425                             FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4426
4427         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4428         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4429         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4430                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4431         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4432                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4433                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4434         req.flags = cpu_to_le32(flags);
4435         req.ver_maj_8b = DRV_VER_MAJ;
4436         req.ver_min_8b = DRV_VER_MIN;
4437         req.ver_upd_8b = DRV_VER_UPD;
4438         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4439         req.ver_min = cpu_to_le16(DRV_VER_MIN);
4440         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4441
4442         if (BNXT_PF(bp)) {
4443                 u32 data[8];
4444                 int i;
4445
4446                 memset(data, 0, sizeof(data));
4447                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4448                         u16 cmd = bnxt_vf_req_snif[i];
4449                         unsigned int bit, idx;
4450
4451                         idx = cmd / 32;
4452                         bit = cmd % 32;
4453                         data[idx] |= 1 << bit;
4454                 }
4455
4456                 for (i = 0; i < 8; i++)
4457                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4458
4459                 req.enables |=
4460                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4461         }
4462
4463         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4464                 req.flags |= cpu_to_le32(
4465                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4466
4467         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4468         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4469                 u16 event_id = bnxt_async_events_arr[i];
4470
4471                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4472                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4473                         continue;
4474                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4475         }
4476         if (bmap && bmap_size) {
4477                 for (i = 0; i < bmap_size; i++) {
4478                         if (test_bit(i, bmap))
4479                                 __set_bit(i, async_events_bmap);
4480                 }
4481         }
4482         for (i = 0; i < 8; i++)
4483                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4484
4485         if (async_only)
4486                 req.enables =
4487                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4488
4489         mutex_lock(&bp->hwrm_cmd_lock);
4490         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4491         if (!rc) {
4492                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4493                 if (resp->flags &
4494                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4495                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4496         }
4497         mutex_unlock(&bp->hwrm_cmd_lock);
4498         return rc;
4499 }
4500
4501 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4502 {
4503         struct hwrm_func_drv_unrgtr_input req = {0};
4504
4505         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4506                 return 0;
4507
4508         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4509         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4510 }
4511
4512 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4513 {
4514         int rc = 0;
4515         struct hwrm_tunnel_dst_port_free_input req = {0};
4516
4517         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4518         req.tunnel_type = tunnel_type;
4519
4520         switch (tunnel_type) {
4521         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4522                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4523                 break;
4524         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4525                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4526                 break;
4527         default:
4528                 break;
4529         }
4530
4531         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4532         if (rc)
4533                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4534                            rc);
4535         return rc;
4536 }
4537
4538 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4539                                            u8 tunnel_type)
4540 {
4541         int rc = 0;
4542         struct hwrm_tunnel_dst_port_alloc_input req = {0};
4543         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4544
4545         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4546
4547         req.tunnel_type = tunnel_type;
4548         req.tunnel_dst_port_val = port;
4549
4550         mutex_lock(&bp->hwrm_cmd_lock);
4551         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4552         if (rc) {
4553                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4554                            rc);
4555                 goto err_out;
4556         }
4557
4558         switch (tunnel_type) {
4559         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4560                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4561                 break;
4562         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4563                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
4564                 break;
4565         default:
4566                 break;
4567         }
4568
4569 err_out:
4570         mutex_unlock(&bp->hwrm_cmd_lock);
4571         return rc;
4572 }
4573
4574 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4575 {
4576         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4577         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4578
4579         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4580         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4581
4582         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4583         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4584         req.mask = cpu_to_le32(vnic->rx_mask);
4585         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4586 }
4587
4588 #ifdef CONFIG_RFS_ACCEL
4589 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4590                                             struct bnxt_ntuple_filter *fltr)
4591 {
4592         struct hwrm_cfa_ntuple_filter_free_input req = {0};
4593
4594         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4595         req.ntuple_filter_id = fltr->filter_id;
4596         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4597 }
4598
4599 #define BNXT_NTP_FLTR_FLAGS                                     \
4600         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4601          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4602          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4603          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4604          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4605          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4606          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4607          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4608          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4609          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4610          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4611          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4612          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4613          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4614
4615 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4616                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4617
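/* Allocate a CFA ntuple filter that steers the flow described by
 * fltr->fkeys to the RX queue chosen by aRFS, either directly by ring
 * index or through the per-ring VNIC.
 */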
4618 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4619                                              struct bnxt_ntuple_filter *fltr)
4620 {
4621         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4622         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4623         struct flow_keys *keys = &fltr->fkeys;
4624         struct bnxt_vnic_info *vnic;
4625         u32 flags = 0;
4626         int rc = 0;
4627
4628         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4629         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4630
4631         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4632                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4633                 req.dst_id = cpu_to_le16(fltr->rxq);
4634         } else {
4635                 vnic = &bp->vnic_info[fltr->rxq + 1];
4636                 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4637         }
4638         req.flags = cpu_to_le32(flags);
4639         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4640
4641         req.ethertype = htons(ETH_P_IP);
4642         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4643         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4644         req.ip_protocol = keys->basic.ip_proto;
4645
4646         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4647                 int i;
4648
4649                 req.ethertype = htons(ETH_P_IPV6);
4650                 req.ip_addr_type =
4651                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4652                 *(struct in6_addr *)&req.src_ipaddr[0] =
4653                         keys->addrs.v6addrs.src;
4654                 *(struct in6_addr *)&req.dst_ipaddr[0] =
4655                         keys->addrs.v6addrs.dst;
4656                 for (i = 0; i < 4; i++) {
4657                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4658                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4659                 }
4660         } else {
4661                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4662                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4663                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4664                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4665         }
4666         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4667                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4668                 req.tunnel_type =
4669                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4670         }
4671
4672         req.src_port = keys->ports.src;
4673         req.src_port_mask = cpu_to_be16(0xffff);
4674         req.dst_port = keys->ports.dst;
4675         req.dst_port_mask = cpu_to_be16(0xffff);
4676
4677         mutex_lock(&bp->hwrm_cmd_lock);
4678         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4679         if (!rc) {
4680                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4681                 fltr->filter_id = resp->ntuple_filter_id;
4682         }
4683         mutex_unlock(&bp->hwrm_cmd_lock);
4684         return rc;
4685 }
4686 #endif
4687
4688 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4689                                      u8 *mac_addr)
4690 {
4691         int rc = 0;
4692         struct hwrm_cfa_l2_filter_alloc_input req = {0};
4693         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4694
4695         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4696         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4697         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4698                 req.flags |=
4699                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4700         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4701         req.enables =
4702                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4703                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4704                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4705         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4706         req.l2_addr_mask[0] = 0xff;
4707         req.l2_addr_mask[1] = 0xff;
4708         req.l2_addr_mask[2] = 0xff;
4709         req.l2_addr_mask[3] = 0xff;
4710         req.l2_addr_mask[4] = 0xff;
4711         req.l2_addr_mask[5] = 0xff;
4712
4713         mutex_lock(&bp->hwrm_cmd_lock);
4714         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4715         if (!rc)
4716                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4717                                                         resp->l2_filter_id;
4718         mutex_unlock(&bp->hwrm_cmd_lock);
4719         return rc;
4720 }
4721
4722 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4723 {
4724         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4725         int rc = 0;
4726
4727         /* Any associated ntuple filters will also be cleared by firmware. */
4728         mutex_lock(&bp->hwrm_cmd_lock);
4729         for (i = 0; i < num_of_vnics; i++) {
4730                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4731
4732                 for (j = 0; j < vnic->uc_filter_count; j++) {
4733                         struct hwrm_cfa_l2_filter_free_input req = {0};
4734
4735                         bnxt_hwrm_cmd_hdr_init(bp, &req,
4736                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
4737
4738                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
4739
4740                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4741                                                 HWRM_CMD_TIMEOUT);
4742                 }
4743                 vnic->uc_filter_count = 0;
4744         }
4745         mutex_unlock(&bp->hwrm_cmd_lock);
4746
4747         return rc;
4748 }
4749
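/* Configure TPA (hardware receive aggregation) on a VNIC.  A zero
 * tpa_flags disables TPA; otherwise the maximum aggregation segments
 * are derived from the MTU and RX page size (or taken from the chip
 * limits on P5).
 */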
4750 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4751 {
4752         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4753         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4754         struct hwrm_vnic_tpa_cfg_input req = {0};
4755
4756         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4757                 return 0;
4758
4759         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4760
4761         if (tpa_flags) {
4762                 u16 mss = bp->dev->mtu - 40;
4763                 u32 nsegs, n, segs = 0, flags;
4764
4765                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4766                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4767                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4768                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4769                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4770                 if (tpa_flags & BNXT_FLAG_GRO)
4771                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4772
4773                 req.flags = cpu_to_le32(flags);
4774
4775                 req.enables =
4776                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4777                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4778                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4779
4780                 /* The number of segs is in log2 units, and the first packet is
4781                  * not included as part of these units.
4782                  */
4783                 if (mss <= BNXT_RX_PAGE_SIZE) {
4784                         n = BNXT_RX_PAGE_SIZE / mss;
4785                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4786                 } else {
4787                         n = mss / BNXT_RX_PAGE_SIZE;
4788                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4789                                 n++;
4790                         nsegs = (MAX_SKB_FRAGS - n) / n;
4791                 }
4792
4793                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4794                         segs = MAX_TPA_SEGS_P5;
4795                         max_aggs = bp->max_tpa;
4796                 } else {
4797                         segs = ilog2(nsegs);
4798                 }
4799                 req.max_agg_segs = cpu_to_le16(segs);
4800                 req.max_aggs = cpu_to_le16(max_aggs);
4801
4802                 req.min_agg_len = cpu_to_le32(512);
4803         }
4804         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4805
4806         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4807 }
4808
4809 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4810 {
4811         struct bnxt_ring_grp_info *grp_info;
4812
4813         grp_info = &bp->grp_info[ring->grp_idx];
4814         return grp_info->cp_fw_ring_id;
4815 }
4816
4817 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4818 {
4819         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4820                 struct bnxt_napi *bnapi = rxr->bnapi;
4821                 struct bnxt_cp_ring_info *cpr;
4822
4823                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4824                 return cpr->cp_ring_struct.fw_ring_id;
4825         } else {
4826                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4827         }
4828 }
4829
4830 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4831 {
4832         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4833                 struct bnxt_napi *bnapi = txr->bnapi;
4834                 struct bnxt_cp_ring_info *cpr;
4835
4836                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4837                 return cpr->cp_ring_struct.fw_ring_id;
4838         } else {
4839                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4840         }
4841 }
4842
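/* Configure RSS on a VNIC for pre-P5 chips: program the hash type and
 * fill the indirection table with ring group IDs.
 */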
4843 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4844 {
4845         u32 i, j, max_rings;
4846         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4847         struct hwrm_vnic_rss_cfg_input req = {0};
4848
4849         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4850             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4851                 return 0;
4852
4853         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4854         if (set_rss) {
4855                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4856                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4857                 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4858                         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4859                                 max_rings = bp->rx_nr_rings - 1;
4860                         else
4861                                 max_rings = bp->rx_nr_rings;
4862                 } else {
4863                         max_rings = 1;
4864                 }
4865
4866                 /* Fill the RSS indirection table with ring group ids */
4867                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4868                         if (j == max_rings)
4869                                 j = 0;
4870                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4871                 }
4872
4873                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4874                 req.hash_key_tbl_addr =
4875                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
4876         }
4877         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4878         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4879 }
4880
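/* Configure RSS on a VNIC for P5 chips.  The indirection table is
 * programmed in blocks of 64 RX ring / completion ring ID pairs, one
 * HWRM_VNIC_RSS_CFG request per RSS context.
 */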
4881 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4882 {
4883         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4884         u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4885         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4886         struct hwrm_vnic_rss_cfg_input req = {0};
4887
4888         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4889         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4890         if (!set_rss) {
4891                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4892                 return 0;
4893         }
4894         req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4895         req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4896         req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4897         req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4898         nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4899         for (i = 0, k = 0; i < nr_ctxs; i++) {
4900                 __le16 *ring_tbl = vnic->rss_table;
4901                 int rc;
4902
4903                 req.ring_table_pair_index = i;
4904                 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4905                 for (j = 0; j < 64; j++) {
4906                         u16 ring_id;
4907
4908                         ring_id = rxr->rx_ring_struct.fw_ring_id;
4909                         *ring_tbl++ = cpu_to_le16(ring_id);
4910                         ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4911                         *ring_tbl++ = cpu_to_le16(ring_id);
4912                         rxr++;
4913                         k++;
4914                         if (k == max_rings) {
4915                                 k = 0;
4916                                 rxr = &bp->rx_ring[0];
4917                         }
4918                 }
4919                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4920                 if (rc)
4921                         return rc;
4922         }
4923         return 0;
4924 }
4925
4926 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4927 {
4928         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4929         struct hwrm_vnic_plcmodes_cfg_input req = {0};
4930
4931         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4932         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4933                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4934                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4935         req.enables =
4936                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4937                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4938         /* thresholds not implemented in firmware yet */
4939         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4940         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4941         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4942         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4943 }
4944
4945 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4946                                         u16 ctx_idx)
4947 {
4948         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4949
4950         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4951         req.rss_cos_lb_ctx_id =
4952                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4953
4954         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4955         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4956 }
4957
4958 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4959 {
4960         int i, j;
4961
4962         for (i = 0; i < bp->nr_vnics; i++) {
4963                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4964
4965                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4966                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4967                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4968                 }
4969         }
4970         bp->rsscos_nr_ctxs = 0;
4971 }
4972
4973 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4974 {
4975         int rc;
4976         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4977         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4978                                                 bp->hwrm_cmd_resp_addr;
4979
4980         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4981                                -1);
4982
4983         mutex_lock(&bp->hwrm_cmd_lock);
4984         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4985         if (!rc)
4986                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4987                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
4988         mutex_unlock(&bp->hwrm_cmd_lock);
4989
4990         return rc;
4991 }
4992
4993 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4994 {
4995         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4996                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4997         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4998 }
4999
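/* Configure a VNIC: default ring group (or default RX/completion ring
 * IDs on P5 chips), RSS/COS/LB rules, MRU and VLAN stripping.
 */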
5000 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5001 {
5002         unsigned int ring = 0, grp_idx;
5003         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5004         struct hwrm_vnic_cfg_input req = {0};
5005         u16 def_vlan = 0;
5006
5007         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5008
5009         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5010                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5011
5012                 req.default_rx_ring_id =
5013                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5014                 req.default_cmpl_ring_id =
5015                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5016                 req.enables =
5017                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5018                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5019                 goto vnic_mru;
5020         }
5021         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5022         /* Only RSS is supported for now.  TBD: COS & LB */
5023         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5024                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5025                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5026                                            VNIC_CFG_REQ_ENABLES_MRU);
5027         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5028                 req.rss_rule =
5029                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5030                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5031                                            VNIC_CFG_REQ_ENABLES_MRU);
5032                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5033         } else {
5034                 req.rss_rule = cpu_to_le16(0xffff);
5035         }
5036
5037         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5038             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5039                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5040                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5041         } else {
5042                 req.cos_rule = cpu_to_le16(0xffff);
5043         }
5044
5045         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5046                 ring = 0;
5047         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5048                 ring = vnic_id - 1;
5049         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5050                 ring = bp->rx_nr_rings - 1;
5051
5052         grp_idx = bp->rx_ring[ring].bnapi->index;
5053         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5054         req.lb_rule = cpu_to_le16(0xffff);
5055 vnic_mru:
5056         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
5057                               VLAN_HLEN);
5058
5059         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5060 #ifdef CONFIG_BNXT_SRIOV
5061         if (BNXT_VF(bp))
5062                 def_vlan = bp->vf.vlan;
5063 #endif
5064         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5065                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5066         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5067                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5068
5069         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5070 }
5071
5072 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5073 {
5074         int rc = 0;
5075
5076         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5077                 struct hwrm_vnic_free_input req = {0};
5078
5079                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5080                 req.vnic_id =
5081                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5082
5083                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5084                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5085         }
5086         return rc;
5087 }
5088
5089 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5090 {
5091         u16 i;
5092
5093         for (i = 0; i < bp->nr_vnics; i++)
5094                 bnxt_hwrm_vnic_free_one(bp, i);
5095 }
5096
5097 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5098                                 unsigned int start_rx_ring_idx,
5099                                 unsigned int nr_rings)
5100 {
5101         int rc = 0;
5102         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5103         struct hwrm_vnic_alloc_input req = {0};
5104         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5105         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5106
5107         if (bp->flags & BNXT_FLAG_CHIP_P5)
5108                 goto vnic_no_ring_grps;
5109
5110         /* map ring groups to this vnic */
5111         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5112                 grp_idx = bp->rx_ring[i].bnapi->index;
5113                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5114                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5115                                    j, nr_rings);
5116                         break;
5117                 }
5118                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5119         }
5120
5121 vnic_no_ring_grps:
5122         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5123                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5124         if (vnic_id == 0)
5125                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5126
5127         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5128
5129         mutex_lock(&bp->hwrm_cmd_lock);
5130         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5131         if (!rc)
5132                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5133         mutex_unlock(&bp->hwrm_cmd_lock);
5134         return rc;
5135 }
5136
5137 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5138 {
5139         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5140         struct hwrm_vnic_qcaps_input req = {0};
5141         int rc;
5142
5143         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5144         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5145         if (bp->hwrm_spec_code < 0x10600)
5146                 return 0;
5147
5148         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5149         mutex_lock(&bp->hwrm_cmd_lock);
5150         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5151         if (!rc) {
5152                 u32 flags = le32_to_cpu(resp->flags);
5153
5154                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5155                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5156                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5157                 if (flags &
5158                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5159                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5160                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5161                 if (bp->max_tpa_v2)
5162                         bp->hw_ring_stats_size =
5163                                 sizeof(struct ctx_hw_stats_ext);
5164         }
5165         mutex_unlock(&bp->hwrm_cmd_lock);
5166         return rc;
5167 }
5168
5169 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5170 {
5171         u16 i;
5172         int rc = 0;
5173
5174         if (bp->flags & BNXT_FLAG_CHIP_P5)
5175                 return 0;
5176
5177         mutex_lock(&bp->hwrm_cmd_lock);
5178         for (i = 0; i < bp->rx_nr_rings; i++) {
5179                 struct hwrm_ring_grp_alloc_input req = {0};
5180                 struct hwrm_ring_grp_alloc_output *resp =
5181                                         bp->hwrm_cmd_resp_addr;
5182                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5183
5184                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5185
5186                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5187                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5188                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5189                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5190
5191                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5192                                         HWRM_CMD_TIMEOUT);
5193                 if (rc)
5194                         break;
5195
5196                 bp->grp_info[grp_idx].fw_grp_id =
5197                         le32_to_cpu(resp->ring_group_id);
5198         }
5199         mutex_unlock(&bp->hwrm_cmd_lock);
5200         return rc;
5201 }
5202
5203 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5204 {
5205         u16 i;
5206         int rc = 0;
5207         struct hwrm_ring_grp_free_input req = {0};
5208
5209         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5210                 return 0;
5211
5212         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5213
5214         mutex_lock(&bp->hwrm_cmd_lock);
5215         for (i = 0; i < bp->cp_nr_rings; i++) {
5216                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5217                         continue;
5218                 req.ring_group_id =
5219                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5220
5221                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5222                                         HWRM_CMD_TIMEOUT);
5223                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5224         }
5225         mutex_unlock(&bp->hwrm_cmd_lock);
5226         return rc;
5227 }
5228
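/* Allocate a single hardware ring (TX, RX, AGG, CMPL or NQ) through
 * HWRM_RING_ALLOC and save the firmware ring ID from the response.
 */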
5229 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5230                                     struct bnxt_ring_struct *ring,
5231                                     u32 ring_type, u32 map_index)
5232 {
5233         int rc = 0, err = 0;
5234         struct hwrm_ring_alloc_input req = {0};
5235         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5236         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5237         struct bnxt_ring_grp_info *grp_info;
5238         u16 ring_id;
5239
5240         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5241
5242         req.enables = 0;
5243         if (rmem->nr_pages > 1) {
5244                 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5245                 /* Page size is in log2 units */
5246                 req.page_size = BNXT_PAGE_SHIFT;
5247                 req.page_tbl_depth = 1;
5248         } else {
5249                 req.page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
5250         }
5251         req.fbo = 0;
5252         /* Association of ring index with doorbell index and MSIX number */
5253         req.logical_id = cpu_to_le16(map_index);
5254
5255         switch (ring_type) {
5256         case HWRM_RING_ALLOC_TX: {
5257                 struct bnxt_tx_ring_info *txr;
5258
5259                 txr = container_of(ring, struct bnxt_tx_ring_info,
5260                                    tx_ring_struct);
5261                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5262                 /* Association of transmit ring with completion ring */
5263                 grp_info = &bp->grp_info[ring->grp_idx];
5264                 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5265                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5266                 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5267                 req.queue_id = cpu_to_le16(ring->queue_id);
5268                 break;
5269         }
5270         case HWRM_RING_ALLOC_RX:
5271                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5272                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5273                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5274                         u16 flags = 0;
5275
5276                         /* Association of rx ring with stats context */
5277                         grp_info = &bp->grp_info[ring->grp_idx];
5278                         req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5279                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5280                         req.enables |= cpu_to_le32(
5281                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5282                         if (NET_IP_ALIGN == 2)
5283                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5284                         req.flags = cpu_to_le16(flags);
5285                 }
5286                 break;
5287         case HWRM_RING_ALLOC_AGG:
5288                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5289                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5290                         /* Association of agg ring with rx ring */
5291                         grp_info = &bp->grp_info[ring->grp_idx];
5292                         req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5293                         req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5294                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5295                         req.enables |= cpu_to_le32(
5296                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5297                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5298                 } else {
5299                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5300                 }
5301                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5302                 break;
5303         case HWRM_RING_ALLOC_CMPL:
5304                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5305                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5306                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5307                         /* Association of cp ring with nq */
5308                         grp_info = &bp->grp_info[map_index];
5309                         req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5310                         req.cq_handle = cpu_to_le64(ring->handle);
5311                         req.enables |= cpu_to_le32(
5312                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5313                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5314                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5315                 }
5316                 break;
5317         case HWRM_RING_ALLOC_NQ:
5318                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5319                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5320                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5321                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5322                 break;
5323         default:
5324                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5325                            ring_type);
5326                 return -EINVAL;
5327         }
5328
5329         mutex_lock(&bp->hwrm_cmd_lock);
5330         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5331         err = le16_to_cpu(resp->error_code);
5332         ring_id = le16_to_cpu(resp->ring_id);
5333         mutex_unlock(&bp->hwrm_cmd_lock);
5334
5335         if (rc || err) {
5336                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5337                            ring_type, rc, err);
5338                 return -EIO;
5339         }
5340         ring->fw_ring_id = ring_id;
5341         return rc;
5342 }
5343
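/* Tell firmware which completion ring should receive async event
 * notifications for this function.
 */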
5344 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5345 {
5346         int rc;
5347
5348         if (BNXT_PF(bp)) {
5349                 struct hwrm_func_cfg_input req = {0};
5350
5351                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5352                 req.fid = cpu_to_le16(0xffff);
5353                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5354                 req.async_event_cr = cpu_to_le16(idx);
5355                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5356         } else {
5357                 struct hwrm_func_vf_cfg_input req = {0};
5358
5359                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5360                 req.enables =
5361                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5362                 req.async_event_cr = cpu_to_le16(idx);
5363                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5364         }
5365         return rc;
5366 }
5367
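/* Set up the doorbell address and key for a ring.  P5 chips use 64-bit
 * doorbells at a fixed offset in BAR1; older chips use 32-bit doorbells
 * indexed by map_idx.
 */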
5368 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5369                         u32 map_idx, u32 xid)
5370 {
5371         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5372                 if (BNXT_PF(bp))
5373                         db->doorbell = bp->bar1 + 0x10000;
5374                 else
5375                         db->doorbell = bp->bar1 + 0x4000;
5376                 switch (ring_type) {
5377                 case HWRM_RING_ALLOC_TX:
5378                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5379                         break;
5380                 case HWRM_RING_ALLOC_RX:
5381                 case HWRM_RING_ALLOC_AGG:
5382                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5383                         break;
5384                 case HWRM_RING_ALLOC_CMPL:
5385                         db->db_key64 = DBR_PATH_L2;
5386                         break;
5387                 case HWRM_RING_ALLOC_NQ:
5388                         db->db_key64 = DBR_PATH_L2;
5389                         break;
5390                 }
5391                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5392         } else {
5393                 db->doorbell = bp->bar1 + map_idx * 0x80;
5394                 switch (ring_type) {
5395                 case HWRM_RING_ALLOC_TX:
5396                         db->db_key32 = DB_KEY_TX;
5397                         break;
5398                 case HWRM_RING_ALLOC_RX:
5399                 case HWRM_RING_ALLOC_AGG:
5400                         db->db_key32 = DB_KEY_RX;
5401                         break;
5402                 case HWRM_RING_ALLOC_CMPL:
5403                         db->db_key32 = DB_KEY_CP;
5404                         break;
5405                 }
5406         }
5407 }
5408
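/* Allocate all hardware rings in dependency order: NQ or completion
 * rings first, then TX, RX and aggregation rings, programming the
 * associated doorbells as each ring is created.
 */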
5409 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5410 {
5411         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5412         int i, rc = 0;
5413         u32 type;
5414
5415         if (bp->flags & BNXT_FLAG_CHIP_P5)
5416                 type = HWRM_RING_ALLOC_NQ;
5417         else
5418                 type = HWRM_RING_ALLOC_CMPL;
5419         for (i = 0; i < bp->cp_nr_rings; i++) {
5420                 struct bnxt_napi *bnapi = bp->bnapi[i];
5421                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5422                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5423                 u32 map_idx = ring->map_idx;
5424                 unsigned int vector;
5425
5426                 vector = bp->irq_tbl[map_idx].vector;
5427                 disable_irq_nosync(vector);
5428                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5429                 if (rc) {
5430                         enable_irq(vector);
5431                         goto err_out;
5432                 }
5433                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5434                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5435                 enable_irq(vector);
5436                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5437
5438                 if (!i) {
5439                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5440                         if (rc)
5441                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5442                 }
5443         }
5444
5445         type = HWRM_RING_ALLOC_TX;
5446         for (i = 0; i < bp->tx_nr_rings; i++) {
5447                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5448                 struct bnxt_ring_struct *ring;
5449                 u32 map_idx;
5450
5451                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5452                         struct bnxt_napi *bnapi = txr->bnapi;
5453                         struct bnxt_cp_ring_info *cpr, *cpr2;
5454                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5455
5456                         cpr = &bnapi->cp_ring;
5457                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5458                         ring = &cpr2->cp_ring_struct;
5459                         ring->handle = BNXT_TX_HDL;
5460                         map_idx = bnapi->index;
5461                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5462                         if (rc)
5463                                 goto err_out;
5464                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5465                                     ring->fw_ring_id);
5466                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5467                 }
5468                 ring = &txr->tx_ring_struct;
5469                 map_idx = i;
5470                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5471                 if (rc)
5472                         goto err_out;
5473                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5474         }
5475
5476         type = HWRM_RING_ALLOC_RX;
5477         for (i = 0; i < bp->rx_nr_rings; i++) {
5478                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5479                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5480                 struct bnxt_napi *bnapi = rxr->bnapi;
5481                 u32 map_idx = bnapi->index;
5482
5483                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5484                 if (rc)
5485                         goto err_out;
5486                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5487                 /* With agg rings, agg buffers are posted first; the RX
                      * doorbell is then written in the agg loop below.
                      */
5488                 if (!agg_rings)
5489                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5490                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5491                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5492                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5493                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5494                         struct bnxt_cp_ring_info *cpr2;
5495
5496                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5497                         ring = &cpr2->cp_ring_struct;
5498                         ring->handle = BNXT_RX_HDL;
5499                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5500                         if (rc)
5501                                 goto err_out;
5502                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5503                                     ring->fw_ring_id);
5504                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5505                 }
5506         }
5507
5508         if (agg_rings) {
5509                 type = HWRM_RING_ALLOC_AGG;
5510                 for (i = 0; i < bp->rx_nr_rings; i++) {
5511                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5512                         struct bnxt_ring_struct *ring =
5513                                                 &rxr->rx_agg_ring_struct;
5514                         u32 grp_idx = ring->grp_idx;
5515                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5516
5517                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5518                         if (rc)
5519                                 goto err_out;
5520
5521                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5522                                     ring->fw_ring_id);
5523                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5524                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5525                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5526                 }
5527         }
5528 err_out:
5529         return rc;
5530 }
5531
5532 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5533                                    struct bnxt_ring_struct *ring,
5534                                    u32 ring_type, int cmpl_ring_id)
5535 {
5536         int rc;
5537         struct hwrm_ring_free_input req = {0};
5538         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5539         u16 error_code;
5540
5541         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
5542                 return 0;
5543
5544         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5545         req.ring_type = ring_type;
5546         req.ring_id = cpu_to_le16(ring->fw_ring_id);
5547
5548         mutex_lock(&bp->hwrm_cmd_lock);
5549         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5550         error_code = le16_to_cpu(resp->error_code);
5551         mutex_unlock(&bp->hwrm_cmd_lock);
5552
5553         if (rc || error_code) {
5554                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5555                            ring_type, rc, error_code);
5556                 return -EIO;
5557         }
5558         return 0;
5559 }
5560
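/* Free all rings with the firmware, roughly in the reverse order of
 * bnxt_hwrm_ring_alloc(): TX, RX and aggregation rings first (when
 * close_path is set, the associated completion ring ID is passed in the
 * HWRM request header, otherwise INVALID_HW_RING_ID), then interrupts
 * are disabled and the completion/NQ rings are freed last.
 */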
5561 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5562 {
5563         u32 type;
5564         int i;
5565
5566         if (!bp->bnapi)
5567                 return;
5568
5569         for (i = 0; i < bp->tx_nr_rings; i++) {
5570                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5571                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5572
5573                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5574                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5575
5576                         hwrm_ring_free_send_msg(bp, ring,
5577                                                 RING_FREE_REQ_RING_TYPE_TX,
5578                                                 close_path ? cmpl_ring_id :
5579                                                 INVALID_HW_RING_ID);
5580                         ring->fw_ring_id = INVALID_HW_RING_ID;
5581                 }
5582         }
5583
5584         for (i = 0; i < bp->rx_nr_rings; i++) {
5585                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5586                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5587                 u32 grp_idx = rxr->bnapi->index;
5588
5589                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5590                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5591
5592                         hwrm_ring_free_send_msg(bp, ring,
5593                                                 RING_FREE_REQ_RING_TYPE_RX,
5594                                                 close_path ? cmpl_ring_id :
5595                                                 INVALID_HW_RING_ID);
5596                         ring->fw_ring_id = INVALID_HW_RING_ID;
5597                         bp->grp_info[grp_idx].rx_fw_ring_id =
5598                                 INVALID_HW_RING_ID;
5599                 }
5600         }
5601
5602         if (bp->flags & BNXT_FLAG_CHIP_P5)
5603                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5604         else
5605                 type = RING_FREE_REQ_RING_TYPE_RX;
5606         for (i = 0; i < bp->rx_nr_rings; i++) {
5607                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5608                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5609                 u32 grp_idx = rxr->bnapi->index;
5610
5611                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5612                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5613
5614                         hwrm_ring_free_send_msg(bp, ring, type,
5615                                                 close_path ? cmpl_ring_id :
5616                                                 INVALID_HW_RING_ID);
5617                         ring->fw_ring_id = INVALID_HW_RING_ID;
5618                         bp->grp_info[grp_idx].agg_fw_ring_id =
5619                                 INVALID_HW_RING_ID;
5620                 }
5621         }
5622
5623         /* The completion rings are about to be freed.  Once they are
5624          * gone the IRQ doorbell will no longer work, so disable
5625          * interrupts here first.
5626          */
5627         bnxt_disable_int_sync(bp);
5628
5629         if (bp->flags & BNXT_FLAG_CHIP_P5)
5630                 type = RING_FREE_REQ_RING_TYPE_NQ;
5631         else
5632                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5633         for (i = 0; i < bp->cp_nr_rings; i++) {
5634                 struct bnxt_napi *bnapi = bp->bnapi[i];
5635                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5636                 struct bnxt_ring_struct *ring;
5637                 int j;
5638
5639                 for (j = 0; j < 2; j++) {
5640                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5641
5642                         if (cpr2) {
5643                                 ring = &cpr2->cp_ring_struct;
5644                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5645                                         continue;
5646                                 hwrm_ring_free_send_msg(bp, ring,
5647                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
5648                                         INVALID_HW_RING_ID);
5649                                 ring->fw_ring_id = INVALID_HW_RING_ID;
5650                         }
5651                 }
5652                 ring = &cpr->cp_ring_struct;
5653                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5654                         hwrm_ring_free_send_msg(bp, ring, type,
5655                                                 INVALID_HW_RING_ID);
5656                         ring->fw_ring_id = INVALID_HW_RING_ID;
5657                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5658                 }
5659         }
5660 }
5661
5662 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5663                            bool shared);
5664
5665 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5666 {
5667         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5668         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5669         struct hwrm_func_qcfg_input req = {0};
5670         int rc;
5671
5672         if (bp->hwrm_spec_code < 0x10601)
5673                 return 0;
5674
5675         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5676         req.fid = cpu_to_le16(0xffff);
5677         mutex_lock(&bp->hwrm_cmd_lock);
5678         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5679         if (rc) {
5680                 mutex_unlock(&bp->hwrm_cmd_lock);
5681                 return rc;
5682         }
5683
5684         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5685         if (BNXT_NEW_RM(bp)) {
5686                 u16 cp, stats;
5687
5688                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5689                 hw_resc->resv_hw_ring_grps =
5690                         le32_to_cpu(resp->alloc_hw_ring_grps);
5691                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5692                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5693                 stats = le16_to_cpu(resp->alloc_stat_ctx);
5694                 hw_resc->resv_irqs = cp;
5695                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5696                         int rx = hw_resc->resv_rx_rings;
5697                         int tx = hw_resc->resv_tx_rings;
5698
5699                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5700                                 rx >>= 1;
5701                         if (cp < (rx + tx)) {
5702                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5703                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5704                                         rx <<= 1;
5705                                 hw_resc->resv_rx_rings = rx;
5706                                 hw_resc->resv_tx_rings = tx;
5707                         }
5708                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5709                         hw_resc->resv_hw_ring_grps = rx;
5710                 }
5711                 hw_resc->resv_cp_rings = cp;
5712                 hw_resc->resv_stat_ctxs = stats;
5713         }
5714         mutex_unlock(&bp->hwrm_cmd_lock);
5715         return 0;
5716 }
5717
5718 /* Caller must hold bp->hwrm_cmd_lock */
5719 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5720 {
5721         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5722         struct hwrm_func_qcfg_input req = {0};
5723         int rc;
5724
5725         if (bp->hwrm_spec_code < 0x10601)
5726                 return 0;
5727
5728         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5729         req.fid = cpu_to_le16(fid);
5730         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5731         if (!rc)
5732                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5733
5734         return rc;
5735 }
5736
5737 static bool bnxt_rfs_supported(struct bnxt *bp);
5738
5739 static void
5740 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5741                              int tx_rings, int rx_rings, int ring_grps,
5742                              int cp_rings, int stats, int vnics)
5743 {
5744         u32 enables = 0;
5745
5746         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5747         req->fid = cpu_to_le16(0xffff);
5748         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5749         req->num_tx_rings = cpu_to_le16(tx_rings);
5750         if (BNXT_NEW_RM(bp)) {
5751                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5752                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5753                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5754                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5755                         enables |= tx_rings + ring_grps ?
5756                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5757                         enables |= rx_rings ?
5758                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5759                 } else {
5760                         enables |= cp_rings ?
5761                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5762                         enables |= ring_grps ?
5763                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5764                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5765                 }
5766                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5767
5768                 req->num_rx_rings = cpu_to_le16(rx_rings);
5769                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5770                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5771                         req->num_msix = cpu_to_le16(cp_rings);
5772                         req->num_rsscos_ctxs =
5773                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5774                 } else {
5775                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
5776                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5777                         req->num_rsscos_ctxs = cpu_to_le16(1);
5778                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5779                             bnxt_rfs_supported(bp))
5780                                 req->num_rsscos_ctxs =
5781                                         cpu_to_le16(ring_grps + 1);
5782                 }
5783                 req->num_stat_ctxs = cpu_to_le16(stats);
5784                 req->num_vnics = cpu_to_le16(vnics);
5785         }
5786         req->enables = cpu_to_le32(enables);
5787 }
5788
5789 static void
5790 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5791                              struct hwrm_func_vf_cfg_input *req, int tx_rings,
5792                              int rx_rings, int ring_grps, int cp_rings,
5793                              int stats, int vnics)
5794 {
5795         u32 enables = 0;
5796
5797         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5798         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5799         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5800                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5801         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5802         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5803                 enables |= tx_rings + ring_grps ?
5804                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5805         } else {
5806                 enables |= cp_rings ?
5807                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5808                 enables |= ring_grps ?
5809                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5810         }
5811         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5812         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5813
5814         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5815         req->num_tx_rings = cpu_to_le16(tx_rings);
5816         req->num_rx_rings = cpu_to_le16(rx_rings);
5817         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5818                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5819                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5820         } else {
5821                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5822                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5823                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5824         }
5825         req->num_stat_ctxs = cpu_to_le16(stats);
5826         req->num_vnics = cpu_to_le16(vnics);
5827
5828         req->enables = cpu_to_le32(enables);
5829 }
5830
5831 static int
5832 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5833                            int ring_grps, int cp_rings, int stats, int vnics)
5834 {
5835         struct hwrm_func_cfg_input req = {0};
5836         int rc;
5837
5838         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5839                                      cp_rings, stats, vnics);
5840         if (!req.enables)
5841                 return 0;
5842
5843         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5844         if (rc)
5845                 return rc;
5846
5847         if (bp->hwrm_spec_code < 0x10601)
5848                 bp->hw_resc.resv_tx_rings = tx_rings;
5849
5850         rc = bnxt_hwrm_get_rings(bp);
5851         return rc;
5852 }
5853
5854 static int
5855 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5856                            int ring_grps, int cp_rings, int stats, int vnics)
5857 {
5858         struct hwrm_func_vf_cfg_input req = {0};
5859         int rc;
5860
5861         if (!BNXT_NEW_RM(bp)) {
5862                 bp->hw_resc.resv_tx_rings = tx_rings;
5863                 return 0;
5864         }
5865
5866         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5867                                      cp_rings, stats, vnics);
5868         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5869         if (rc)
5870                 return rc;
5871
5872         rc = bnxt_hwrm_get_rings(bp);
5873         return rc;
5874 }
5875
5876 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
5877                                    int cp, int stat, int vnic)
5878 {
5879         if (BNXT_PF(bp))
5880                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5881                                                   vnic);
5882         else
5883                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5884                                                   vnic);
5885 }
5886
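/* Ring accounting helpers.  On P5 chips every RX and every TX ring owns
 * a completion ring of its own, so bnxt_cp_rings_in_use() returns
 * rx_nr_rings + tx_nr_rings, while the NQ count tracks cp_nr_rings plus
 * any MSI-X vectors reserved for the ULP (RDMA) driver.  On older chips
 * the two counts are the same.
 */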
5887 int bnxt_nq_rings_in_use(struct bnxt *bp)
5888 {
5889         int cp = bp->cp_nr_rings;
5890         int ulp_msix, ulp_base;
5891
5892         ulp_msix = bnxt_get_ulp_msix_num(bp);
5893         if (ulp_msix) {
5894                 ulp_base = bnxt_get_ulp_msix_base(bp);
5895                 cp += ulp_msix;
5896                 if ((ulp_base + ulp_msix) > cp)
5897                         cp = ulp_base + ulp_msix;
5898         }
5899         return cp;
5900 }
5901
5902 static int bnxt_cp_rings_in_use(struct bnxt *bp)
5903 {
5904         int cp;
5905
5906         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5907                 return bnxt_nq_rings_in_use(bp);
5908
5909         cp = bp->tx_nr_rings + bp->rx_nr_rings;
5910         return cp;
5911 }
5912
5913 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5914 {
5915         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
5916         int cp = bp->cp_nr_rings;
5917
5918         if (!ulp_stat)
5919                 return cp;
5920
5921         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
5922                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
5923
5924         return cp + ulp_stat;
5925 }
5926
5927 static bool bnxt_need_reserve_rings(struct bnxt *bp)
5928 {
5929         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5930         int cp = bnxt_cp_rings_in_use(bp);
5931         int nq = bnxt_nq_rings_in_use(bp);
5932         int rx = bp->rx_nr_rings, stat;
5933         int vnic = 1, grp = rx;
5934
5935         if (bp->hwrm_spec_code < 0x10601)
5936                 return false;
5937
5938         if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5939                 return true;
5940
5941         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5942                 vnic = rx + 1;
5943         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5944                 rx <<= 1;
5945         stat = bnxt_get_func_stat_ctxs(bp);
5946         if (BNXT_NEW_RM(bp) &&
5947             (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
5948              hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
5949              (hw_resc->resv_hw_ring_grps != grp &&
5950               !(bp->flags & BNXT_FLAG_CHIP_P5))))
5951                 return true;
5952         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
5953             hw_resc->resv_irqs != nq)
5954                 return true;
5955         return false;
5956 }
5957
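/* Reserve rings with the firmware and adjust the driver's ring counts
 * to what was actually granted.  Note the aggregation-ring accounting:
 * with BNXT_FLAG_AGG_RINGS each RX queue consumes two hardware RX rings
 * (one normal, one aggregation), so the count is doubled (rx <<= 1)
 * before the reservation and halved again when converting back to RX
 * queues.  For example, 8 RX queues with agg rings enabled reserve 16
 * hardware RX rings but only 8 ring groups.
 */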
5958 static int __bnxt_reserve_rings(struct bnxt *bp)
5959 {
5960         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5961         int cp = bnxt_nq_rings_in_use(bp);
5962         int tx = bp->tx_nr_rings;
5963         int rx = bp->rx_nr_rings;
5964         int grp, rx_rings, rc;
5965         int vnic = 1, stat;
5966         bool sh = false;
5967
5968         if (!bnxt_need_reserve_rings(bp))
5969                 return 0;
5970
5971         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5972                 sh = true;
5973         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5974                 vnic = rx + 1;
5975         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5976                 rx <<= 1;
5977         grp = bp->rx_nr_rings;
5978         stat = bnxt_get_func_stat_ctxs(bp);
5979
5980         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
5981         if (rc)
5982                 return rc;
5983
5984         tx = hw_resc->resv_tx_rings;
5985         if (BNXT_NEW_RM(bp)) {
5986                 rx = hw_resc->resv_rx_rings;
5987                 cp = hw_resc->resv_irqs;
5988                 grp = hw_resc->resv_hw_ring_grps;
5989                 vnic = hw_resc->resv_vnics;
5990                 stat = hw_resc->resv_stat_ctxs;
5991         }
5992
5993         rx_rings = rx;
5994         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5995                 if (rx >= 2) {
5996                         rx_rings = rx >> 1;
5997                 } else {
5998                         if (netif_running(bp->dev))
5999                                 return -ENOMEM;
6000
6001                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6002                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6003                         bp->dev->hw_features &= ~NETIF_F_LRO;
6004                         bp->dev->features &= ~NETIF_F_LRO;
6005                         bnxt_set_ring_params(bp);
6006                 }
6007         }
6008         rx_rings = min_t(int, rx_rings, grp);
6009         cp = min_t(int, cp, bp->cp_nr_rings);
6010         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6011                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6012         cp = min_t(int, cp, stat);
6013         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6014         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6015                 rx = rx_rings << 1;
6016         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6017         bp->tx_nr_rings = tx;
6018         bp->rx_nr_rings = rx_rings;
6019         bp->cp_nr_rings = cp;
6020
6021         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6022                 return -ENOMEM;
6023
6024         return rc;
6025 }
6026
6027 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6028                                     int ring_grps, int cp_rings, int stats,
6029                                     int vnics)
6030 {
6031         struct hwrm_func_vf_cfg_input req = {0};
6032         u32 flags;
6033         int rc;
6034
6035         if (!BNXT_NEW_RM(bp))
6036                 return 0;
6037
6038         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6039                                      cp_rings, stats, vnics);
6040         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6041                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6042                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6043                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6044                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6045                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6046         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6047                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6048
6049         req.flags = cpu_to_le32(flags);
6050         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6051         return rc;
6052 }
6053
6054 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6055                                     int ring_grps, int cp_rings, int stats,
6056                                     int vnics)
6057 {
6058         struct hwrm_func_cfg_input req = {0};
6059         u32 flags;
6060         int rc;
6061
6062         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6063                                      cp_rings, stats, vnics);
6064         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6065         if (BNXT_NEW_RM(bp)) {
6066                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6067                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6068                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6069                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6070                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6071                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6072                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6073                 else
6074                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6075         }
6076
6077         req.flags = cpu_to_le32(flags);
6078         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6079         return rc;
6080 }
6081
6082 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6083                                  int ring_grps, int cp_rings, int stats,
6084                                  int vnics)
6085 {
6086         if (bp->hwrm_spec_code < 0x10801)
6087                 return 0;
6088
6089         if (BNXT_PF(bp))
6090                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6091                                                 ring_grps, cp_rings, stats,
6092                                                 vnics);
6093
6094         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6095                                         cp_rings, stats, vnics);
6096 }
6097
6098 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6099 {
6100         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6101         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6102         struct hwrm_ring_aggint_qcaps_input req = {0};
6103         int rc;
6104
6105         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6106         coal_cap->num_cmpl_dma_aggr_max = 63;
6107         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6108         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6109         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6110         coal_cap->int_lat_tmr_min_max = 65535;
6111         coal_cap->int_lat_tmr_max_max = 65535;
6112         coal_cap->num_cmpl_aggr_int_max = 65535;
6113         coal_cap->timer_units = 80;
6114
6115         if (bp->hwrm_spec_code < 0x10902)
6116                 return;
6117
6118         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6119         mutex_lock(&bp->hwrm_cmd_lock);
6120         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6121         if (!rc) {
6122                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6123                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6124                 coal_cap->num_cmpl_dma_aggr_max =
6125                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6126                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6127                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6128                 coal_cap->cmpl_aggr_dma_tmr_max =
6129                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6130                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6131                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6132                 coal_cap->int_lat_tmr_min_max =
6133                         le16_to_cpu(resp->int_lat_tmr_min_max);
6134                 coal_cap->int_lat_tmr_max_max =
6135                         le16_to_cpu(resp->int_lat_tmr_max_max);
6136                 coal_cap->num_cmpl_aggr_int_max =
6137                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6138                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6139         }
6140         mutex_unlock(&bp->hwrm_cmd_lock);
6141 }
6142
6143 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6144 {
6145         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6146
6147         return usec * 1000 / coal_cap->timer_units;
6148 }
6149
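/* Build the interrupt coalescing (aggint) parameters for one completion
 * ring.  Timer values are converted from microseconds to firmware timer
 * units by bnxt_usec_to_coal_tmr() above.  As a worked example, with the
 * default timer_units of 80 set in bnxt_hwrm_coal_params_qcaps(),
 * coal_ticks = 12 usec gives int_lat_tmr_max = 12 * 1000 / 80 = 150
 * units, from which the min timer (150 / 2 = 75) and the buffer DMA
 * timer (150 / 4 = 37) are derived before clamping against the reported
 * capabilities.
 */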
6150 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6151         struct bnxt_coal *hw_coal,
6152         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6153 {
6154         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6155         u32 cmpl_params = coal_cap->cmpl_params;
6156         u16 val, tmr, max, flags = 0;
6157
6158         max = hw_coal->bufs_per_record * 128;
6159         if (hw_coal->budget)
6160                 max = hw_coal->bufs_per_record * hw_coal->budget;
6161         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6162
6163         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6164         req->num_cmpl_aggr_int = cpu_to_le16(val);
6165
6166         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6167         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6168
6169         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6170                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6171         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6172
6173         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6174         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6175         req->int_lat_tmr_max = cpu_to_le16(tmr);
6176
6177         /* min timer set to 1/2 of interrupt timer */
6178         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6179                 val = tmr / 2;
6180                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6181                 req->int_lat_tmr_min = cpu_to_le16(val);
6182                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6183         }
6184
6185         /* buf timer set to 1/4 of interrupt timer */
6186         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6187         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6188
6189         if (cmpl_params &
6190             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6191                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6192                 val = clamp_t(u16, tmr, 1,
6193                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6194                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6195                 req->enables |=
6196                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6197         }
6198
6199         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6200                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6201         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6202             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6203                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6204         req->flags = cpu_to_le16(flags);
6205         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6206 }
6207
6208 /* Caller holds bp->hwrm_cmd_lock */
6209 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6210                                    struct bnxt_coal *hw_coal)
6211 {
6212         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6213         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6214         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6215         u32 nq_params = coal_cap->nq_params;
6216         u16 tmr;
6217
6218         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6219                 return 0;
6220
6221         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6222                                -1, -1);
6223         req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6224         req.flags =
6225                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6226
6227         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6228         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6229         req.int_lat_tmr_min = cpu_to_le16(tmr);
6230         req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6231         return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6232 }
6233
6234 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6235 {
6236         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6237         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6238         struct bnxt_coal coal;
6239
6240         /* Tick values in microseconds.
6241          * 1 coal_buf x bufs_per_record = 1 completion record.
6242          */
6243         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6244
6245         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6246         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6247
6248         if (!bnapi->rx_ring)
6249                 return -ENODEV;
6250
6251         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6252                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6253
6254         bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6255
6256         req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6257
6258         return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6259                                  HWRM_CMD_TIMEOUT);
6260 }
6261
6262 int bnxt_hwrm_set_coal(struct bnxt *bp)
6263 {
6264         int i, rc = 0;
6265         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6266                                                            req_tx = {0}, *req;
6267
6268         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6269                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6270         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6271                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6272
6273         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6274         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6275
6276         mutex_lock(&bp->hwrm_cmd_lock);
6277         for (i = 0; i < bp->cp_nr_rings; i++) {
6278                 struct bnxt_napi *bnapi = bp->bnapi[i];
6279                 struct bnxt_coal *hw_coal;
6280                 u16 ring_id;
6281
6282                 req = &req_rx;
6283                 if (!bnapi->rx_ring) {
6284                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6285                         req = &req_tx;
6286                 } else {
6287                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6288                 }
6289                 req->ring_id = cpu_to_le16(ring_id);
6290
6291                 rc = _hwrm_send_message(bp, req, sizeof(*req),
6292                                         HWRM_CMD_TIMEOUT);
6293                 if (rc)
6294                         break;
6295
6296                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6297                         continue;
6298
6299                 if (bnapi->rx_ring && bnapi->tx_ring) {
6300                         req = &req_tx;
6301                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6302                         req->ring_id = cpu_to_le16(ring_id);
6303                         rc = _hwrm_send_message(bp, req, sizeof(*req),
6304                                                 HWRM_CMD_TIMEOUT);
6305                         if (rc)
6306                                 break;
6307                 }
6308                 if (bnapi->rx_ring)
6309                         hw_coal = &bp->rx_coal;
6310                 else
6311                         hw_coal = &bp->tx_coal;
6312                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6313         }
6314         mutex_unlock(&bp->hwrm_cmd_lock);
6315         return rc;
6316 }
6317
6318 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6319 {
6320         int rc = 0, i;
6321         struct hwrm_stat_ctx_free_input req = {0};
6322
6323         if (!bp->bnapi)
6324                 return 0;
6325
6326         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6327                 return 0;
6328
6329         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6330
6331         mutex_lock(&bp->hwrm_cmd_lock);
6332         for (i = 0; i < bp->cp_nr_rings; i++) {
6333                 struct bnxt_napi *bnapi = bp->bnapi[i];
6334                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6335
6336                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6337                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6338
6339                         rc = _hwrm_send_message(bp, &req, sizeof(req),
6340                                                 HWRM_CMD_TIMEOUT);
6341
6342                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6343                 }
6344         }
6345         mutex_unlock(&bp->hwrm_cmd_lock);
6346         return rc;
6347 }
6348
6349 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6350 {
6351         int rc = 0, i;
6352         struct hwrm_stat_ctx_alloc_input req = {0};
6353         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6354
6355         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6356                 return 0;
6357
6358         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6359
6360         req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6361         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6362
6363         mutex_lock(&bp->hwrm_cmd_lock);
6364         for (i = 0; i < bp->cp_nr_rings; i++) {
6365                 struct bnxt_napi *bnapi = bp->bnapi[i];
6366                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6367
6368                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
6369
6370                 rc = _hwrm_send_message(bp, &req, sizeof(req),
6371                                         HWRM_CMD_TIMEOUT);
6372                 if (rc)
6373                         break;
6374
6375                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6376
6377                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6378         }
6379         mutex_unlock(&bp->hwrm_cmd_lock);
6380         return rc;
6381 }
6382
6383 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6384 {
6385         struct hwrm_func_qcfg_input req = {0};
6386         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6387         u16 flags;
6388         int rc;
6389
6390         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6391         req.fid = cpu_to_le16(0xffff);
6392         mutex_lock(&bp->hwrm_cmd_lock);
6393         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6394         if (rc)
6395                 goto func_qcfg_exit;
6396
6397 #ifdef CONFIG_BNXT_SRIOV
6398         if (BNXT_VF(bp)) {
6399                 struct bnxt_vf_info *vf = &bp->vf;
6400
6401                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6402         } else {
6403                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6404         }
6405 #endif
6406         flags = le16_to_cpu(resp->flags);
6407         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6408                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6409                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6410                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6411                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6412         }
6413         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6414                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6415
6416         switch (resp->port_partition_type) {
6417         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6418         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6419         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6420                 bp->port_partition_type = resp->port_partition_type;
6421                 break;
6422         }
6423         if (bp->hwrm_spec_code < 0x10707 ||
6424             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6425                 bp->br_mode = BRIDGE_MODE_VEB;
6426         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6427                 bp->br_mode = BRIDGE_MODE_VEPA;
6428         else
6429                 bp->br_mode = BRIDGE_MODE_UNDEF;
6430
6431         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6432         if (!bp->max_mtu)
6433                 bp->max_mtu = BNXT_MAX_MTU;
6434
6435 func_qcfg_exit:
6436         mutex_unlock(&bp->hwrm_cmd_lock);
6437         return rc;
6438 }
6439
6440 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6441 {
6442         struct hwrm_func_backing_store_qcaps_input req = {0};
6443         struct hwrm_func_backing_store_qcaps_output *resp =
6444                 bp->hwrm_cmd_resp_addr;
6445         int rc;
6446
6447         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6448                 return 0;
6449
6450         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6451         mutex_lock(&bp->hwrm_cmd_lock);
6452         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6453         if (!rc) {
6454                 struct bnxt_ctx_pg_info *ctx_pg;
6455                 struct bnxt_ctx_mem_info *ctx;
6456                 int i;
6457
6458                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6459                 if (!ctx) {
6460                         rc = -ENOMEM;
6461                         goto ctx_err;
6462                 }
6463                 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
6464                 if (!ctx_pg) {
6465                         kfree(ctx);
6466                         rc = -ENOMEM;
6467                         goto ctx_err;
6468                 }
6469                 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6470                         ctx->tqm_mem[i] = ctx_pg;
6471
6472                 bp->ctx = ctx;
6473                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6474                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6475                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6476                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6477                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6478                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6479                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6480                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6481                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6482                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6483                 ctx->vnic_max_vnic_entries =
6484                         le16_to_cpu(resp->vnic_max_vnic_entries);
6485                 ctx->vnic_max_ring_table_entries =
6486                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6487                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6488                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6489                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6490                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6491                 ctx->tqm_min_entries_per_ring =
6492                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6493                 ctx->tqm_max_entries_per_ring =
6494                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6495                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6496                 if (!ctx->tqm_entries_multiple)
6497                         ctx->tqm_entries_multiple = 1;
6498                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6499                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6500                 ctx->mrav_num_entries_units =
6501                         le16_to_cpu(resp->mrav_num_entries_units);
6502                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6503                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6504                 ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
6505         } else {
6506                 rc = 0;
6507         }
6508 ctx_err:
6509         mutex_unlock(&bp->hwrm_cmd_lock);
6510         return rc;
6511 }
6512
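/* Encode the page size and indirection level of a backing-store region
 * into the firmware's pg_attr/pg_dir pair.  Per the backing-store field
 * layout in bnxt_hsi.h, the upper nibble selects the page size (0 = 4K,
 * 1 = 8K, 2 = 64K) and the low bits the page-table depth; pg_dir points
 * at the page table for indirect layouts, or at the single data page
 * for a flat one.
 */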
6513 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6514                                   __le64 *pg_dir)
6515 {
6516         u8 pg_size = 0;
6517
6518         if (BNXT_PAGE_SHIFT == 13)
6519                 pg_size = 1 << 4;
6520         else if (BNXT_PAGE_SHIFT == 16)         /* 64K pages */
6521                 pg_size = 2 << 4;
6522
6523         *pg_attr = pg_size;
6524         if (rmem->depth >= 1) {
6525                 if (rmem->depth == 2)
6526                         *pg_attr |= 2;
6527                 else
6528                         *pg_attr |= 1;
6529                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6530         } else {
6531                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6532         }
6533 }
6534
6535 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6536         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6537          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6538          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6539          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6540          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6541
6542 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6543 {
6544         struct hwrm_func_backing_store_cfg_input req = {0};
6545         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6546         struct bnxt_ctx_pg_info *ctx_pg;
6547         __le32 *num_entries;
6548         __le64 *pg_dir;
6549         u32 flags = 0;
6550         u8 *pg_attr;
6551         int i, rc;
6552         u32 ena;
6553
6554         if (!ctx)
6555                 return 0;
6556
6557         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6558         req.enables = cpu_to_le32(enables);
6559
6560         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6561                 ctx_pg = &ctx->qp_mem;
6562                 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6563                 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6564                 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6565                 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6566                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6567                                       &req.qpc_pg_size_qpc_lvl,
6568                                       &req.qpc_page_dir);
6569         }
6570         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6571                 ctx_pg = &ctx->srq_mem;
6572                 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6573                 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6574                 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6575                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6576                                       &req.srq_pg_size_srq_lvl,
6577                                       &req.srq_page_dir);
6578         }
6579         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6580                 ctx_pg = &ctx->cq_mem;
6581                 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6582                 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6583                 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6584                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6585                                       &req.cq_page_dir);
6586         }
6587         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6588                 ctx_pg = &ctx->vnic_mem;
6589                 req.vnic_num_vnic_entries =
6590                         cpu_to_le16(ctx->vnic_max_vnic_entries);
6591                 req.vnic_num_ring_table_entries =
6592                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
6593                 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6594                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6595                                       &req.vnic_pg_size_vnic_lvl,
6596                                       &req.vnic_page_dir);
6597         }
6598         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6599                 ctx_pg = &ctx->stat_mem;
6600                 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6601                 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6602                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6603                                       &req.stat_pg_size_stat_lvl,
6604                                       &req.stat_page_dir);
6605         }
6606         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6607                 ctx_pg = &ctx->mrav_mem;
6608                 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6609                 if (ctx->mrav_num_entries_units)
6610                         flags |=
6611                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
6612                 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6613                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6614                                       &req.mrav_pg_size_mrav_lvl,
6615                                       &req.mrav_page_dir);
6616         }
6617         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6618                 ctx_pg = &ctx->tim_mem;
6619                 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6620                 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6621                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6622                                       &req.tim_pg_size_tim_lvl,
6623                                       &req.tim_page_dir);
6624         }
6625         for (i = 0, num_entries = &req.tqm_sp_num_entries,
6626              pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6627              pg_dir = &req.tqm_sp_page_dir,
6628              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6629              i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6630                 if (!(enables & ena))
6631                         continue;
6632
6633                 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6634                 ctx_pg = ctx->tqm_mem[i];
6635                 *num_entries = cpu_to_le32(ctx_pg->entries);
6636                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6637         }
6638         req.flags = cpu_to_le32(flags);
6639         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6640         return rc;
6641 }
6642
6643 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6644                                   struct bnxt_ctx_pg_info *ctx_pg)
6645 {
6646         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6647
6648         rmem->page_size = BNXT_PAGE_SIZE;
6649         rmem->pg_arr = ctx_pg->ctx_pg_arr;
6650         rmem->dma_arr = ctx_pg->ctx_dma_arr;
6651         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6652         if (rmem->depth >= 1)
6653                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6654         return bnxt_alloc_ring(bp, rmem);
6655 }
6656
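/* Allocate the backing-store pages for one context region, switching to
 * a two-level page table when the region needs more than MAX_CTX_PAGES
 * pages or when depth > 1 is requested.  As an illustration, assuming
 * MAX_CTX_PAGES is 512 on a 4K-page system, a 3 MB region needs 768
 * pages: two page-table blocks are built, the first covering 512 pages
 * and the last covering the remaining 768 % 512 = 256 pages.
 */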
6657 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6658                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6659                                   u8 depth, bool use_init_val)
6660 {
6661         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6662         int rc;
6663
6664         if (!mem_size)
6665                 return 0;
6666
6667         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6668         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6669                 ctx_pg->nr_pages = 0;
6670                 return -EINVAL;
6671         }
6672         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6673                 int nr_tbls, i;
6674
6675                 rmem->depth = 2;
6676                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6677                                              GFP_KERNEL);
6678                 if (!ctx_pg->ctx_pg_tbl)
6679                         return -ENOMEM;
6680                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6681                 rmem->nr_pages = nr_tbls;
6682                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6683                 if (rc)
6684                         return rc;
6685                 for (i = 0; i < nr_tbls; i++) {
6686                         struct bnxt_ctx_pg_info *pg_tbl;
6687
6688                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6689                         if (!pg_tbl)
6690                                 return -ENOMEM;
6691                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6692                         rmem = &pg_tbl->ring_mem;
6693                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6694                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6695                         rmem->depth = 1;
6696                         rmem->nr_pages = MAX_CTX_PAGES;
6697                         if (use_init_val)
6698                                 rmem->init_val = bp->ctx->ctx_kind_initializer;
6699                         if (i == (nr_tbls - 1)) {
6700                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6701
6702                                 if (rem)
6703                                         rmem->nr_pages = rem;
6704                         }
6705                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6706                         if (rc)
6707                                 break;
6708                 }
6709         } else {
6710                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6711                 if (rmem->nr_pages > 1 || depth)
6712                         rmem->depth = 1;
6713                 if (use_init_val)
6714                         rmem->init_val = bp->ctx->ctx_kind_initializer;
6715                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6716         }
6717         return rc;
6718 }
6719
6720 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6721                                   struct bnxt_ctx_pg_info *ctx_pg)
6722 {
6723         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6724
6725         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6726             ctx_pg->ctx_pg_tbl) {
6727                 int i, nr_tbls = rmem->nr_pages;
6728
6729                 for (i = 0; i < nr_tbls; i++) {
6730                         struct bnxt_ctx_pg_info *pg_tbl;
6731                         struct bnxt_ring_mem_info *rmem2;
6732
6733                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
6734                         if (!pg_tbl)
6735                                 continue;
6736                         rmem2 = &pg_tbl->ring_mem;
6737                         bnxt_free_ring(bp, rmem2);
6738                         ctx_pg->ctx_pg_arr[i] = NULL;
6739                         kfree(pg_tbl);
6740                         ctx_pg->ctx_pg_tbl[i] = NULL;
6741                 }
6742                 kfree(ctx_pg->ctx_pg_tbl);
6743                 ctx_pg->ctx_pg_tbl = NULL;
6744         }
6745         bnxt_free_ring(bp, rmem);
6746         ctx_pg->nr_pages = 0;
6747 }
6748
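     /* Free all context backing store memory: the per-queue TQM rings,
      * then the TIM, MRAV, stats, VNIC, CQ, SRQ and QP areas, and clear
      * BNXT_CTX_FLAG_INITED.
      */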
6749 static void bnxt_free_ctx_mem(struct bnxt *bp)
6750 {
6751         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6752         int i;
6753
6754         if (!ctx)
6755                 return;
6756
6757         if (ctx->tqm_mem[0]) {
6758                 for (i = 0; i < bp->max_q + 1; i++)
6759                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6760                 kfree(ctx->tqm_mem[0]);
6761                 ctx->tqm_mem[0] = NULL;
6762         }
6763
6764         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6765         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6766         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6767         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6768         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6769         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6770         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6771         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6772 }
6773
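     /* Query the firmware's backing store requirements, allocate host memory
      * for each context type (QP, SRQ, CQ, VNIC, stats and one TQM ring per
      * queue, plus MRAV and TIM when RoCE is supported), and hand the page
      * tables to the firmware via FUNC_BACKING_STORE_CFG.  RoCE-capable
      * devices reserve extra QP/SRQ entries unless running in a kdump kernel.
      */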
6774 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6775 {
6776         struct bnxt_ctx_pg_info *ctx_pg;
6777         struct bnxt_ctx_mem_info *ctx;
6778         u32 mem_size, ena, entries;
6779         u32 num_mr, num_ah;
6780         u32 extra_srqs = 0;
6781         u32 extra_qps = 0;
6782         u8 pg_lvl = 1;
6783         int i, rc;
6784
6785         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6786         if (rc) {
6787                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6788                            rc);
6789                 return rc;
6790         }
6791         ctx = bp->ctx;
6792         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6793                 return 0;
6794
6795         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
6796                 pg_lvl = 2;
6797                 extra_qps = 65536;
6798                 extra_srqs = 8192;
6799         }
6800
6801         ctx_pg = &ctx->qp_mem;
6802         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6803                           extra_qps;
6804         mem_size = ctx->qp_entry_size * ctx_pg->entries;
6805         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
6806         if (rc)
6807                 return rc;
6808
6809         ctx_pg = &ctx->srq_mem;
6810         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
6811         mem_size = ctx->srq_entry_size * ctx_pg->entries;
6812         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
6813         if (rc)
6814                 return rc;
6815
6816         ctx_pg = &ctx->cq_mem;
6817         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
6818         mem_size = ctx->cq_entry_size * ctx_pg->entries;
6819         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
6820         if (rc)
6821                 return rc;
6822
6823         ctx_pg = &ctx->vnic_mem;
6824         ctx_pg->entries = ctx->vnic_max_vnic_entries +
6825                           ctx->vnic_max_ring_table_entries;
6826         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6827         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
6828         if (rc)
6829                 return rc;
6830
6831         ctx_pg = &ctx->stat_mem;
6832         ctx_pg->entries = ctx->stat_max_entries;
6833         mem_size = ctx->stat_entry_size * ctx_pg->entries;
6834         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
6835         if (rc)
6836                 return rc;
6837
6838         ena = 0;
6839         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6840                 goto skip_rdma;
6841
6842         ctx_pg = &ctx->mrav_mem;
6843         /* 128K extra is needed to accommodate static AH context
6844          * allocation by f/w.
6845          */
6846         num_mr = 1024 * 256;
6847         num_ah = 1024 * 128;
6848         ctx_pg->entries = num_mr + num_ah;
6849         mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6850         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
6851         if (rc)
6852                 return rc;
6853         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
6854         if (ctx->mrav_num_entries_units)
6855                 ctx_pg->entries =
6856                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
6857                          (num_ah / ctx->mrav_num_entries_units);
6858
6859         ctx_pg = &ctx->tim_mem;
6860         ctx_pg->entries = ctx->qp_mem.entries;
6861         mem_size = ctx->tim_entry_size * ctx_pg->entries;
6862         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
6863         if (rc)
6864                 return rc;
6865         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6866
6867 skip_rdma:
6868         entries = ctx->qp_max_l2_entries + extra_qps;
6869         entries = roundup(entries, ctx->tqm_entries_multiple);
6870         entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6871                           ctx->tqm_max_entries_per_ring);
6872         for (i = 0; i < bp->max_q + 1; i++) {
6873                 ctx_pg = ctx->tqm_mem[i];
6874                 ctx_pg->entries = entries;
6875                 mem_size = ctx->tqm_entry_size * entries;
6876                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
6877                 if (rc)
6878                         return rc;
6879                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
6880         }
6881         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6882         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6883         if (rc)
6884                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6885                            rc);
6886         else
6887                 ctx->flags |= BNXT_CTX_FLAG_INITED;
6888
6889         return 0;
6890 }
6891
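     /* Query the min/max resource limits (rings, ring groups, VNICs, stat
      * and RSS contexts) with HWRM_FUNC_RESOURCE_QCAPS.  When @all is false
      * only max_tx_sch_inputs is refreshed.
      */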
6892 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
6893 {
6894         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6895         struct hwrm_func_resource_qcaps_input req = {0};
6896         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6897         int rc;
6898
6899         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6900         req.fid = cpu_to_le16(0xffff);
6901
6902         mutex_lock(&bp->hwrm_cmd_lock);
6903         rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6904                                        HWRM_CMD_TIMEOUT);
6905         if (rc)
6906                 goto hwrm_func_resc_qcaps_exit;
6907
6908         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6909         if (!all)
6910                 goto hwrm_func_resc_qcaps_exit;
6911
6912         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6913         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6914         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6915         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6916         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6917         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6918         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6919         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6920         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6921         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6922         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6923         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6924         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6925         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6926         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6927         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6928
6929         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6930                 u16 max_msix = le16_to_cpu(resp->max_msix);
6931
6932                 hw_resc->max_nqs = max_msix;
6933                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6934         }
6935
6936         if (BNXT_PF(bp)) {
6937                 struct bnxt_pf_info *pf = &bp->pf;
6938
6939                 pf->vf_resv_strategy =
6940                         le16_to_cpu(resp->vf_reservation_strategy);
6941                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
6942                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6943         }
6944 hwrm_func_resc_qcaps_exit:
6945         mutex_unlock(&bp->hwrm_cmd_lock);
6946         return rc;
6947 }
6948
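     /* Issue HWRM_FUNC_QCAPS and cache the capability flags and maximum
      * resource counts in bp->flags, bp->fw_cap and bp->hw_resc.  PF-only
      * information such as the MAC address, VF ID range and flow table
      * sizes is stored in bp->pf.
      */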
6949 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
6950 {
6951         int rc = 0;
6952         struct hwrm_func_qcaps_input req = {0};
6953         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6954         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6955         u32 flags;
6956
6957         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6958         req.fid = cpu_to_le16(0xffff);
6959
6960         mutex_lock(&bp->hwrm_cmd_lock);
6961         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6962         if (rc)
6963                 goto hwrm_func_qcaps_exit;
6964
6965         flags = le32_to_cpu(resp->flags);
6966         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
6967                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6968         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
6969                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6970         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
6971                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
6972         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
6973                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
6974         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
6975                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
6976         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
6977                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
6978         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
6979                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
6980
6981         bp->tx_push_thresh = 0;
6982         if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
6983                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6984
6985         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6986         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6987         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6988         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6989         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6990         if (!hw_resc->max_hw_ring_grps)
6991                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6992         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6993         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6994         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6995
6996         if (BNXT_PF(bp)) {
6997                 struct bnxt_pf_info *pf = &bp->pf;
6998
6999                 pf->fw_fid = le16_to_cpu(resp->fid);
7000                 pf->port_id = le16_to_cpu(resp->port_id);
7001                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7002                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7003                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7004                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7005                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7006                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7007                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7008                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7009                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7010                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7011                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7012                         bp->flags |= BNXT_FLAG_WOL_CAP;
7013         } else {
7014 #ifdef CONFIG_BNXT_SRIOV
7015                 struct bnxt_vf_info *vf = &bp->vf;
7016
7017                 vf->fw_fid = le16_to_cpu(resp->fid);
7018                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7019 #endif
7020         }
7021
7022 hwrm_func_qcaps_exit:
7023         mutex_unlock(&bp->hwrm_cmd_lock);
7024         return rc;
7025 }
7026
7027 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7028
7029 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7030 {
7031         int rc;
7032
7033         rc = __bnxt_hwrm_func_qcaps(bp);
7034         if (rc)
7035                 return rc;
7036         rc = bnxt_hwrm_queue_qportcfg(bp);
7037         if (rc) {
7038                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7039                 return rc;
7040         }
7041         if (bp->hwrm_spec_code >= 0x10803) {
7042                 rc = bnxt_alloc_ctx_mem(bp);
7043                 if (rc)
7044                         return rc;
7045                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7046                 if (!rc)
7047                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7048         }
7049         return 0;
7050 }
7051
7052 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7053 {
7054         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7055         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7056         int rc = 0;
7057         u32 flags;
7058
7059         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7060                 return 0;
7061
7062         resp = bp->hwrm_cmd_resp_addr;
7063         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7064
7065         mutex_lock(&bp->hwrm_cmd_lock);
7066         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7067         if (rc)
7068                 goto hwrm_cfa_adv_qcaps_exit;
7069
7070         flags = le32_to_cpu(resp->flags);
7071         if (flags &
7072             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7073                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7074
7075 hwrm_cfa_adv_qcaps_exit:
7076         mutex_unlock(&bp->hwrm_cmd_lock);
7077         return rc;
7078 }
7079
7080 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7081 {
7082         struct bnxt_fw_health *fw_health = bp->fw_health;
7083         u32 reg_base = 0xffffffff;
7084         int i;
7085
7086         /* Only pre-map the monitoring GRC registers using window 3 */
7087         for (i = 0; i < 4; i++) {
7088                 u32 reg = fw_health->regs[i];
7089
7090                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7091                         continue;
7092                 if (reg_base == 0xffffffff)
7093                         reg_base = reg & BNXT_GRC_BASE_MASK;
7094                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7095                         return -ERANGE;
7096                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
7097                                             (reg & BNXT_GRC_OFFSET_MASK);
7098         }
7099         if (reg_base == 0xffffffff)
7100                 return 0;
7101
7102         writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7103                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7104         return 0;
7105 }
7106
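     /* Fetch the firmware error recovery configuration: polling intervals,
      * health/heartbeat/reset register locations and the reset sequence.
      * On any failure the BNXT_FW_CAP_ERROR_RECOVERY capability is cleared.
      */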
7107 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7108 {
7109         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7110         struct bnxt_fw_health *fw_health = bp->fw_health;
7111         struct hwrm_error_recovery_qcfg_input req = {0};
7112         int rc, i;
7113
7114         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7115                 return 0;
7116
7117         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7118         mutex_lock(&bp->hwrm_cmd_lock);
7119         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7120         if (rc)
7121                 goto err_recovery_out;
7122         fw_health->flags = le32_to_cpu(resp->flags);
7123         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7124             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7125                 rc = -EINVAL;
7126                 goto err_recovery_out;
7127         }
7128         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7129         fw_health->master_func_wait_dsecs =
7130                 le32_to_cpu(resp->master_func_wait_period);
7131         fw_health->normal_func_wait_dsecs =
7132                 le32_to_cpu(resp->normal_func_wait_period);
7133         fw_health->post_reset_wait_dsecs =
7134                 le32_to_cpu(resp->master_func_wait_period_after_reset);
7135         fw_health->post_reset_max_wait_dsecs =
7136                 le32_to_cpu(resp->max_bailout_time_after_reset);
7137         fw_health->regs[BNXT_FW_HEALTH_REG] =
7138                 le32_to_cpu(resp->fw_health_status_reg);
7139         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7140                 le32_to_cpu(resp->fw_heartbeat_reg);
7141         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7142                 le32_to_cpu(resp->fw_reset_cnt_reg);
7143         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7144                 le32_to_cpu(resp->reset_inprogress_reg);
7145         fw_health->fw_reset_inprog_reg_mask =
7146                 le32_to_cpu(resp->reset_inprogress_reg_mask);
7147         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7148         if (fw_health->fw_reset_seq_cnt >= 16) {
7149                 rc = -EINVAL;
7150                 goto err_recovery_out;
7151         }
7152         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7153                 fw_health->fw_reset_seq_regs[i] =
7154                         le32_to_cpu(resp->reset_reg[i]);
7155                 fw_health->fw_reset_seq_vals[i] =
7156                         le32_to_cpu(resp->reset_reg_val[i]);
7157                 fw_health->fw_reset_seq_delay_msec[i] =
7158                         resp->delay_after_reset[i];
7159         }
7160 err_recovery_out:
7161         mutex_unlock(&bp->hwrm_cmd_lock);
7162         if (!rc)
7163                 rc = bnxt_map_fw_health_regs(bp);
7164         if (rc)
7165                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7166         return rc;
7167 }
7168
7169 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7170 {
7171         struct hwrm_func_reset_input req = {0};
7172
7173         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7174         req.enables = 0;
7175
7176         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7177 }
7178
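     /* Discover the hardware CoS queue configuration via HWRM_QUEUE_QPORTCFG
      * and build the queue id/profile tables used for TC-to-queue mapping.
      */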
7179 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7180 {
7181         int rc = 0;
7182         struct hwrm_queue_qportcfg_input req = {0};
7183         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7184         u8 i, j, *qptr;
7185         bool no_rdma;
7186
7187         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7188
7189         mutex_lock(&bp->hwrm_cmd_lock);
7190         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7191         if (rc)
7192                 goto qportcfg_exit;
7193
7194         if (!resp->max_configurable_queues) {
7195                 rc = -EINVAL;
7196                 goto qportcfg_exit;
7197         }
7198         bp->max_tc = resp->max_configurable_queues;
7199         bp->max_lltc = resp->max_configurable_lossless_queues;
7200         if (bp->max_tc > BNXT_MAX_QUEUE)
7201                 bp->max_tc = BNXT_MAX_QUEUE;
7202
7203         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7204         qptr = &resp->queue_id0;
7205         for (i = 0, j = 0; i < bp->max_tc; i++) {
7206                 bp->q_info[j].queue_id = *qptr;
7207                 bp->q_ids[i] = *qptr++;
7208                 bp->q_info[j].queue_profile = *qptr++;
7209                 bp->tc_to_qidx[j] = j;
7210                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7211                     (no_rdma && BNXT_PF(bp)))
7212                         j++;
7213         }
7214         bp->max_q = bp->max_tc;
7215         bp->max_tc = max_t(u8, j, 1);
7216
7217         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7218                 bp->max_tc = 1;
7219
7220         if (bp->max_lltc > bp->max_tc)
7221                 bp->max_lltc = bp->max_tc;
7222
7223 qportcfg_exit:
7224         mutex_unlock(&bp->hwrm_cmd_lock);
7225         return rc;
7226 }
7227
7228 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7229 {
7230         struct hwrm_ver_get_input req = {0};
7231         int rc;
7232
7233         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7234         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7235         req.hwrm_intf_min = HWRM_VERSION_MINOR;
7236         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7237
7238         rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7239                                    silent);
7240         return rc;
7241 }
7242
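     /* Query the HWRM interface and firmware versions; record the supported
      * spec level, request length limits, chip number and the firmware
      * capability bits advertised in dev_caps_cfg.
      */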
7243 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7244 {
7245         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7246         u32 dev_caps_cfg;
7247         int rc;
7248
7249         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7250         mutex_lock(&bp->hwrm_cmd_lock);
7251         rc = __bnxt_hwrm_ver_get(bp, false);
7252         if (rc)
7253                 goto hwrm_ver_get_exit;
7254
7255         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7256
7257         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7258                              resp->hwrm_intf_min_8b << 8 |
7259                              resp->hwrm_intf_upd_8b;
7260         if (resp->hwrm_intf_maj_8b < 1) {
7261                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7262                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7263                             resp->hwrm_intf_upd_8b);
7264                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7265         }
7266         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
7267                  resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
7268                  resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
7269
7270         if (strlen(resp->active_pkg_name)) {
7271                 int fw_ver_len = strlen(bp->fw_ver_str);
7272
7273                 snprintf(bp->fw_ver_str + fw_ver_len,
7274                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7275                          resp->active_pkg_name);
7276                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7277         }
7278
7279         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7280         if (!bp->hwrm_cmd_timeout)
7281                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7282
7283         if (resp->hwrm_intf_maj_8b >= 1) {
7284                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7285                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7286         }
7287         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7288                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7289
7290         bp->chip_num = le16_to_cpu(resp->chip_num);
7291         bp->chip_rev = resp->chip_rev;
7292         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7293             !resp->chip_metal)
7294                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7295
7296         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7297         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7298             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7299                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7300
7301         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7302                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7303
7304         if (dev_caps_cfg &
7305             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7306                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7307
7308         if (dev_caps_cfg &
7309             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7310                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7311
7312         if (dev_caps_cfg &
7313             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7314                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7315
7316 hwrm_ver_get_exit:
7317         mutex_unlock(&bp->hwrm_cmd_lock);
7318         return rc;
7319 }
7320
7321 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7322 {
7323         struct hwrm_fw_set_time_input req = {0};
7324         struct tm tm;
7325         time64_t now = ktime_get_real_seconds();
7326
7327         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7328             bp->hwrm_spec_code < 0x10400)
7329                 return -EOPNOTSUPP;
7330
7331         time64_to_tm(now, 0, &tm);
7332         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7333         req.year = cpu_to_le16(1900 + tm.tm_year);
7334         req.month = 1 + tm.tm_mon;
7335         req.day = tm.tm_mday;
7336         req.hour = tm.tm_hour;
7337         req.minute = tm.tm_min;
7338         req.second = tm.tm_sec;
7339         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7340 }
7341
7342 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
7343 {
7344         int rc;
7345         struct bnxt_pf_info *pf = &bp->pf;
7346         struct hwrm_port_qstats_input req = {0};
7347
7348         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7349                 return 0;
7350
7351         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7352         req.port_id = cpu_to_le16(pf->port_id);
7353         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
7354         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
7355         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7356         return rc;
7357 }
7358
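     /* Collect the extended port statistics and, if the returned TX stats
      * are large enough to include the per-priority counters, also query
      * the priority-to-CoS mapping so those counters can be reported.
      */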
7359 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
7360 {
7361         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7362         struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
7363         struct hwrm_port_qstats_ext_input req = {0};
7364         struct bnxt_pf_info *pf = &bp->pf;
7365         u32 tx_stat_size;
7366         int rc;
7367
7368         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7369                 return 0;
7370
7371         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7372         req.port_id = cpu_to_le16(pf->port_id);
7373         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7374         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
7375         tx_stat_size = bp->hw_tx_port_stats_ext ?
7376                        sizeof(*bp->hw_tx_port_stats_ext) : 0;
7377         req.tx_stat_size = cpu_to_le16(tx_stat_size);
7378         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
7379         mutex_lock(&bp->hwrm_cmd_lock);
7380         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7381         if (!rc) {
7382                 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7383                 bp->fw_tx_stats_ext_size = tx_stat_size ?
7384                         le16_to_cpu(resp->tx_stat_size) / 8 : 0;
7385         } else {
7386                 bp->fw_rx_stats_ext_size = 0;
7387                 bp->fw_tx_stats_ext_size = 0;
7388         }
7389         if (bp->fw_tx_stats_ext_size <=
7390             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7391                 mutex_unlock(&bp->hwrm_cmd_lock);
7392                 bp->pri2cos_valid = 0;
7393                 return rc;
7394         }
7395
7396         bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7397         req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7398
7399         rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7400         if (!rc) {
7401                 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7402                 u8 *pri2cos;
7403                 int i, j;
7404
7405                 resp2 = bp->hwrm_cmd_resp_addr;
7406                 pri2cos = &resp2->pri0_cos_queue_id;
7407                 for (i = 0; i < 8; i++) {
7408                         u8 queue_id = pri2cos[i];
7409
7410                         for (j = 0; j < bp->max_q; j++) {
7411                                 if (bp->q_ids[j] == queue_id)
7412                                         bp->pri2cos[i] = j;
7413                         }
7414                 }
7415                 bp->pri2cos_valid = 1;
7416         }
7417         mutex_unlock(&bp->hwrm_cmd_lock);
7418         return rc;
7419 }
7420
7421 static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
7422 {
7423         struct hwrm_pcie_qstats_input req = {0};
7424
7425         if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
7426                 return 0;
7427
7428         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
7429         req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
7430         req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
7431         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7432 }
7433
7434 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7435 {
7436         if (bp->vxlan_port_cnt) {
7437                 bnxt_hwrm_tunnel_dst_port_free(
7438                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7439         }
7440         bp->vxlan_port_cnt = 0;
7441         if (bp->nge_port_cnt) {
7442                 bnxt_hwrm_tunnel_dst_port_free(
7443                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7444         }
7445         bp->nge_port_cnt = 0;
7446 }
7447
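     /* Enable or disable TPA aggregation on every VNIC, using the TPA flags
      * currently set in bp->flags when enabling.
      */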
7448 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7449 {
7450         int rc, i;
7451         u32 tpa_flags = 0;
7452
7453         if (set_tpa)
7454                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7455         else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
7456                 return 0;
7457         for (i = 0; i < bp->nr_vnics; i++) {
7458                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7459                 if (rc) {
7460                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
7461                                    i, rc);
7462                         return rc;
7463                 }
7464         }
7465         return 0;
7466 }
7467
7468 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7469 {
7470         int i;
7471
7472         for (i = 0; i < bp->nr_vnics; i++)
7473                 bnxt_hwrm_vnic_set_rss(bp, i, false);
7474 }
7475
7476 static void bnxt_clear_vnic(struct bnxt *bp)
7477 {
7478         if (!bp->vnic_info)
7479                 return;
7480
7481         bnxt_hwrm_clear_vnic_filter(bp);
7482         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
7483                 /* clear all RSS settings before freeing the VNIC ctx */
7484                 bnxt_hwrm_clear_vnic_rss(bp);
7485                 bnxt_hwrm_vnic_ctx_free(bp);
7486         }
7487         /* before freeing the VNIC, undo the VNIC TPA settings */
7488         if (bp->flags & BNXT_FLAG_TPA)
7489                 bnxt_set_tpa(bp, false);
7490         bnxt_hwrm_vnic_free(bp);
7491         if (bp->flags & BNXT_FLAG_CHIP_P5)
7492                 bnxt_hwrm_vnic_ctx_free(bp);
7493 }
7494
7495 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7496                                     bool irq_re_init)
7497 {
7498         bnxt_clear_vnic(bp);
7499         bnxt_hwrm_ring_free(bp, close_path);
7500         bnxt_hwrm_ring_grp_free(bp);
7501         if (irq_re_init) {
7502                 bnxt_hwrm_stat_ctx_free(bp);
7503                 bnxt_hwrm_free_tunnel_ports(bp);
7504         }
7505 }
7506
7507 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7508 {
7509         struct hwrm_func_cfg_input req = {0};
7510         int rc;
7511
7512         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7513         req.fid = cpu_to_le16(0xffff);
7514         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7515         if (br_mode == BRIDGE_MODE_VEB)
7516                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7517         else if (br_mode == BRIDGE_MODE_VEPA)
7518                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7519         else
7520                 return -EINVAL;
7521         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7522         return rc;
7523 }
7524
7525 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7526 {
7527         struct hwrm_func_cfg_input req = {0};
7528         int rc;
7529
7530         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7531                 return 0;
7532
7533         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7534         req.fid = cpu_to_le16(0xffff);
7535         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
7536         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
7537         if (size == 128)
7538                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
7539
7540         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7541         return rc;
7542 }
7543
7544 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7545 {
7546         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
7547         int rc;
7548
7549         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
7550                 goto skip_rss_ctx;
7551
7552         /* allocate context for vnic */
7553         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
7554         if (rc) {
7555                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7556                            vnic_id, rc);
7557                 goto vnic_setup_err;
7558         }
7559         bp->rsscos_nr_ctxs++;
7560
7561         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7562                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
7563                 if (rc) {
7564                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
7565                                    vnic_id, rc);
7566                         goto vnic_setup_err;
7567                 }
7568                 bp->rsscos_nr_ctxs++;
7569         }
7570
7571 skip_rss_ctx:
7572         /* configure default vnic, ring grp */
7573         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7574         if (rc) {
7575                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7576                            vnic_id, rc);
7577                 goto vnic_setup_err;
7578         }
7579
7580         /* Enable RSS hashing on vnic */
7581         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
7582         if (rc) {
7583                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
7584                            vnic_id, rc);
7585                 goto vnic_setup_err;
7586         }
7587
7588         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7589                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7590                 if (rc) {
7591                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7592                                    vnic_id, rc);
7593                 }
7594         }
7595
7596 vnic_setup_err:
7597         return rc;
7598 }
7599
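     /* P5 chips: allocate one RSS context per 64 RX rings for this VNIC,
      * program the RSS table, then configure the VNIC and, when aggregation
      * rings are used, its HDS settings.
      */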
7600 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7601 {
7602         int rc, i, nr_ctxs;
7603
7604         nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
7605         for (i = 0; i < nr_ctxs; i++) {
7606                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
7607                 if (rc) {
7608                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
7609                                    vnic_id, i, rc);
7610                         break;
7611                 }
7612                 bp->rsscos_nr_ctxs++;
7613         }
7614         if (i < nr_ctxs)
7615                 return -ENOMEM;
7616
7617         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
7618         if (rc) {
7619                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
7620                            vnic_id, rc);
7621                 return rc;
7622         }
7623         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7624         if (rc) {
7625                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7626                            vnic_id, rc);
7627                 return rc;
7628         }
7629         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7630                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7631                 if (rc) {
7632                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7633                                    vnic_id, rc);
7634                 }
7635         }
7636         return rc;
7637 }
7638
7639 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7640 {
7641         if (bp->flags & BNXT_FLAG_CHIP_P5)
7642                 return __bnxt_setup_vnic_p5(bp, vnic_id);
7643         else
7644                 return __bnxt_setup_vnic(bp, vnic_id);
7645 }
7646
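     /* Allocate and set up one additional VNIC per RX ring for aRFS flow
      * steering.  Not used on P5 chips or when CONFIG_RFS_ACCEL is disabled.
      */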
7647 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7648 {
7649 #ifdef CONFIG_RFS_ACCEL
7650         int i, rc = 0;
7651
7652         if (bp->flags & BNXT_FLAG_CHIP_P5)
7653                 return 0;
7654
7655         for (i = 0; i < bp->rx_nr_rings; i++) {
7656                 struct bnxt_vnic_info *vnic;
7657                 u16 vnic_id = i + 1;
7658                 u16 ring_id = i;
7659
7660                 if (vnic_id >= bp->nr_vnics)
7661                         break;
7662
7663                 vnic = &bp->vnic_info[vnic_id];
7664                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7665                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7666                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
7667                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
7668                 if (rc) {
7669                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7670                                    vnic_id, rc);
7671                         break;
7672                 }
7673                 rc = bnxt_setup_vnic(bp, vnic_id);
7674                 if (rc)
7675                         break;
7676         }
7677         return rc;
7678 #else
7679         return 0;
7680 #endif
7681 }
7682
7683 /* Allow PF and VF with default VLAN to be in promiscuous mode */
7684 static bool bnxt_promisc_ok(struct bnxt *bp)
7685 {
7686 #ifdef CONFIG_BNXT_SRIOV
7687         if (BNXT_VF(bp) && !bp->vf.vlan)
7688                 return false;
7689 #endif
7690         return true;
7691 }
7692
7693 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7694 {
7695         int rc;
7696
7697         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7698         if (rc) {
7699                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7700                            rc);
7701                 return rc;
7702         }
7703
7704         rc = bnxt_hwrm_vnic_cfg(bp, 1);
7705         if (rc) {
7706                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7707                            rc);
7708                 return rc;
7709         }
7710         return rc;
7711 }
7712
7713 static int bnxt_cfg_rx_mode(struct bnxt *);
7714 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
7715
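     /* Bring up the device data path: allocate stat contexts, rings and ring
      * groups, set up the default VNIC (plus RFS VNICs and TPA when enabled),
      * program the unicast MAC filter and RX mask, and apply the interrupt
      * coalescing settings.
      */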
7716 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7717 {
7718         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7719         int rc = 0;
7720         unsigned int rx_nr_rings = bp->rx_nr_rings;
7721
7722         if (irq_re_init) {
7723                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7724                 if (rc) {
7725                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7726                                    rc);
7727                         goto err_out;
7728                 }
7729         }
7730
7731         rc = bnxt_hwrm_ring_alloc(bp);
7732         if (rc) {
7733                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7734                 goto err_out;
7735         }
7736
7737         rc = bnxt_hwrm_ring_grp_alloc(bp);
7738         if (rc) {
7739                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7740                 goto err_out;
7741         }
7742
7743         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7744                 rx_nr_rings--;
7745
7746         /* default vnic 0 */
7747         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
7748         if (rc) {
7749                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7750                 goto err_out;
7751         }
7752
7753         rc = bnxt_setup_vnic(bp, 0);
7754         if (rc)
7755                 goto err_out;
7756
7757         if (bp->flags & BNXT_FLAG_RFS) {
7758                 rc = bnxt_alloc_rfs_vnics(bp);
7759                 if (rc)
7760                         goto err_out;
7761         }
7762
7763         if (bp->flags & BNXT_FLAG_TPA) {
7764                 rc = bnxt_set_tpa(bp, true);
7765                 if (rc)
7766                         goto err_out;
7767         }
7768
7769         if (BNXT_VF(bp))
7770                 bnxt_update_vf_mac(bp);
7771
7772         /* Filter for default vnic 0 */
7773         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7774         if (rc) {
7775                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7776                 goto err_out;
7777         }
7778         vnic->uc_filter_count = 1;
7779
7780         vnic->rx_mask = 0;
7781         if (bp->dev->flags & IFF_BROADCAST)
7782                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7783
7784         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7785                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7786
7787         if (bp->dev->flags & IFF_ALLMULTI) {
7788                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7789                 vnic->mc_list_count = 0;
7790         } else {
7791                 u32 mask = 0;
7792
7793                 bnxt_mc_list_updated(bp, &mask);
7794                 vnic->rx_mask |= mask;
7795         }
7796
7797         rc = bnxt_cfg_rx_mode(bp);
7798         if (rc)
7799                 goto err_out;
7800
7801         rc = bnxt_hwrm_set_coal(bp);
7802         if (rc)
7803                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
7804                                 rc);
7805
7806         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7807                 rc = bnxt_setup_nitroa0_vnic(bp);
7808                 if (rc)
7809                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7810                                    rc);
7811         }
7812
7813         if (BNXT_VF(bp)) {
7814                 bnxt_hwrm_func_qcfg(bp);
7815                 netdev_update_features(bp->dev);
7816         }
7817
7818         return 0;
7819
7820 err_out:
7821         bnxt_hwrm_resource_free(bp, 0, true);
7822
7823         return rc;
7824 }
7825
7826 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7827 {
7828         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7829         return 0;
7830 }
7831
7832 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7833 {
7834         bnxt_init_cp_rings(bp);
7835         bnxt_init_rx_rings(bp);
7836         bnxt_init_tx_rings(bp);
7837         bnxt_init_ring_grps(bp, irq_re_init);
7838         bnxt_init_vnics(bp);
7839
7840         return bnxt_init_chip(bp, irq_re_init);
7841 }
7842
7843 static int bnxt_set_real_num_queues(struct bnxt *bp)
7844 {
7845         int rc;
7846         struct net_device *dev = bp->dev;
7847
7848         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7849                                           bp->tx_nr_rings_xdp);
7850         if (rc)
7851                 return rc;
7852
7853         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7854         if (rc)
7855                 return rc;
7856
7857 #ifdef CONFIG_RFS_ACCEL
7858         if (bp->flags & BNXT_FLAG_RFS)
7859                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
7860 #endif
7861
7862         return rc;
7863 }
7864
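     /* Trim the requested RX and TX ring counts to fit within @max.  With
      * shared rings each count is simply clamped to @max; otherwise rings
      * are dropped one at a time, preferring the larger count, until
      * rx + tx <= max.
      */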
7865 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7866                            bool shared)
7867 {
7868         int _rx = *rx, _tx = *tx;
7869
7870         if (shared) {
7871                 *rx = min_t(int, _rx, max);
7872                 *tx = min_t(int, _tx, max);
7873         } else {
7874                 if (max < 2)
7875                         return -ENOMEM;
7876
7877                 while (_rx + _tx > max) {
7878                         if (_rx > _tx && _rx > 1)
7879                                 _rx--;
7880                         else if (_tx > 1)
7881                                 _tx--;
7882                 }
7883                 *rx = _rx;
7884                 *tx = _tx;
7885         }
7886         return 0;
7887 }
7888
7889 static void bnxt_setup_msix(struct bnxt *bp)
7890 {
7891         const int len = sizeof(bp->irq_tbl[0].name);
7892         struct net_device *dev = bp->dev;
7893         int tcs, i;
7894
7895         tcs = netdev_get_num_tc(dev);
7896         if (tcs) {
7897                 int i, off, count;
7898
7899                 for (i = 0; i < tcs; i++) {
7900                         count = bp->tx_nr_rings_per_tc;
7901                         off = i * count;
7902                         netdev_set_tc_queue(dev, i, count, off);
7903                 }
7904         }
7905
7906         for (i = 0; i < bp->cp_nr_rings; i++) {
7907                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7908                 char *attr;
7909
7910                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7911                         attr = "TxRx";
7912                 else if (i < bp->rx_nr_rings)
7913                         attr = "rx";
7914                 else
7915                         attr = "tx";
7916
7917                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7918                          attr, i);
7919                 bp->irq_tbl[map_idx].handler = bnxt_msix;
7920         }
7921 }
7922
7923 static void bnxt_setup_inta(struct bnxt *bp)
7924 {
7925         const int len = sizeof(bp->irq_tbl[0].name);
7926
7927         if (netdev_get_num_tc(bp->dev))
7928                 netdev_reset_tc(bp->dev);
7929
7930         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7931                  0);
7932         bp->irq_tbl[0].handler = bnxt_inta;
7933 }
7934
7935 static int bnxt_setup_int_mode(struct bnxt *bp)
7936 {
7937         int rc;
7938
7939         if (bp->flags & BNXT_FLAG_USING_MSIX)
7940                 bnxt_setup_msix(bp);
7941         else
7942                 bnxt_setup_inta(bp);
7943
7944         rc = bnxt_set_real_num_queues(bp);
7945         return rc;
7946 }
7947
7948 #ifdef CONFIG_RFS_ACCEL
7949 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7950 {
7951         return bp->hw_resc.max_rsscos_ctxs;
7952 }
7953
7954 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7955 {
7956         return bp->hw_resc.max_vnics;
7957 }
7958 #endif
7959
7960 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7961 {
7962         return bp->hw_resc.max_stat_ctxs;
7963 }
7964
7965 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7966 {
7967         return bp->hw_resc.max_cp_rings;
7968 }
7969
7970 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
7971 {
7972         unsigned int cp = bp->hw_resc.max_cp_rings;
7973
7974         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7975                 cp -= bnxt_get_ulp_msix_num(bp);
7976
7977         return cp;
7978 }
7979
7980 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7981 {
7982         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7983
7984         if (bp->flags & BNXT_FLAG_CHIP_P5)
7985                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7986
7987         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7988 }
7989
7990 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
7991 {
7992         bp->hw_resc.max_irqs = max_irqs;
7993 }
7994
7995 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7996 {
7997         unsigned int cp;
7998
7999         cp = bnxt_get_max_func_cp_rings_for_en(bp);
8000         if (bp->flags & BNXT_FLAG_CHIP_P5)
8001                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8002         else
8003                 return cp - bp->cp_nr_rings;
8004 }
8005
8006 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8007 {
8008         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8009 }
8010
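     /* Return how many of the @num requested MSI-X vectors can be made
      * available beyond those already used by the L2 completion rings,
      * capped by the function's IRQ limit when the new resource manager
      * is in use.
      */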
8011 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8012 {
8013         int max_cp = bnxt_get_max_func_cp_rings(bp);
8014         int max_irq = bnxt_get_max_func_irqs(bp);
8015         int total_req = bp->cp_nr_rings + num;
8016         int max_idx, avail_msix;
8017
8018         max_idx = bp->total_irqs;
8019         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8020                 max_idx = min_t(int, bp->total_irqs, max_cp);
8021         avail_msix = max_idx - bp->cp_nr_rings;
8022         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8023                 return avail_msix;
8024
8025         if (max_irq < total_req) {
8026                 num = max_irq - bp->cp_nr_rings;
8027                 if (num <= 0)
8028                         return 0;
8029         }
8030         return num;
8031 }
8032
8033 static int bnxt_get_num_msix(struct bnxt *bp)
8034 {
8035         if (!BNXT_NEW_RM(bp))
8036                 return bnxt_get_max_func_irqs(bp);
8037
8038         return bnxt_nq_rings_in_use(bp);
8039 }
8040
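     /* Enable MSI-X: request up to bnxt_get_num_msix() vectors from the PCI
      * core, build the IRQ table, and trim the RX/TX ring counts to match
      * the vectors actually granted (less any reserved for ULPs).
      */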
8041 static int bnxt_init_msix(struct bnxt *bp)
8042 {
8043         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8044         struct msix_entry *msix_ent;
8045
8046         total_vecs = bnxt_get_num_msix(bp);
8047         max = bnxt_get_max_func_irqs(bp);
8048         if (total_vecs > max)
8049                 total_vecs = max;
8050
8051         if (!total_vecs)
8052                 return 0;
8053
8054         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8055         if (!msix_ent)
8056                 return -ENOMEM;
8057
8058         for (i = 0; i < total_vecs; i++) {
8059                 msix_ent[i].entry = i;
8060                 msix_ent[i].vector = 0;
8061         }
8062
8063         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8064                 min = 2;
8065
8066         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8067         ulp_msix = bnxt_get_ulp_msix_num(bp);
8068         if (total_vecs < 0 || total_vecs < ulp_msix) {
8069                 rc = -ENODEV;
8070                 goto msix_setup_exit;
8071         }
8072
8073         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8074         if (bp->irq_tbl) {
8075                 for (i = 0; i < total_vecs; i++)
8076                         bp->irq_tbl[i].vector = msix_ent[i].vector;
8077
8078                 bp->total_irqs = total_vecs;
8079                 /* Trim rings based on the number of vectors allocated */
8080                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8081                                      total_vecs - ulp_msix, min == 1);
8082                 if (rc)
8083                         goto msix_setup_exit;
8084
8085                 bp->cp_nr_rings = (min == 1) ?
8086                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8087                                   bp->tx_nr_rings + bp->rx_nr_rings;
8088
8089         } else {
8090                 rc = -ENOMEM;
8091                 goto msix_setup_exit;
8092         }
8093         bp->flags |= BNXT_FLAG_USING_MSIX;
8094         kfree(msix_ent);
8095         return 0;
8096
8097 msix_setup_exit:
8098         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8099         kfree(bp->irq_tbl);
8100         bp->irq_tbl = NULL;
8101         pci_disable_msix(bp->pdev);
8102         kfree(msix_ent);
8103         return rc;
8104 }
8105
8106 static int bnxt_init_inta(struct bnxt *bp)
8107 {
8108         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8109         if (!bp->irq_tbl)
8110                 return -ENOMEM;
8111
8112         bp->total_irqs = 1;
8113         bp->rx_nr_rings = 1;
8114         bp->tx_nr_rings = 1;
8115         bp->cp_nr_rings = 1;
8116         bp->flags |= BNXT_FLAG_SHARED_RINGS;
8117         bp->irq_tbl[0].vector = bp->pdev->irq;
8118         return 0;
8119 }
8120
8121 static int bnxt_init_int_mode(struct bnxt *bp)
8122 {
8123         int rc = 0;
8124
8125         if (bp->flags & BNXT_FLAG_MSIX_CAP)
8126                 rc = bnxt_init_msix(bp);
8127
8128         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8129                 /* fallback to INTA */
8130                 rc = bnxt_init_inta(bp);
8131         }
8132         return rc;
8133 }
8134
8135 static void bnxt_clear_int_mode(struct bnxt *bp)
8136 {
8137         if (bp->flags & BNXT_FLAG_USING_MSIX)
8138                 pci_disable_msix(bp->pdev);
8139
8140         kfree(bp->irq_tbl);
8141         bp->irq_tbl = NULL;
8142         bp->flags &= ~BNXT_FLAG_USING_MSIX;
8143 }
8144
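     /* Reserve rings with the firmware.  If the required MSI-X vector count
      * has changed under the new resource manager, interrupts are torn down
      * and re-initialized around the reservation.  A TX ring count that no
      * longer divides evenly among the configured TCs resets the TC setup
      * and fails with -ENOMEM.
      */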
8145 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8146 {
8147         int tcs = netdev_get_num_tc(bp->dev);
8148         bool irq_cleared = false;
8149         int rc;
8150
8151         if (!bnxt_need_reserve_rings(bp))
8152                 return 0;
8153
8154         if (irq_re_init && BNXT_NEW_RM(bp) &&
8155             bnxt_get_num_msix(bp) != bp->total_irqs) {
8156                 bnxt_ulp_irq_stop(bp);
8157                 bnxt_clear_int_mode(bp);
8158                 irq_cleared = true;
8159         }
8160         rc = __bnxt_reserve_rings(bp);
8161         if (irq_cleared) {
8162                 if (!rc)
8163                         rc = bnxt_init_int_mode(bp);
8164                 bnxt_ulp_irq_restart(bp, rc);
8165         }
8166         if (rc) {
8167                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8168                 return rc;
8169         }
8170         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8171                 netdev_err(bp->dev, "tx ring reservation failure\n");
8172                 netdev_reset_tc(bp->dev);
8173                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8174                 return -ENOMEM;
8175         }
8176         return 0;
8177 }
8178
8179 static void bnxt_free_irq(struct bnxt *bp)
8180 {
8181         struct bnxt_irq *irq;
8182         int i;
8183
8184 #ifdef CONFIG_RFS_ACCEL
8185         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8186         bp->dev->rx_cpu_rmap = NULL;
8187 #endif
8188         if (!bp->irq_tbl || !bp->bnapi)
8189                 return;
8190
8191         for (i = 0; i < bp->cp_nr_rings; i++) {
8192                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8193
8194                 irq = &bp->irq_tbl[map_idx];
8195                 if (irq->requested) {
8196                         if (irq->have_cpumask) {
8197                                 irq_set_affinity_hint(irq->vector, NULL);
8198                                 free_cpumask_var(irq->cpu_mask);
8199                                 irq->have_cpumask = 0;
8200                         }
8201                         free_irq(irq->vector, bp->bnapi[i]);
8202                 }
8203
8204                 irq->requested = 0;
8205         }
8206 }
8207
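/* Request one IRQ per completion ring.  Each vector gets an affinity hint
 * spread over CPUs local to the device's NUMA node via cpumask_local_spread(),
 * and, when CONFIG_RFS_ACCEL is enabled, RX ring vectors are also added to
 * the rx_cpu_rmap so accelerated RFS can steer flows to the CPU servicing
 * the ring.
 */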
8208 static int bnxt_request_irq(struct bnxt *bp)
8209 {
8210         int i, j, rc = 0;
8211         unsigned long flags = 0;
8212 #ifdef CONFIG_RFS_ACCEL
8213         struct cpu_rmap *rmap;
8214 #endif
8215
8216         rc = bnxt_setup_int_mode(bp);
8217         if (rc) {
8218                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8219                            rc);
8220                 return rc;
8221         }
8222 #ifdef CONFIG_RFS_ACCEL
8223         rmap = bp->dev->rx_cpu_rmap;
8224 #endif
8225         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8226                 flags = IRQF_SHARED;
8227
8228         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8229                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8230                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8231
8232 #ifdef CONFIG_RFS_ACCEL
8233                 if (rmap && bp->bnapi[i]->rx_ring) {
8234                         rc = irq_cpu_rmap_add(rmap, irq->vector);
8235                         if (rc)
8236                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8237                                             j);
8238                         j++;
8239                 }
8240 #endif
8241                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8242                                  bp->bnapi[i]);
8243                 if (rc)
8244                         break;
8245
8246                 irq->requested = 1;
8247
8248                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8249                         int numa_node = dev_to_node(&bp->pdev->dev);
8250
8251                         irq->have_cpumask = 1;
8252                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8253                                         irq->cpu_mask);
8254                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8255                         if (rc) {
8256                                 netdev_warn(bp->dev,
8257                                             "Set affinity failed, IRQ = %d\n",
8258                                             irq->vector);
8259                                 break;
8260                         }
8261                 }
8262         }
8263         return rc;
8264 }
8265
8266 static void bnxt_del_napi(struct bnxt *bp)
8267 {
8268         int i;
8269
8270         if (!bp->bnapi)
8271                 return;
8272
8273         for (i = 0; i < bp->cp_nr_rings; i++) {
8274                 struct bnxt_napi *bnapi = bp->bnapi[i];
8275
8276                 napi_hash_del(&bnapi->napi);
8277                 netif_napi_del(&bnapi->napi);
8278         }
8279         /* We called napi_hash_del() before netif_napi_del(), so we must
8280          * respect an RCU grace period before freeing the napi structures.
8281          */
8282         synchronize_net();
8283 }
8284
8285 static void bnxt_init_napi(struct bnxt *bp)
8286 {
8287         int i;
8288         unsigned int cp_nr_rings = bp->cp_nr_rings;
8289         struct bnxt_napi *bnapi;
8290
8291         if (bp->flags & BNXT_FLAG_USING_MSIX) {
8292                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8293
8294                 if (bp->flags & BNXT_FLAG_CHIP_P5)
8295                         poll_fn = bnxt_poll_p5;
8296                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8297                         cp_nr_rings--;
8298                 for (i = 0; i < cp_nr_rings; i++) {
8299                         bnapi = bp->bnapi[i];
8300                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8301                 }
8302                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8303                         bnapi = bp->bnapi[cp_nr_rings];
8304                         netif_napi_add(bp->dev, &bnapi->napi,
8305                                        bnxt_poll_nitroa0, 64);
8306                 }
8307         } else {
8308                 bnapi = bp->bnapi[0];
8309                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8310         }
8311 }
8312
8313 static void bnxt_disable_napi(struct bnxt *bp)
8314 {
8315         int i;
8316
8317         if (!bp->bnapi)
8318                 return;
8319
8320         for (i = 0; i < bp->cp_nr_rings; i++) {
8321                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8322
8323                 if (bp->bnapi[i]->rx_ring)
8324                         cancel_work_sync(&cpr->dim.work);
8325
8326                 napi_disable(&bp->bnapi[i]->napi);
8327         }
8328 }
8329
8330 static void bnxt_enable_napi(struct bnxt *bp)
8331 {
8332         int i;
8333
8334         for (i = 0; i < bp->cp_nr_rings; i++) {
8335                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8336                 bp->bnapi[i]->in_reset = false;
8337
8338                 if (bp->bnapi[i]->rx_ring) {
8339                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8340                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8341                 }
8342                 napi_enable(&bp->bnapi[i]->napi);
8343         }
8344 }
8345
8346 void bnxt_tx_disable(struct bnxt *bp)
8347 {
8348         int i;
8349         struct bnxt_tx_ring_info *txr;
8350
8351         if (bp->tx_ring) {
8352                 for (i = 0; i < bp->tx_nr_rings; i++) {
8353                         txr = &bp->tx_ring[i];
8354                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
8355                 }
8356         }
8357         /* Stop all TX queues */
8358         netif_tx_disable(bp->dev);
8359         netif_carrier_off(bp->dev);
8360 }
8361
8362 void bnxt_tx_enable(struct bnxt *bp)
8363 {
8364         int i;
8365         struct bnxt_tx_ring_info *txr;
8366
8367         for (i = 0; i < bp->tx_nr_rings; i++) {
8368                 txr = &bp->tx_ring[i];
8369                 txr->dev_state = 0;
8370         }
8371         netif_tx_wake_all_queues(bp->dev);
8372         if (bp->link_info.link_up)
8373                 netif_carrier_on(bp->dev);
8374 }
8375
8376 static void bnxt_report_link(struct bnxt *bp)
8377 {
8378         if (bp->link_info.link_up) {
8379                 const char *duplex;
8380                 const char *flow_ctrl;
8381                 u32 speed;
8382                 u16 fec;
8383
8384                 netif_carrier_on(bp->dev);
8385                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8386                         duplex = "full";
8387                 else
8388                         duplex = "half";
8389                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8390                         flow_ctrl = "ON - receive & transmit";
8391                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8392                         flow_ctrl = "ON - transmit";
8393                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8394                         flow_ctrl = "ON - receive";
8395                 else
8396                         flow_ctrl = "none";
8397                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
8398                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
8399                             speed, duplex, flow_ctrl);
8400                 if (bp->flags & BNXT_FLAG_EEE_CAP)
8401                         netdev_info(bp->dev, "EEE is %s\n",
8402                                     bp->eee.eee_active ? "active" :
8403                                                          "not active");
8404                 fec = bp->link_info.fec_cfg;
8405                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8406                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8407                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8408                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8409                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
8410         } else {
8411                 netif_carrier_off(bp->dev);
8412                 netdev_err(bp->dev, "NIC Link is Down\n");
8413         }
8414 }
8415
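/* HWRM request/response pattern used throughout this file (illustrative
 * sketch, with <cmd> standing in for a real command name):
 *
 *	struct hwrm_<cmd>_input req = {0};
 *	struct hwrm_<cmd>_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_<CMD>, -1, -1);
 *	... fill request fields, multi-byte values via cpu_to_le16/32() ...
 *	mutex_lock(&bp->hwrm_cmd_lock);
 *	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *	if (!rc)
 *		... parse *resp using le16/32_to_cpu() ...
 *	mutex_unlock(&bp->hwrm_cmd_lock);
 *
 * The response buffer is shared, so hwrm_cmd_lock must be held across both
 * the send and the response parsing.  Callers that do not need to look at
 * the response use hwrm_send_message(), which handles the locking itself.
 */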
8416 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8417 {
8418         int rc = 0;
8419         struct hwrm_port_phy_qcaps_input req = {0};
8420         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8421         struct bnxt_link_info *link_info = &bp->link_info;
8422
8423         bp->flags &= ~BNXT_FLAG_EEE_CAP;
8424         if (bp->test_info)
8425                 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
8426                                           BNXT_TEST_FL_AN_PHY_LPBK);
8427         if (bp->hwrm_spec_code < 0x10201)
8428                 return 0;
8429
8430         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8431
8432         mutex_lock(&bp->hwrm_cmd_lock);
8433         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8434         if (rc)
8435                 goto hwrm_phy_qcaps_exit;
8436
8437         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
8438                 struct ethtool_eee *eee = &bp->eee;
8439                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8440
8441                 bp->flags |= BNXT_FLAG_EEE_CAP;
8442                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8443                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8444                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8445                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8446                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8447         }
8448         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8449                 if (bp->test_info)
8450                         bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8451         }
8452         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
8453                 if (bp->test_info)
8454                         bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
8455         }
8456         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
8457                 if (BNXT_PF(bp))
8458                         bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
8459         }
8460         if (resp->supported_speeds_auto_mode)
8461                 link_info->support_auto_speeds =
8462                         le16_to_cpu(resp->supported_speeds_auto_mode);
8463
8464         bp->port_count = resp->port_cnt;
8465
8466 hwrm_phy_qcaps_exit:
8467         mutex_unlock(&bp->hwrm_cmd_lock);
8468         return rc;
8469 }
8470
8471 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8472 {
8473         int rc = 0;
8474         struct bnxt_link_info *link_info = &bp->link_info;
8475         struct hwrm_port_phy_qcfg_input req = {0};
8476         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8477         u8 link_up = link_info->link_up;
8478         u16 diff;
8479
8480         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8481
8482         mutex_lock(&bp->hwrm_cmd_lock);
8483         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8484         if (rc) {
8485                 mutex_unlock(&bp->hwrm_cmd_lock);
8486                 return rc;
8487         }
8488
8489         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8490         link_info->phy_link_status = resp->link;
8491         link_info->duplex = resp->duplex_cfg;
8492         if (bp->hwrm_spec_code >= 0x10800)
8493                 link_info->duplex = resp->duplex_state;
8494         link_info->pause = resp->pause;
8495         link_info->auto_mode = resp->auto_mode;
8496         link_info->auto_pause_setting = resp->auto_pause;
8497         link_info->lp_pause = resp->link_partner_adv_pause;
8498         link_info->force_pause_setting = resp->force_pause;
8499         link_info->duplex_setting = resp->duplex_cfg;
8500         if (link_info->phy_link_status == BNXT_LINK_LINK)
8501                 link_info->link_speed = le16_to_cpu(resp->link_speed);
8502         else
8503                 link_info->link_speed = 0;
8504         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
8505         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8506         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
8507         link_info->lp_auto_link_speeds =
8508                 le16_to_cpu(resp->link_partner_adv_speeds);
8509         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8510         link_info->phy_ver[0] = resp->phy_maj;
8511         link_info->phy_ver[1] = resp->phy_min;
8512         link_info->phy_ver[2] = resp->phy_bld;
8513         link_info->media_type = resp->media_type;
8514         link_info->phy_type = resp->phy_type;
8515         link_info->transceiver = resp->xcvr_pkg_type;
8516         link_info->phy_addr = resp->eee_config_phy_addr &
8517                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
8518         link_info->module_status = resp->module_status;
8519
8520         if (bp->flags & BNXT_FLAG_EEE_CAP) {
8521                 struct ethtool_eee *eee = &bp->eee;
8522                 u16 fw_speeds;
8523
8524                 eee->eee_active = 0;
8525                 if (resp->eee_config_phy_addr &
8526                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8527                         eee->eee_active = 1;
8528                         fw_speeds = le16_to_cpu(
8529                                 resp->link_partner_adv_eee_link_speed_mask);
8530                         eee->lp_advertised =
8531                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8532                 }
8533
8534                 /* Pull initial EEE config */
8535                 if (!chng_link_state) {
8536                         if (resp->eee_config_phy_addr &
8537                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8538                                 eee->eee_enabled = 1;
8539
8540                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
8541                         eee->advertised =
8542                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8543
8544                         if (resp->eee_config_phy_addr &
8545                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8546                                 __le32 tmr;
8547
8548                                 eee->tx_lpi_enabled = 1;
8549                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8550                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8551                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8552                         }
8553                 }
8554         }
8555
8556         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8557         if (bp->hwrm_spec_code >= 0x10504)
8558                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8559
8560         /* TODO: need to add more logic to report VF link */
8561         if (chng_link_state) {
8562                 if (link_info->phy_link_status == BNXT_LINK_LINK)
8563                         link_info->link_up = 1;
8564                 else
8565                         link_info->link_up = 0;
8566                 if (link_up != link_info->link_up)
8567                         bnxt_report_link(bp);
8568         } else {
8569                 /* always report link down if not asked to update link state */
8570                 link_info->link_up = 0;
8571         }
8572         mutex_unlock(&bp->hwrm_cmd_lock);
8573
8574         if (!BNXT_PHY_CFG_ABLE(bp))
8575                 return 0;
8576
8577         diff = link_info->support_auto_speeds ^ link_info->advertising;
8578         if ((link_info->support_auto_speeds | diff) !=
8579             link_info->support_auto_speeds) {
8580                 /* An advertised speed is no longer supported, so we need to
8581                  * update the advertisement settings.  Caller holds RTNL
8582                  * so we can modify link settings.
8583                  */
8584                 link_info->advertising = link_info->support_auto_speeds;
8585                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
8586                         bnxt_hwrm_set_link_setting(bp, true, false);
8587         }
8588         return 0;
8589 }
8590
8591 static void bnxt_get_port_module_status(struct bnxt *bp)
8592 {
8593         struct bnxt_link_info *link_info = &bp->link_info;
8594         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
8595         u8 module_status;
8596
8597         if (bnxt_update_link(bp, true))
8598                 return;
8599
8600         module_status = link_info->module_status;
8601         switch (module_status) {
8602         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8603         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8604         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
8605                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
8606                             bp->pf.port_id);
8607                 if (bp->hwrm_spec_code >= 0x10201) {
8608                         netdev_warn(bp->dev, "Module part number %s\n",
8609                                     resp->phy_vendor_partnumber);
8610                 }
8611                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8612                         netdev_warn(bp->dev, "TX is disabled\n");
8613                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8614                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8615         }
8616 }
8617
8618 static void
8619 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8620 {
8621         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
8622                 if (bp->hwrm_spec_code >= 0x10201)
8623                         req->auto_pause =
8624                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
8625                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8626                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8627                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8628                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
8629                 req->enables |=
8630                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8631         } else {
8632                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8633                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8634                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8635                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
8636                 req->enables |=
8637                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
8638                 if (bp->hwrm_spec_code >= 0x10201) {
8639                         req->auto_pause = req->force_pause;
8640                         req->enables |= cpu_to_le32(
8641                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8642                 }
8643         }
8644 }
8645
8646 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8647                                       struct hwrm_port_phy_cfg_input *req)
8648 {
8649         u8 autoneg = bp->link_info.autoneg;
8650         u16 fw_link_speed = bp->link_info.req_link_speed;
8651         u16 advertising = bp->link_info.advertising;
8652
8653         if (autoneg & BNXT_AUTONEG_SPEED) {
8654                 req->auto_mode |=
8655                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
8656
8657                 req->enables |= cpu_to_le32(
8658                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8659                 req->auto_link_speed_mask = cpu_to_le16(advertising);
8660
8661                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
8662                 req->flags |=
8663                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8664         } else {
8665                 req->force_link_speed = cpu_to_le16(fw_link_speed);
8666                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8667         }
8668
8669         /* tell the firmware (ChiMP) that the setting takes effect immediately */
8670         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8671 }
8672
8673 int bnxt_hwrm_set_pause(struct bnxt *bp)
8674 {
8675         struct hwrm_port_phy_cfg_input req = {0};
8676         int rc;
8677
8678         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8679         bnxt_hwrm_set_pause_common(bp, &req);
8680
8681         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8682             bp->link_info.force_link_chng)
8683                 bnxt_hwrm_set_link_common(bp, &req);
8684
8685         mutex_lock(&bp->hwrm_cmd_lock);
8686         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8687         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8688                 /* since changing the pause setting doesn't trigger any link
8689                  * change event, the driver needs to update the current pause
8690                  * result upon successful return of the phy_cfg command
8691                  */
8692                 bp->link_info.pause =
8693                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8694                 bp->link_info.auto_pause_setting = 0;
8695                 if (!bp->link_info.force_link_chng)
8696                         bnxt_report_link(bp);
8697         }
8698         bp->link_info.force_link_chng = false;
8699         mutex_unlock(&bp->hwrm_cmd_lock);
8700         return rc;
8701 }
8702
8703 static void bnxt_hwrm_set_eee(struct bnxt *bp,
8704                               struct hwrm_port_phy_cfg_input *req)
8705 {
8706         struct ethtool_eee *eee = &bp->eee;
8707
8708         if (eee->eee_enabled) {
8709                 u16 eee_speeds;
8710                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8711
8712                 if (eee->tx_lpi_enabled)
8713                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8714                 else
8715                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8716
8717                 req->flags |= cpu_to_le32(flags);
8718                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8719                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8720                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8721         } else {
8722                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8723         }
8724 }
8725
8726 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
8727 {
8728         struct hwrm_port_phy_cfg_input req = {0};
8729
8730         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8731         if (set_pause)
8732                 bnxt_hwrm_set_pause_common(bp, &req);
8733
8734         bnxt_hwrm_set_link_common(bp, &req);
8735
8736         if (set_eee)
8737                 bnxt_hwrm_set_eee(bp, &req);
8738         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8739 }
8740
8741 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8742 {
8743         struct hwrm_port_phy_cfg_input req = {0};
8744
8745         if (!BNXT_SINGLE_PF(bp))
8746                 return 0;
8747
8748         if (pci_num_vf(bp->pdev))
8749                 return 0;
8750
8751         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8752         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
8753         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8754 }
8755
8756 static int bnxt_fw_init_one(struct bnxt *bp);
8757
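/* HWRM_FUNC_DRV_IF_CHANGE tells the firmware that the interface is going up
 * or down.  On the "up" path the response flags indicate whether resource
 * reservations changed or a hot firmware reset completed while the interface
 * was down; in the latter case the driver frees its context memory, re-runs
 * bnxt_fw_init_one(), re-initializes the interrupt mode, and clears its
 * cached resource reservations so they are reserved again at open time.
 */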
8758 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8759 {
8760         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8761         struct hwrm_func_drv_if_change_input req = {0};
8762         bool resc_reinit = false, fw_reset = false;
8763         u32 flags = 0;
8764         int rc;
8765
8766         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8767                 return 0;
8768
8769         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8770         if (up)
8771                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8772         mutex_lock(&bp->hwrm_cmd_lock);
8773         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8774         if (!rc)
8775                 flags = le32_to_cpu(resp->flags);
8776         mutex_unlock(&bp->hwrm_cmd_lock);
8777         if (rc)
8778                 return rc;
8779
8780         if (!up)
8781                 return 0;
8782
8783         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
8784                 resc_reinit = true;
8785         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
8786                 fw_reset = true;
8787
8788         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
8789                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
8790                 return -ENODEV;
8791         }
8792         if (resc_reinit || fw_reset) {
8793                 if (fw_reset) {
8794                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
8795                                 bnxt_ulp_stop(bp);
8796                         bnxt_free_ctx_mem(bp);
8797                         kfree(bp->ctx);
8798                         bp->ctx = NULL;
8799                         rc = bnxt_fw_init_one(bp);
8800                         if (rc) {
8801                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
8802                                 return rc;
8803                         }
8804                         bnxt_clear_int_mode(bp);
8805                         rc = bnxt_init_int_mode(bp);
8806                         if (rc) {
8807                                 netdev_err(bp->dev, "init int mode failed\n");
8808                                 return rc;
8809                         }
8810                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
8811                 }
8812                 if (BNXT_NEW_RM(bp)) {
8813                         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8814
8815                         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8816                         hw_resc->resv_cp_rings = 0;
8817                         hw_resc->resv_stat_ctxs = 0;
8818                         hw_resc->resv_irqs = 0;
8819                         hw_resc->resv_tx_rings = 0;
8820                         hw_resc->resv_rx_rings = 0;
8821                         hw_resc->resv_hw_ring_grps = 0;
8822                         hw_resc->resv_vnics = 0;
8823                         if (!fw_reset) {
8824                                 bp->tx_nr_rings = 0;
8825                                 bp->rx_nr_rings = 0;
8826                         }
8827                 }
8828         }
8829         return 0;
8830 }
8831
8832 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8833 {
8834         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8835         struct hwrm_port_led_qcaps_input req = {0};
8836         struct bnxt_pf_info *pf = &bp->pf;
8837         int rc;
8838
8839         bp->num_leds = 0;
8840         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8841                 return 0;
8842
8843         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8844         req.port_id = cpu_to_le16(pf->port_id);
8845         mutex_lock(&bp->hwrm_cmd_lock);
8846         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8847         if (rc) {
8848                 mutex_unlock(&bp->hwrm_cmd_lock);
8849                 return rc;
8850         }
8851         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8852                 int i;
8853
8854                 bp->num_leds = resp->num_leds;
8855                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8856                                                  bp->num_leds);
8857                 for (i = 0; i < bp->num_leds; i++) {
8858                         struct bnxt_led_info *led = &bp->leds[i];
8859                         __le16 caps = led->led_state_caps;
8860
8861                         if (!led->led_group_id ||
8862                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
8863                                 bp->num_leds = 0;
8864                                 break;
8865                         }
8866                 }
8867         }
8868         mutex_unlock(&bp->hwrm_cmd_lock);
8869         return 0;
8870 }
8871
8872 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8873 {
8874         struct hwrm_wol_filter_alloc_input req = {0};
8875         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8876         int rc;
8877
8878         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8879         req.port_id = cpu_to_le16(bp->pf.port_id);
8880         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8881         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8882         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8883         mutex_lock(&bp->hwrm_cmd_lock);
8884         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8885         if (!rc)
8886                 bp->wol_filter_id = resp->wol_filter_id;
8887         mutex_unlock(&bp->hwrm_cmd_lock);
8888         return rc;
8889 }
8890
8891 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8892 {
8893         struct hwrm_wol_filter_free_input req = {0};
8894         int rc;
8895
8896         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8897         req.port_id = cpu_to_le16(bp->pf.port_id);
8898         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8899         req.wol_filter_id = bp->wol_filter_id;
8900         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8901         return rc;
8902 }
8903
8904 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8905 {
8906         struct hwrm_wol_filter_qcfg_input req = {0};
8907         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8908         u16 next_handle = 0;
8909         int rc;
8910
8911         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8912         req.port_id = cpu_to_le16(bp->pf.port_id);
8913         req.handle = cpu_to_le16(handle);
8914         mutex_lock(&bp->hwrm_cmd_lock);
8915         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8916         if (!rc) {
8917                 next_handle = le16_to_cpu(resp->next_handle);
8918                 if (next_handle != 0) {
8919                         if (resp->wol_type ==
8920                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8921                                 bp->wol = 1;
8922                                 bp->wol_filter_id = resp->wol_filter_id;
8923                         }
8924                 }
8925         }
8926         mutex_unlock(&bp->hwrm_cmd_lock);
8927         return next_handle;
8928 }
8929
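/* Firmware WoL filters are enumerated by handle: each query returns the next
 * handle and the walk stops when the firmware returns 0 or 0xffff.  A
 * magic-packet filter found during the walk sets bp->wol and records its
 * filter id so it can be freed later.
 */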
8930 static void bnxt_get_wol_settings(struct bnxt *bp)
8931 {
8932         u16 handle = 0;
8933
8934         bp->wol = 0;
8935         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8936                 return;
8937
8938         do {
8939                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8940         } while (handle && handle != 0xffff);
8941 }
8942
8943 #ifdef CONFIG_BNXT_HWMON
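/* The hwmon sysfs ABI reports temperatures in millidegrees Celsius, hence
 * the "* 1000" below: a firmware reading of 45 is exposed as "45000".
 */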
8944 static ssize_t bnxt_show_temp(struct device *dev,
8945                               struct device_attribute *devattr, char *buf)
8946 {
8947         struct hwrm_temp_monitor_query_input req = {0};
8948         struct hwrm_temp_monitor_query_output *resp;
8949         struct bnxt *bp = dev_get_drvdata(dev);
8950         u32 temp = 0;
8951
8952         resp = bp->hwrm_cmd_resp_addr;
8953         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8954         mutex_lock(&bp->hwrm_cmd_lock);
8955         if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8956                 temp = resp->temp * 1000; /* display millidegree */
8957         mutex_unlock(&bp->hwrm_cmd_lock);
8958
8959         return sprintf(buf, "%u\n", temp);
8960 }
8961 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8962
8963 static struct attribute *bnxt_attrs[] = {
8964         &sensor_dev_attr_temp1_input.dev_attr.attr,
8965         NULL
8966 };
8967 ATTRIBUTE_GROUPS(bnxt);
8968
8969 static void bnxt_hwmon_close(struct bnxt *bp)
8970 {
8971         if (bp->hwmon_dev) {
8972                 hwmon_device_unregister(bp->hwmon_dev);
8973                 bp->hwmon_dev = NULL;
8974         }
8975 }
8976
8977 static void bnxt_hwmon_open(struct bnxt *bp)
8978 {
8979         struct pci_dev *pdev = bp->pdev;
8980
8981         if (bp->hwmon_dev)
8982                 return;
8983
8984         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8985                                                           DRV_MODULE_NAME, bp,
8986                                                           bnxt_groups);
8987         if (IS_ERR(bp->hwmon_dev)) {
8988                 bp->hwmon_dev = NULL;
8989                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8990         }
8991 }
8992 #else
8993 static void bnxt_hwmon_close(struct bnxt *bp)
8994 {
8995 }
8996
8997 static void bnxt_hwmon_open(struct bnxt *bp)
8998 {
8999 }
9000 #endif
9001
9002 static bool bnxt_eee_config_ok(struct bnxt *bp)
9003 {
9004         struct ethtool_eee *eee = &bp->eee;
9005         struct bnxt_link_info *link_info = &bp->link_info;
9006
9007         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
9008                 return true;
9009
9010         if (eee->eee_enabled) {
9011                 u32 advertising =
9012                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9013
9014                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9015                         eee->eee_enabled = 0;
9016                         return false;
9017                 }
9018                 if (eee->advertised & ~advertising) {
9019                         eee->advertised = advertising & eee->supported;
9020                         return false;
9021                 }
9022         }
9023         return true;
9024 }
9025
9026 static int bnxt_update_phy_setting(struct bnxt *bp)
9027 {
9028         int rc;
9029         bool update_link = false;
9030         bool update_pause = false;
9031         bool update_eee = false;
9032         struct bnxt_link_info *link_info = &bp->link_info;
9033
9034         rc = bnxt_update_link(bp, true);
9035         if (rc) {
9036                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9037                            rc);
9038                 return rc;
9039         }
9040         if (!BNXT_SINGLE_PF(bp))
9041                 return 0;
9042
9043         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9044             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9045             link_info->req_flow_ctrl)
9046                 update_pause = true;
9047         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9048             link_info->force_pause_setting != link_info->req_flow_ctrl)
9049                 update_pause = true;
9050         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9051                 if (BNXT_AUTO_MODE(link_info->auto_mode))
9052                         update_link = true;
9053                 if (link_info->req_link_speed != link_info->force_link_speed)
9054                         update_link = true;
9055                 if (link_info->req_duplex != link_info->duplex_setting)
9056                         update_link = true;
9057         } else {
9058                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9059                         update_link = true;
9060                 if (link_info->advertising != link_info->auto_link_speeds)
9061                         update_link = true;
9062         }
9063
9064         /* The last close may have shut down the link, so we need to call
9065          * PHY_CFG to bring it back up.
9066          */
9067         if (!bp->link_info.link_up)
9068                 update_link = true;
9069
9070         if (!bnxt_eee_config_ok(bp))
9071                 update_eee = true;
9072
9073         if (update_link)
9074                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9075         else if (update_pause)
9076                 rc = bnxt_hwrm_set_pause(bp);
9077         if (rc) {
9078                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9079                            rc);
9080                 return rc;
9081         }
9082
9083         return rc;
9084 }
9085
9086 /* Common routine to pre-map certain register blocks to different GRC windows.
9087  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
9088  * in the PF and 3 windows in the VF can be customized to map in different
9089  * register blocks.
9090  */
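/* The window select registers appear to be 32 bits wide, so the write at
 * BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12 below selects window #4.
 */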
9091 static void bnxt_preset_reg_win(struct bnxt *bp)
9092 {
9093         if (BNXT_PF(bp)) {
9094                 /* CAG registers map to GRC window #4 */
9095                 writel(BNXT_CAG_REG_BASE,
9096                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9097         }
9098 }
9099
9100 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9101
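/* Bring the NIC up: reserve rings, allocate memory, set up NAPI and IRQs,
 * initialize the NIC and (optionally) the PHY, then enable interrupts and
 * the TX queues.  The error labels unwind in reverse order: open_err
 * disables NAPI, open_err_irq deletes the NAPI instances, and
 * open_err_free_mem releases SKBs, IRQs and memory.
 */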
9102 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9103 {
9104         int rc = 0;
9105
9106         bnxt_preset_reg_win(bp);
9107         netif_carrier_off(bp->dev);
9108         if (irq_re_init) {
9109                 /* Reserve rings now if none were reserved at driver probe. */
9110                 rc = bnxt_init_dflt_ring_mode(bp);
9111                 if (rc) {
9112                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9113                         return rc;
9114                 }
9115         }
9116         rc = bnxt_reserve_rings(bp, irq_re_init);
9117         if (rc)
9118                 return rc;
9119         if ((bp->flags & BNXT_FLAG_RFS) &&
9120             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9121                 /* disable RFS if falling back to INTA */
9122                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9123                 bp->flags &= ~BNXT_FLAG_RFS;
9124         }
9125
9126         rc = bnxt_alloc_mem(bp, irq_re_init);
9127         if (rc) {
9128                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9129                 goto open_err_free_mem;
9130         }
9131
9132         if (irq_re_init) {
9133                 bnxt_init_napi(bp);
9134                 rc = bnxt_request_irq(bp);
9135                 if (rc) {
9136                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9137                         goto open_err_irq;
9138                 }
9139         }
9140
9141         bnxt_enable_napi(bp);
9142         bnxt_debug_dev_init(bp);
9143
9144         rc = bnxt_init_nic(bp, irq_re_init);
9145         if (rc) {
9146                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9147                 goto open_err;
9148         }
9149
9150         if (link_re_init) {
9151                 mutex_lock(&bp->link_lock);
9152                 rc = bnxt_update_phy_setting(bp);
9153                 mutex_unlock(&bp->link_lock);
9154                 if (rc) {
9155                         netdev_warn(bp->dev, "failed to update phy settings\n");
9156                         if (BNXT_SINGLE_PF(bp)) {
9157                                 bp->link_info.phy_retry = true;
9158                                 bp->link_info.phy_retry_expires =
9159                                         jiffies + 5 * HZ;
9160                         }
9161                 }
9162         }
9163
9164         if (irq_re_init)
9165                 udp_tunnel_get_rx_info(bp->dev);
9166
9167         set_bit(BNXT_STATE_OPEN, &bp->state);
9168         bnxt_enable_int(bp);
9169         /* Enable TX queues */
9170         bnxt_tx_enable(bp);
9171         mod_timer(&bp->timer, jiffies + bp->current_interval);
9172         /* Poll link status and check for SFP+ module status */
9173         bnxt_get_port_module_status(bp);
9174
9175         /* VF-reps may need to be re-opened after the PF is re-opened */
9176         if (BNXT_PF(bp))
9177                 bnxt_vf_reps_open(bp);
9178         return 0;
9179
9180 open_err:
9181         bnxt_debug_dev_exit(bp);
9182         bnxt_disable_napi(bp);
9183
9184 open_err_irq:
9185         bnxt_del_napi(bp);
9186
9187 open_err_free_mem:
9188         bnxt_free_skbs(bp);
9189         bnxt_free_irq(bp);
9190         bnxt_free_mem(bp, true);
9191         return rc;
9192 }
9193
9194 /* rtnl_lock held */
9195 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9196 {
9197         int rc = 0;
9198
9199         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9200         if (rc) {
9201                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9202                 dev_close(bp->dev);
9203         }
9204         return rc;
9205 }
9206
9207 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
9208  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
9209  * self-tests.
9210  */
9211 int bnxt_half_open_nic(struct bnxt *bp)
9212 {
9213         int rc = 0;
9214
9215         rc = bnxt_alloc_mem(bp, false);
9216         if (rc) {
9217                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9218                 goto half_open_err;
9219         }
9220         rc = bnxt_init_nic(bp, false);
9221         if (rc) {
9222                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9223                 goto half_open_err;
9224         }
9225         return 0;
9226
9227 half_open_err:
9228         bnxt_free_skbs(bp);
9229         bnxt_free_mem(bp, false);
9230         dev_close(bp->dev);
9231         return rc;
9232 }
9233
9234 /* rtnl_lock held, this call can only be made after a previous successful
9235  * call to bnxt_half_open_nic().
9236  */
9237 void bnxt_half_close_nic(struct bnxt *bp)
9238 {
9239         bnxt_hwrm_resource_free(bp, false, false);
9240         bnxt_free_skbs(bp);
9241         bnxt_free_mem(bp, false);
9242 }
9243
9244 static void bnxt_reenable_sriov(struct bnxt *bp)
9245 {
9246         if (BNXT_PF(bp)) {
9247                 struct bnxt_pf_info *pf = &bp->pf;
9248                 int n = pf->active_vfs;
9249
9250                 if (n)
9251                         bnxt_cfg_hw_sriov(bp, &n, true);
9252         }
9253 }
9254
9255 static int bnxt_open(struct net_device *dev)
9256 {
9257         struct bnxt *bp = netdev_priv(dev);
9258         int rc;
9259
9260         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9261                 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9262                 return -ENODEV;
9263         }
9264
9265         rc = bnxt_hwrm_if_change(bp, true);
9266         if (rc)
9267                 return rc;
9268         rc = __bnxt_open_nic(bp, true, true);
9269         if (rc) {
9270                 bnxt_hwrm_if_change(bp, false);
9271         } else {
9272                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
9273                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9274                                 bnxt_ulp_start(bp, 0);
9275                                 bnxt_reenable_sriov(bp);
9276                         }
9277                 }
9278                 bnxt_hwmon_open(bp);
9279         }
9280
9281         return rc;
9282 }
9283
9284 static bool bnxt_drv_busy(struct bnxt *bp)
9285 {
9286         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9287                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
9288 }
9289
9290 static void bnxt_get_ring_stats(struct bnxt *bp,
9291                                 struct rtnl_link_stats64 *stats);
9292
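/* Closing clears BNXT_STATE_OPEN and then waits in bnxt_drv_busy() for any
 * in-flight sp-task work or stats readers (BNXT_STATE_READ_STATS) to drain.
 * The smp_mb__after_atomic() below pairs with the barrier in
 * bnxt_get_stats64() so a concurrent reader either sees the device still
 * open or is waited for before the rings are freed.
 */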
9293 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9294                              bool link_re_init)
9295 {
9296         /* Close the VF-reps before closing PF */
9297         if (BNXT_PF(bp))
9298                 bnxt_vf_reps_close(bp);
9299
9300         /* Change device state to avoid TX queue wake-ups */
9301         bnxt_tx_disable(bp);
9302
9303         clear_bit(BNXT_STATE_OPEN, &bp->state);
9304         smp_mb__after_atomic();
9305         while (bnxt_drv_busy(bp))
9306                 msleep(20);
9307
9308         /* Flush rings and disable interrupts */
9309         bnxt_shutdown_nic(bp, irq_re_init);
9310
9311         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9312
9313         bnxt_debug_dev_exit(bp);
9314         bnxt_disable_napi(bp);
9315         del_timer_sync(&bp->timer);
9316         bnxt_free_skbs(bp);
9317
9318         /* Save ring stats before shutdown */
9319         if (bp->bnapi)
9320                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
9321         if (irq_re_init) {
9322                 bnxt_free_irq(bp);
9323                 bnxt_del_napi(bp);
9324         }
9325         bnxt_free_mem(bp, irq_re_init);
9326 }
9327
9328 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9329 {
9330         int rc = 0;
9331
9332         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9333                 /* If we get here, it means firmware reset is in progress
9334                  * while we are trying to close.  We can safely proceed with
9335                  * the close because we are holding rtnl_lock().  Some firmware
9336                  * messages may fail as we proceed to close.  We set the
9337                  * ABORT_ERR flag here so that the FW reset thread will later
9338                  * abort when it gets the rtnl_lock() and sees the flag.
9339                  */
9340                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
9341                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9342         }
9343
9344 #ifdef CONFIG_BNXT_SRIOV
9345         if (bp->sriov_cfg) {
9346                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
9347                                                       !bp->sriov_cfg,
9348                                                       BNXT_SRIOV_CFG_WAIT_TMO);
9349                 if (rc)
9350                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
9351         }
9352 #endif
9353         __bnxt_close_nic(bp, irq_re_init, link_re_init);
9354         return rc;
9355 }
9356
9357 static int bnxt_close(struct net_device *dev)
9358 {
9359         struct bnxt *bp = netdev_priv(dev);
9360
9361         bnxt_hwmon_close(bp);
9362         bnxt_close_nic(bp, true, true);
9363         bnxt_hwrm_shutdown_link(bp);
9364         bnxt_hwrm_if_change(bp, false);
9365         return 0;
9366 }
9367
9368 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9369                                    u16 *val)
9370 {
9371         struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9372         struct hwrm_port_phy_mdio_read_input req = {0};
9373         int rc;
9374
9375         if (bp->hwrm_spec_code < 0x10a00)
9376                 return -EOPNOTSUPP;
9377
9378         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9379         req.port_id = cpu_to_le16(bp->pf.port_id);
9380         req.phy_addr = phy_addr;
9381         req.reg_addr = cpu_to_le16(reg & 0x1f);
9382         if (mdio_phy_id_is_c45(phy_addr)) {
9383                 req.cl45_mdio = 1;
9384                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9385                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9386                 req.reg_addr = cpu_to_le16(reg);
9387         }
9388
9389         mutex_lock(&bp->hwrm_cmd_lock);
9390         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9391         if (!rc)
9392                 *val = le16_to_cpu(resp->reg_data);
9393         mutex_unlock(&bp->hwrm_cmd_lock);
9394         return rc;
9395 }
9396
9397 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9398                                     u16 val)
9399 {
9400         struct hwrm_port_phy_mdio_write_input req = {0};
9401
9402         if (bp->hwrm_spec_code < 0x10a00)
9403                 return -EOPNOTSUPP;
9404
9405         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9406         req.port_id = cpu_to_le16(bp->pf.port_id);
9407         req.phy_addr = phy_addr;
9408         req.reg_addr = cpu_to_le16(reg & 0x1f);
9409         if (mdio_phy_id_is_c45(phy_addr)) {
9410                 req.cl45_mdio = 1;
9411                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9412                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9413                 req.reg_addr = cpu_to_le16(reg);
9414         }
9415         req.reg_data = cpu_to_le16(val);
9416
9417         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9418 }
9419
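/* The standard MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are
 * serviced through HWRM MDIO read/write messages.  Clause-45 PHY ids
 * (mdio_phy_id_is_c45()) are split into port (prtad) and device (devad)
 * addresses and use the full 16-bit register address; clause-22 accesses
 * are limited to registers 0-31.
 */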
9420 /* rtnl_lock held */
9421 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9422 {
9423         struct mii_ioctl_data *mdio = if_mii(ifr);
9424         struct bnxt *bp = netdev_priv(dev);
9425         int rc;
9426
9427         switch (cmd) {
9428         case SIOCGMIIPHY:
9429                 mdio->phy_id = bp->link_info.phy_addr;
9430
9431                 /* fallthru */
9432         case SIOCGMIIREG: {
9433                 u16 mii_regval = 0;
9434
9435                 if (!netif_running(dev))
9436                         return -EAGAIN;
9437
9438                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9439                                              &mii_regval);
9440                 mdio->val_out = mii_regval;
9441                 return rc;
9442         }
9443
9444         case SIOCSMIIREG:
9445                 if (!netif_running(dev))
9446                         return -EAGAIN;
9447
9448                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9449                                                 mdio->val_in);
9450
9451         default:
9452                 /* do nothing */
9453                 break;
9454         }
9455         return -EOPNOTSUPP;
9456 }
9457
9458 static void bnxt_get_ring_stats(struct bnxt *bp,
9459                                 struct rtnl_link_stats64 *stats)
9460 {
9461         int i;
9462
9464         for (i = 0; i < bp->cp_nr_rings; i++) {
9465                 struct bnxt_napi *bnapi = bp->bnapi[i];
9466                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9467                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
9468
9469                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
9470                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
9471                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
9472
9473                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
9474                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
9475                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
9476
9477                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
9478                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
9479                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
9480
9481                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
9482                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
9483                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
9484
9485                 stats->rx_missed_errors +=
9486                         le64_to_cpu(hw_stats->rx_discard_pkts);
9487
9488                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
9489
9490                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
9491         }
9492 }
9493
9494 static void bnxt_add_prev_stats(struct bnxt *bp,
9495                                 struct rtnl_link_stats64 *stats)
9496 {
9497         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9498
9499         stats->rx_packets += prev_stats->rx_packets;
9500         stats->tx_packets += prev_stats->tx_packets;
9501         stats->rx_bytes += prev_stats->rx_bytes;
9502         stats->tx_bytes += prev_stats->tx_bytes;
9503         stats->rx_missed_errors += prev_stats->rx_missed_errors;
9504         stats->multicast += prev_stats->multicast;
9505         stats->tx_dropped += prev_stats->tx_dropped;
9506 }
9507
9508 static void
9509 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9510 {
9511         struct bnxt *bp = netdev_priv(dev);
9512
9513         set_bit(BNXT_STATE_READ_STATS, &bp->state);
9514         /* Make sure bnxt_close_nic() sees that we are reading stats before
9515          * we check the BNXT_STATE_OPEN flag.
9516          */
9517         smp_mb__after_atomic();
9518         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9519                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9520                 *stats = bp->net_stats_prev;
9521                 return;
9522         }
9523
9524         bnxt_get_ring_stats(bp, stats);
9525         bnxt_add_prev_stats(bp, stats);
9526
9527         if (bp->flags & BNXT_FLAG_PORT_STATS) {
9528                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
9529                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
9530
9531                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
9532                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
9533                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
9534                                           le64_to_cpu(rx->rx_ovrsz_frames) +
9535                                           le64_to_cpu(rx->rx_runt_frames);
9536                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
9537                                    le64_to_cpu(rx->rx_jbr_frames);
9538                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
9539                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
9540                 stats->tx_errors = le64_to_cpu(tx->tx_err);
9541         }
9542         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9543 }
9544
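/* Sync the netdev multicast list into vnic 0's mc_list and return true if it
 * changed.  If more than BNXT_MAX_MC_ADDRS addresses are configured, fall
 * back to ALL_MCAST and report no change.
 */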
9545 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9546 {
9547         struct net_device *dev = bp->dev;
9548         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9549         struct netdev_hw_addr *ha;
9550         u8 *haddr;
9551         int mc_count = 0;
9552         bool update = false;
9553         int off = 0;
9554
9555         netdev_for_each_mc_addr(ha, dev) {
9556                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9557                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9558                         vnic->mc_list_count = 0;
9559                         return false;
9560                 }
9561                 haddr = ha->addr;
9562                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9563                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9564                         update = true;
9565                 }
9566                 off += ETH_ALEN;
9567                 mc_count++;
9568         }
9569         if (mc_count)
9570                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9571
9572         if (mc_count != vnic->mc_list_count) {
9573                 vnic->mc_list_count = mc_count;
9574                 update = true;
9575         }
9576         return update;
9577 }
9578
9579 static bool bnxt_uc_list_updated(struct bnxt *bp)
9580 {
9581         struct net_device *dev = bp->dev;
9582         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9583         struct netdev_hw_addr *ha;
9584         int off = 0;
9585
9586         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9587                 return true;
9588
9589         netdev_for_each_uc_addr(ha, dev) {
9590                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9591                         return true;
9592
9593                 off += ETH_ALEN;
9594         }
9595         return false;
9596 }
9597
9598 static void bnxt_set_rx_mode(struct net_device *dev)
9599 {
9600         struct bnxt *bp = netdev_priv(dev);
9601         struct bnxt_vnic_info *vnic;
9602         bool mc_update = false;
9603         bool uc_update;
9604         u32 mask;
9605
9606         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
9607                 return;
9608
9609         vnic = &bp->vnic_info[0];
9610         mask = vnic->rx_mask;
9611         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9612                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
9613                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9614                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
9615
9616         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
9617                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9618
9619         uc_update = bnxt_uc_list_updated(bp);
9620
9621         if (dev->flags & IFF_BROADCAST)
9622                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
9623         if (dev->flags & IFF_ALLMULTI) {
9624                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9625                 vnic->mc_list_count = 0;
9626         } else {
9627                 mc_update = bnxt_mc_list_updated(bp, &mask);
9628         }
9629
9630         if (mask != vnic->rx_mask || uc_update || mc_update) {
9631                 vnic->rx_mask = mask;
9632
9633                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
9634                 bnxt_queue_sp_work(bp);
9635         }
9636 }
9637
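/* Program the rx_mask and unicast filters computed by bnxt_set_rx_mode() into
 * firmware.  Stale L2 filters are freed first; if the unicast addresses no
 * longer fit in the filter table the device falls back to promiscuous mode,
 * and a failed multicast update is retried once with ALL_MCAST.
 */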
9638 static int bnxt_cfg_rx_mode(struct bnxt *bp)
9639 {
9640         struct net_device *dev = bp->dev;
9641         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9642         struct netdev_hw_addr *ha;
9643         int i, off = 0, rc;
9644         bool uc_update;
9645
9646         netif_addr_lock_bh(dev);
9647         uc_update = bnxt_uc_list_updated(bp);
9648         netif_addr_unlock_bh(dev);
9649
9650         if (!uc_update)
9651                 goto skip_uc;
9652
9653         mutex_lock(&bp->hwrm_cmd_lock);
9654         for (i = 1; i < vnic->uc_filter_count; i++) {
9655                 struct hwrm_cfa_l2_filter_free_input req = {0};
9656
9657                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
9658                                        -1);
9659
9660                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
9661
9662                 rc = _hwrm_send_message(bp, &req, sizeof(req),
9663                                         HWRM_CMD_TIMEOUT);
9664         }
9665         mutex_unlock(&bp->hwrm_cmd_lock);
9666
9667         vnic->uc_filter_count = 1;
9668
9669         netif_addr_lock_bh(dev);
9670         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
9671                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9672         } else {
9673                 netdev_for_each_uc_addr(ha, dev) {
9674                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
9675                         off += ETH_ALEN;
9676                         vnic->uc_filter_count++;
9677                 }
9678         }
9679         netif_addr_unlock_bh(dev);
9680
9681         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
9682                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
9683                 if (rc) {
9684                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
9685                                    rc);
9686                         vnic->uc_filter_count = i;
9687                         return rc;
9688                 }
9689         }
9690
9691 skip_uc:
9692         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9693         if (rc && vnic->mc_list_count) {
9694                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
9695                             rc);
9696                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9697                 vnic->mc_list_count = 0;
9698                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9699         }
9700         if (rc)
9701                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
9702                            rc);
9703
9704         return rc;
9705 }
9706
9707 static bool bnxt_can_reserve_rings(struct bnxt *bp)
9708 {
9709 #ifdef CONFIG_BNXT_SRIOV
9710         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
9711                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9712
9713                 /* If no minimum rings were provisioned by the PF, don't
9714                  * reserve rings by default when the device is down.
9715                  */
9716                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
9717                         return true;
9718
9719                 if (!netif_running(bp->dev))
9720                         return false;
9721         }
9722 #endif
9723         return true;
9724 }
9725
9726 /* If the chip and firmware support RFS */
9727 static bool bnxt_rfs_supported(struct bnxt *bp)
9728 {
9729         if (bp->flags & BNXT_FLAG_CHIP_P5) {
9730                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
9731                         return true;
9732                 return false;
9733         }
9734         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9735                 return true;
9736         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9737                 return true;
9738         return false;
9739 }
9740
9741 /* If runtime conditions support RFS */
9742 static bool bnxt_rfs_capable(struct bnxt *bp)
9743 {
9744 #ifdef CONFIG_RFS_ACCEL
9745         int vnics, max_vnics, max_rss_ctxs;
9746
9747         if (bp->flags & BNXT_FLAG_CHIP_P5)
9748                 return bnxt_rfs_supported(bp);
9749         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
9750                 return false;
9751
9752         vnics = 1 + bp->rx_nr_rings;
9753         max_vnics = bnxt_get_max_func_vnics(bp);
9754         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
9755
9756         /* RSS contexts not a limiting factor */
9757         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9758                 max_rss_ctxs = max_vnics;
9759         if (vnics > max_vnics || vnics > max_rss_ctxs) {
9760                 if (bp->rx_nr_rings > 1)
9761                         netdev_warn(bp->dev,
9762                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9763                                     min(max_rss_ctxs - 1, max_vnics - 1));
9764                 return false;
9765         }
9766
9767         if (!BNXT_NEW_RM(bp))
9768                 return true;
9769
9770         if (vnics == bp->hw_resc.resv_vnics)
9771                 return true;
9772
9773         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
9774         if (vnics <= bp->hw_resc.resv_vnics)
9775                 return true;
9776
9777         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
9778         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
9779         return false;
9780 #else
9781         return false;
9782 #endif
9783 }
9784
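/* Resolve netdev feature dependencies: NTUPLE is dropped when RFS is not
 * usable, hardware GRO and LRO are mutually exclusive and require aggregation
 * rings, and CTAG/STAG VLAN RX acceleration must be enabled or disabled
 * together (and is cleared on a VF that has a PF-assigned VLAN).
 */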
9785 static netdev_features_t bnxt_fix_features(struct net_device *dev,
9786                                            netdev_features_t features)
9787 {
9788         struct bnxt *bp = netdev_priv(dev);
9789
9790         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
9791                 features &= ~NETIF_F_NTUPLE;
9792
9793         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9794                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9795
9796         if (!(features & NETIF_F_GRO))
9797                 features &= ~NETIF_F_GRO_HW;
9798
9799         if (features & NETIF_F_GRO_HW)
9800                 features &= ~NETIF_F_LRO;
9801
9802         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
9803          * turned on or off together.
9804          */
9805         if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9806             (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9807                 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9808                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9809                                       NETIF_F_HW_VLAN_STAG_RX);
9810                 else
9811                         features |= NETIF_F_HW_VLAN_CTAG_RX |
9812                                     NETIF_F_HW_VLAN_STAG_RX;
9813         }
9814 #ifdef CONFIG_BNXT_SRIOV
9815         if (BNXT_VF(bp)) {
9816                 if (bp->vf.vlan) {
9817                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9818                                       NETIF_F_HW_VLAN_STAG_RX);
9819                 }
9820         }
9821 #endif
9822         return features;
9823 }
9824
9825 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9826 {
9827         struct bnxt *bp = netdev_priv(dev);
9828         u32 flags = bp->flags;
9829         u32 changes;
9830         int rc = 0;
9831         bool re_init = false;
9832         bool update_tpa = false;
9833
9834         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
9835         if (features & NETIF_F_GRO_HW)
9836                 flags |= BNXT_FLAG_GRO;
9837         else if (features & NETIF_F_LRO)
9838                 flags |= BNXT_FLAG_LRO;
9839
9840         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9841                 flags &= ~BNXT_FLAG_TPA;
9842
9843         if (features & NETIF_F_HW_VLAN_CTAG_RX)
9844                 flags |= BNXT_FLAG_STRIP_VLAN;
9845
9846         if (features & NETIF_F_NTUPLE)
9847                 flags |= BNXT_FLAG_RFS;
9848
9849         changes = flags ^ bp->flags;
9850         if (changes & BNXT_FLAG_TPA) {
9851                 update_tpa = true;
9852                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9853                     (flags & BNXT_FLAG_TPA) == 0 ||
9854                     (bp->flags & BNXT_FLAG_CHIP_P5))
9855                         re_init = true;
9856         }
9857
9858         if (changes & ~BNXT_FLAG_TPA)
9859                 re_init = true;
9860
9861         if (flags != bp->flags) {
9862                 u32 old_flags = bp->flags;
9863
9864                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9865                         bp->flags = flags;
9866                         if (update_tpa)
9867                                 bnxt_set_ring_params(bp);
9868                         return rc;
9869                 }
9870
9871                 if (re_init) {
9872                         bnxt_close_nic(bp, false, false);
9873                         bp->flags = flags;
9874                         if (update_tpa)
9875                                 bnxt_set_ring_params(bp);
9876
9877                         return bnxt_open_nic(bp, false, false);
9878                 }
9879                 if (update_tpa) {
9880                         bp->flags = flags;
9881                         rc = bnxt_set_tpa(bp,
9882                                           (flags & BNXT_FLAG_TPA) ?
9883                                           true : false);
9884                         if (rc)
9885                                 bp->flags = old_flags;
9886                 }
9887         }
9888         return rc;
9889 }
9890
9891 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9892                                        u32 ring_id, u32 *prod, u32 *cons)
9893 {
9894         struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9895         struct hwrm_dbg_ring_info_get_input req = {0};
9896         int rc;
9897
9898         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9899         req.ring_type = ring_type;
9900         req.fw_ring_id = cpu_to_le32(ring_id);
9901         mutex_lock(&bp->hwrm_cmd_lock);
9902         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9903         if (!rc) {
9904                 *prod = le32_to_cpu(resp->producer_index);
9905                 *cons = le32_to_cpu(resp->consumer_index);
9906         }
9907         mutex_unlock(&bp->hwrm_cmd_lock);
9908         return rc;
9909 }
9910
9911 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9912 {
9913         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9914         int i = bnapi->index;
9915
9916         if (!txr)
9917                 return;
9918
9919         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9920                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9921                     txr->tx_cons);
9922 }
9923
9924 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9925 {
9926         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9927         int i = bnapi->index;
9928
9929         if (!rxr)
9930                 return;
9931
9932         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9933                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9934                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9935                     rxr->rx_sw_agg_prod);
9936 }
9937
9938 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9939 {
9940         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9941         int i = bnapi->index;
9942
9943         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9944                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9945 }
9946
9947 static void bnxt_dbg_dump_states(struct bnxt *bp)
9948 {
9949         int i;
9950         struct bnxt_napi *bnapi;
9951
9952         for (i = 0; i < bp->cp_nr_rings; i++) {
9953                 bnapi = bp->bnapi[i];
9954                 if (netif_msg_drv(bp)) {
9955                         bnxt_dump_tx_sw_state(bnapi);
9956                         bnxt_dump_rx_sw_state(bnapi);
9957                         bnxt_dump_cp_sw_state(bnapi);
9958                 }
9959         }
9960 }
9961
9962 static void bnxt_reset_task(struct bnxt *bp, bool silent)
9963 {
9964         if (!silent)
9965                 bnxt_dbg_dump_states(bp);
9966         if (netif_running(bp->dev)) {
9967                 int rc;
9968
9969                 if (silent) {
9970                         bnxt_close_nic(bp, false, false);
9971                         bnxt_open_nic(bp, false, false);
9972                 } else {
9973                         bnxt_ulp_stop(bp);
9974                         bnxt_close_nic(bp, true, false);
9975                         rc = bnxt_open_nic(bp, true, false);
9976                         bnxt_ulp_start(bp, rc);
9977                 }
9978         }
9979 }
9980
9981 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
9982 {
9983         struct bnxt *bp = netdev_priv(dev);
9984
9985         netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
9986         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
9987         bnxt_queue_sp_work(bp);
9988 }
9989
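/* Invoked from the periodic timer when error recovery is enabled.  If the
 * firmware heartbeat register stops advancing, or the firmware reset counter
 * changes unexpectedly, schedule a firmware exception reset via the slow path.
 */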
9990 static void bnxt_fw_health_check(struct bnxt *bp)
9991 {
9992         struct bnxt_fw_health *fw_health = bp->fw_health;
9993         u32 val;
9994
9995         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9996                 return;
9997
9998         if (fw_health->tmr_counter) {
9999                 fw_health->tmr_counter--;
10000                 return;
10001         }
10002
10003         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10004         if (val == fw_health->last_fw_heartbeat)
10005                 goto fw_reset;
10006
10007         fw_health->last_fw_heartbeat = val;
10008
10009         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10010         if (val != fw_health->last_fw_reset_cnt)
10011                 goto fw_reset;
10012
10013         fw_health->tmr_counter = fw_health->tmr_multiplier;
10014         return;
10015
10016 fw_reset:
10017         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10018         bnxt_queue_sp_work(bp);
10019 }
10020
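/* Periodic driver timer, re-armed every bp->current_interval jiffies.  Apart
 * from the firmware heartbeat check it does no work itself; it only sets
 * sp_event bits (port stats, flow stats, PHY retry, NTP filters, coalescing)
 * and queues bnxt_sp_task() to service them.
 */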
10021 static void bnxt_timer(struct timer_list *t)
10022 {
10023         struct bnxt *bp = from_timer(bp, t, timer);
10024         struct net_device *dev = bp->dev;
10025
10026         if (!netif_running(dev))
10027                 return;
10028
10029         if (atomic_read(&bp->intr_sem) != 0)
10030                 goto bnxt_restart_timer;
10031
10032         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10033                 bnxt_fw_health_check(bp);
10034
10035         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
10036             bp->stats_coal_ticks) {
10037                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
10038                 bnxt_queue_sp_work(bp);
10039         }
10040
10041         if (bnxt_tc_flower_enabled(bp)) {
10042                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10043                 bnxt_queue_sp_work(bp);
10044         }
10045
10046 #ifdef CONFIG_RFS_ACCEL
10047         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
10048                 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
10049                 bnxt_queue_sp_work(bp);
10050         }
10051 #endif /* CONFIG_RFS_ACCEL */
10052
10053         if (bp->link_info.phy_retry) {
10054                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10055                         bp->link_info.phy_retry = false;
10056                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10057                 } else {
10058                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10059                         bnxt_queue_sp_work(bp);
10060                 }
10061         }
10062
10063         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
10064             netif_carrier_ok(dev)) {
10065                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10066                 bnxt_queue_sp_work(bp);
10067         }
10068 bnxt_restart_timer:
10069         mod_timer(&bp->timer, jiffies + bp->current_interval);
10070 }
10071
10072 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10073 {
10074         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10075          * set.  If the device is being closed, bnxt_close() may be holding
10076          * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So
10077          * we must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
10078          */
10079         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10080         rtnl_lock();
10081 }
10082
10083 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10084 {
10085         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10086         rtnl_unlock();
10087 }
10088
10089 /* Only called from bnxt_sp_task() */
10090 static void bnxt_reset(struct bnxt *bp, bool silent)
10091 {
10092         bnxt_rtnl_lock_sp(bp);
10093         if (test_bit(BNXT_STATE_OPEN, &bp->state))
10094                 bnxt_reset_task(bp, silent);
10095         bnxt_rtnl_unlock_sp(bp);
10096 }
10097
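/* Quiesce the device ahead of a firmware reset: stop ULPs, close the NIC,
 * tear down interrupts, unregister the driver from firmware, disable the PCI
 * device and free the backing-store context memory.
 */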
10098 static void bnxt_fw_reset_close(struct bnxt *bp)
10099 {
10100         bnxt_ulp_stop(bp);
10101         /* When firmware is in a fatal state, disable the PCI device to
10102          * prevent any potential bad DMAs before freeing kernel memory.
10103          */
10104         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10105                 pci_disable_device(bp->pdev);
10106         __bnxt_close_nic(bp, true, false);
10107         bnxt_clear_int_mode(bp);
10108         bnxt_hwrm_func_drv_unrgtr(bp);
10109         if (pci_is_enabled(bp->pdev))
10110                 pci_disable_device(bp->pdev);
10111         bnxt_free_ctx_mem(bp);
10112         kfree(bp->ctx);
10113         bp->ctx = NULL;
10114 }
10115
10116 static bool is_bnxt_fw_ok(struct bnxt *bp)
10117 {
10118         struct bnxt_fw_health *fw_health = bp->fw_health;
10119         bool no_heartbeat = false, has_reset = false;
10120         u32 val;
10121
10122         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10123         if (val == fw_health->last_fw_heartbeat)
10124                 no_heartbeat = true;
10125
10126         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10127         if (val != fw_health->last_fw_reset_cnt)
10128                 has_reset = true;
10129
10130         if (!no_heartbeat && has_reset)
10131                 return true;
10132
10133         return false;
10134 }
10135
10136 /* rtnl_lock is acquired before calling this function */
10137 static void bnxt_force_fw_reset(struct bnxt *bp)
10138 {
10139         struct bnxt_fw_health *fw_health = bp->fw_health;
10140         u32 wait_dsecs;
10141
10142         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10143             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10144                 return;
10145
10146         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10147         bnxt_fw_reset_close(bp);
10148         wait_dsecs = fw_health->master_func_wait_dsecs;
10149         if (fw_health->master) {
10150                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10151                         wait_dsecs = 0;
10152                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10153         } else {
10154                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10155                 wait_dsecs = fw_health->normal_func_wait_dsecs;
10156                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10157         }
10158
10159         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10160         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10161         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10162 }
10163
10164 void bnxt_fw_exception(struct bnxt *bp)
10165 {
10166         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
10167         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10168         bnxt_rtnl_lock_sp(bp);
10169         bnxt_force_fw_reset(bp);
10170         bnxt_rtnl_unlock_sp(bp);
10171 }
10172
10173 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
10174  * < 0 on error.
10175  */
10176 static int bnxt_get_registered_vfs(struct bnxt *bp)
10177 {
10178 #ifdef CONFIG_BNXT_SRIOV
10179         int rc;
10180
10181         if (!BNXT_PF(bp))
10182                 return 0;
10183
10184         rc = bnxt_hwrm_func_qcfg(bp);
10185         if (rc) {
10186                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
10187                 return rc;
10188         }
10189         if (bp->pf.registered_vfs)
10190                 return bp->pf.registered_vfs;
10191         if (bp->sriov_cfg)
10192                 return 1;
10193 #endif
10194         return 0;
10195 }
10196
10197 void bnxt_fw_reset(struct bnxt *bp)
10198 {
10199         bnxt_rtnl_lock_sp(bp);
10200         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
10201             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10202                 int n = 0, tmo;
10203
10204                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10205                 if (bp->pf.active_vfs &&
10206                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10207                         n = bnxt_get_registered_vfs(bp);
10208                 if (n < 0) {
10209                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
10210                                    n);
10211                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10212                         dev_close(bp->dev);
10213                         goto fw_reset_exit;
10214                 } else if (n > 0) {
10215                         u16 vf_tmo_dsecs = n * 10;
10216
10217                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
10218                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
10219                         bp->fw_reset_state =
10220                                 BNXT_FW_RESET_STATE_POLL_VF;
10221                         bnxt_queue_fw_reset_work(bp, HZ / 10);
10222                         goto fw_reset_exit;
10223                 }
10224                 bnxt_fw_reset_close(bp);
10225                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10226                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10227                         tmo = HZ / 10;
10228                 } else {
10229                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10230                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
10231                 }
10232                 bnxt_queue_fw_reset_work(bp, tmo);
10233         }
10234 fw_reset_exit:
10235         bnxt_rtnl_unlock_sp(bp);
10236 }
10237
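/* P5 chips only: look for completion rings that have work pending but whose
 * consumer index has not moved since the last check, which indicates a missed
 * interrupt.  Each hit is counted in missed_irqs and a debug ring-info query
 * is issued.
 */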
10238 static void bnxt_chk_missed_irq(struct bnxt *bp)
10239 {
10240         int i;
10241
10242         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10243                 return;
10244
10245         for (i = 0; i < bp->cp_nr_rings; i++) {
10246                 struct bnxt_napi *bnapi = bp->bnapi[i];
10247                 struct bnxt_cp_ring_info *cpr;
10248                 u32 fw_ring_id;
10249                 int j;
10250
10251                 if (!bnapi)
10252                         continue;
10253
10254                 cpr = &bnapi->cp_ring;
10255                 for (j = 0; j < 2; j++) {
10256                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
10257                         u32 val[2];
10258
10259                         if (!cpr2 || cpr2->has_more_work ||
10260                             !bnxt_has_work(bp, cpr2))
10261                                 continue;
10262
10263                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
10264                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
10265                                 continue;
10266                         }
10267                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
10268                         bnxt_dbg_hwrm_ring_info_get(bp,
10269                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
10270                                 fw_ring_id, &val[0], &val[1]);
10271                         cpr->missed_irqs++;
10272                 }
10273         }
10274 }
10275
10276 static void bnxt_cfg_ntp_filters(struct bnxt *);
10277
10278 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
10279 {
10280         struct bnxt_link_info *link_info = &bp->link_info;
10281
10282         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10283                 link_info->autoneg = BNXT_AUTONEG_SPEED;
10284                 if (bp->hwrm_spec_code >= 0x10201) {
10285                         if (link_info->auto_pause_setting &
10286                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10287                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10288                 } else {
10289                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10290                 }
10291                 link_info->advertising = link_info->auto_link_speeds;
10292         } else {
10293                 link_info->req_link_speed = link_info->force_link_speed;
10294                 link_info->req_duplex = link_info->duplex_setting;
10295         }
10296         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10297                 link_info->req_flow_ctrl =
10298                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10299         else
10300                 link_info->req_flow_ctrl = link_info->force_pause_setting;
10301 }
10302
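/* Slow-path work handler.  BNXT_STATE_IN_SP_TASK is held for the duration so
 * that bnxt_close_nic() can wait for it; each sp_event bit set from the timer,
 * async firmware events or netdev callbacks is tested, cleared and serviced
 * here, with the reset handlers deliberately run last.
 */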
10303 static void bnxt_sp_task(struct work_struct *work)
10304 {
10305         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
10306
10307         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10308         smp_mb__after_atomic();
10309         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10310                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10311                 return;
10312         }
10313
10314         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
10315                 bnxt_cfg_rx_mode(bp);
10316
10317         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
10318                 bnxt_cfg_ntp_filters(bp);
10319         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
10320                 bnxt_hwrm_exec_fwd_req(bp);
10321         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10322                 bnxt_hwrm_tunnel_dst_port_alloc(
10323                         bp, bp->vxlan_port,
10324                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10325         }
10326         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10327                 bnxt_hwrm_tunnel_dst_port_free(
10328                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10329         }
10330         if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10331                 bnxt_hwrm_tunnel_dst_port_alloc(
10332                         bp, bp->nge_port,
10333                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10334         }
10335         if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10336                 bnxt_hwrm_tunnel_dst_port_free(
10337                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10338         }
10339         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
10340                 bnxt_hwrm_port_qstats(bp);
10341                 bnxt_hwrm_port_qstats_ext(bp);
10342                 bnxt_hwrm_pcie_qstats(bp);
10343         }
10344
10345         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
10346                 int rc;
10347
10348                 mutex_lock(&bp->link_lock);
10349                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
10350                                        &bp->sp_event))
10351                         bnxt_hwrm_phy_qcaps(bp);
10352
10353                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
10354                                        &bp->sp_event))
10355                         bnxt_init_ethtool_link_settings(bp);
10356
10357                 rc = bnxt_update_link(bp, true);
10358                 mutex_unlock(&bp->link_lock);
10359                 if (rc)
10360                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
10361                                    rc);
10362         }
10363         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
10364                 int rc;
10365
10366                 mutex_lock(&bp->link_lock);
10367                 rc = bnxt_update_phy_setting(bp);
10368                 mutex_unlock(&bp->link_lock);
10369                 if (rc) {
10370                         netdev_warn(bp->dev, "update phy settings retry failed\n");
10371                 } else {
10372                         bp->link_info.phy_retry = false;
10373                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
10374                 }
10375         }
10376         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
10377                 mutex_lock(&bp->link_lock);
10378                 bnxt_get_port_module_status(bp);
10379                 mutex_unlock(&bp->link_lock);
10380         }
10381
10382         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
10383                 bnxt_tc_flow_stats_work(bp);
10384
10385         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
10386                 bnxt_chk_missed_irq(bp);
10387
10388         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
10389          * must be the last functions called before exiting.
10390          */
10391         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
10392                 bnxt_reset(bp, false);
10393
10394         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
10395                 bnxt_reset(bp, true);
10396
10397         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
10398                 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
10399
10400         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
10401                 if (!is_bnxt_fw_ok(bp))
10402                         bnxt_devlink_health_report(bp,
10403                                                    BNXT_FW_EXCEPTION_SP_EVENT);
10404         }
10405
10406         smp_mb__before_atomic();
10407         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10408 }
10409
10410 /* Under rtnl_lock */
10411 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
10412                      int tx_xdp)
10413 {
10414         int max_rx, max_tx, tx_sets = 1;
10415         int tx_rings_needed, stats;
10416         int rx_rings = rx;
10417         int cp, vnics, rc;
10418
10419         if (tcs)
10420                 tx_sets = tcs;
10421
10422         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
10423         if (rc)
10424                 return rc;
10425
10426         if (max_rx < rx)
10427                 return -ENOMEM;
10428
10429         tx_rings_needed = tx * tx_sets + tx_xdp;
10430         if (max_tx < tx_rings_needed)
10431                 return -ENOMEM;
10432
10433         vnics = 1;
10434         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
10435                 vnics += rx_rings;
10436
10437         if (bp->flags & BNXT_FLAG_AGG_RINGS)
10438                 rx_rings <<= 1;
10439         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
10440         stats = cp;
10441         if (BNXT_NEW_RM(bp)) {
10442                 cp += bnxt_get_ulp_msix_num(bp);
10443                 stats += bnxt_get_ulp_stat_ctxs(bp);
10444         }
10445         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
10446                                      stats, vnics);
10447 }
10448
10449 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
10450 {
10451         if (bp->bar2) {
10452                 pci_iounmap(pdev, bp->bar2);
10453                 bp->bar2 = NULL;
10454         }
10455
10456         if (bp->bar1) {
10457                 pci_iounmap(pdev, bp->bar1);
10458                 bp->bar1 = NULL;
10459         }
10460
10461         if (bp->bar0) {
10462                 pci_iounmap(pdev, bp->bar0);
10463                 bp->bar0 = NULL;
10464         }
10465 }
10466
10467 static void bnxt_cleanup_pci(struct bnxt *bp)
10468 {
10469         bnxt_unmap_bars(bp, bp->pdev);
10470         pci_release_regions(bp->pdev);
10471         if (pci_is_enabled(bp->pdev))
10472                 pci_disable_device(bp->pdev);
10473 }
10474
10475 static void bnxt_init_dflt_coal(struct bnxt *bp)
10476 {
10477         struct bnxt_coal *coal;
10478
10479         /* Tick values in microseconds.
10480          * 1 coal_buf x bufs_per_record = 1 completion record.
10481          */
10482         coal = &bp->rx_coal;
10483         coal->coal_ticks = 10;
10484         coal->coal_bufs = 30;
10485         coal->coal_ticks_irq = 1;
10486         coal->coal_bufs_irq = 2;
10487         coal->idle_thresh = 50;
10488         coal->bufs_per_record = 2;
10489         coal->budget = 64;              /* NAPI budget */
10490
10491         coal = &bp->tx_coal;
10492         coal->coal_ticks = 28;
10493         coal->coal_bufs = 30;
10494         coal->coal_ticks_irq = 2;
10495         coal->coal_bufs_irq = 2;
10496         coal->bufs_per_record = 1;
10497
10498         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
10499 }
10500
10501 static void bnxt_alloc_fw_health(struct bnxt *bp)
10502 {
10503         if (bp->fw_health)
10504                 return;
10505
10506         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
10507             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10508                 return;
10509
10510         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
10511         if (!bp->fw_health) {
10512                 netdev_warn(bp->dev, "Failed to allocate fw_health\n");
10513                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
10514                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10515         }
10516 }
10517
10518 static int bnxt_fw_init_one_p1(struct bnxt *bp)
10519 {
10520         int rc;
10521
10522         bp->fw_cap = 0;
10523         rc = bnxt_hwrm_ver_get(bp);
10524         if (rc)
10525                 return rc;
10526
10527         if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10528                 rc = bnxt_alloc_kong_hwrm_resources(bp);
10529                 if (rc)
10530                         bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10531         }
10532
10533         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10534             bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10535                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10536                 if (rc)
10537                         return rc;
10538         }
10539         rc = bnxt_hwrm_func_reset(bp);
10540         if (rc)
10541                 return -ENODEV;
10542
10543         bnxt_hwrm_fw_set_time(bp);
10544         return 0;
10545 }
10546
10547 static int bnxt_fw_init_one_p2(struct bnxt *bp)
10548 {
10549         int rc;
10550
10551         /* Get the MAX capabilities for this function */
10552         rc = bnxt_hwrm_func_qcaps(bp);
10553         if (rc) {
10554                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10555                            rc);
10556                 return -ENODEV;
10557         }
10558
10559         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
10560         if (rc)
10561                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
10562                             rc);
10563
10564         bnxt_alloc_fw_health(bp);
10565         rc = bnxt_hwrm_error_recovery_qcfg(bp);
10566         if (rc)
10567                 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
10568                             rc);
10569
10570         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
10571         if (rc)
10572                 return -ENODEV;
10573
10574         bnxt_hwrm_func_qcfg(bp);
10575         bnxt_hwrm_vnic_qcaps(bp);
10576         bnxt_hwrm_port_led_qcaps(bp);
10577         bnxt_ethtool_init(bp);
10578         bnxt_dcb_init(bp);
10579         return 0;
10580 }
10581
10582 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
10583 {
10584         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
10585         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10586                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10587                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10588                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10589         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
10590                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10591                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10592                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10593         }
10594 }
10595
10596 static void bnxt_set_dflt_rfs(struct bnxt *bp)
10597 {
10598         struct net_device *dev = bp->dev;
10599
10600         dev->hw_features &= ~NETIF_F_NTUPLE;
10601         dev->features &= ~NETIF_F_NTUPLE;
10602         bp->flags &= ~BNXT_FLAG_RFS;
10603         if (bnxt_rfs_supported(bp)) {
10604                 dev->hw_features |= NETIF_F_NTUPLE;
10605                 if (bnxt_rfs_capable(bp)) {
10606                         bp->flags |= BNXT_FLAG_RFS;
10607                         dev->features |= NETIF_F_NTUPLE;
10608                 }
10609         }
10610 }
10611
10612 static void bnxt_fw_init_one_p3(struct bnxt *bp)
10613 {
10614         struct pci_dev *pdev = bp->pdev;
10615
10616         bnxt_set_dflt_rss_hash_type(bp);
10617         bnxt_set_dflt_rfs(bp);
10618
10619         bnxt_get_wol_settings(bp);
10620         if (bp->flags & BNXT_FLAG_WOL_CAP)
10621                 device_set_wakeup_enable(&pdev->dev, bp->wol);
10622         else
10623                 device_set_wakeup_capable(&pdev->dev, false);
10624
10625         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10626         bnxt_hwrm_coal_params_qcaps(bp);
10627 }
10628
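/* Firmware init is split into three phases: p1 establishes the HWRM channel
 * (version query, short/Kong command support) and resets the function, p2
 * queries capabilities and registers the driver with firmware, and p3 applies
 * default RSS, RFS and wake-on-LAN settings.
 */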
10629 static int bnxt_fw_init_one(struct bnxt *bp)
10630 {
10631         int rc;
10632
10633         rc = bnxt_fw_init_one_p1(bp);
10634         if (rc) {
10635                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
10636                 return rc;
10637         }
10638         rc = bnxt_fw_init_one_p2(bp);
10639         if (rc) {
10640                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
10641                 return rc;
10642         }
10643         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
10644         if (rc)
10645                 return rc;
10646
10647         /* In case fw capabilities have changed, destroy the unneeded
10648          * health reporters and create the ones that are now supported.
10649          */
10650         bnxt_dl_fw_reporters_destroy(bp, false);
10651         bnxt_dl_fw_reporters_create(bp);
10652         bnxt_fw_init_one_p3(bp);
10653         return 0;
10654 }
10655
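/* Execute one step of the firmware-provided reset sequence: write the value
 * for reg_idx to a config-space, GRC, BAR0 or BAR1 register depending on the
 * encoded register type, then optionally flush with a config read and delay.
 */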
10656 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
10657 {
10658         struct bnxt_fw_health *fw_health = bp->fw_health;
10659         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
10660         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
10661         u32 reg_type, reg_off, delay_msecs;
10662
10663         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
10664         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
10665         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
10666         switch (reg_type) {
10667         case BNXT_FW_HEALTH_REG_TYPE_CFG:
10668                 pci_write_config_dword(bp->pdev, reg_off, val);
10669                 break;
10670         case BNXT_FW_HEALTH_REG_TYPE_GRC:
10671                 writel(reg_off & BNXT_GRC_BASE_MASK,
10672                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
10673                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
10674                 /* fall through */
10675         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
10676                 writel(val, bp->bar0 + reg_off);
10677                 break;
10678         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
10679                 writel(val, bp->bar1 + reg_off);
10680                 break;
10681         }
10682         if (delay_msecs) {
10683                 pci_read_config_dword(bp->pdev, 0, &val);
10684                 msleep(delay_msecs);
10685         }
10686 }
10687
10688 static void bnxt_reset_all(struct bnxt *bp)
10689 {
10690         struct bnxt_fw_health *fw_health = bp->fw_health;
10691         int i, rc;
10692
10693         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10694 #ifdef CONFIG_TEE_BNXT_FW
10695                 rc = tee_bnxt_fw_load();
10696                 if (rc)
10697                         netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
10698                 bp->fw_reset_timestamp = jiffies;
10699 #endif
10700                 return;
10701         }
10702
10703         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
10704                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
10705                         bnxt_fw_reset_writel(bp, i);
10706         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
10707                 struct hwrm_fw_reset_input req = {0};
10708
10709                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
10710                 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
10711                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
10712                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
10713                 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
10714                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10715                 if (rc)
10716                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
10717         }
10718         bp->fw_reset_timestamp = jiffies;
10719 }
10720
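/* Delayed work that drives the firmware reset state machine: wait for VFs to
 * unregister (POLL_VF), wait for firmware shutdown and trigger the reset if
 * this function is the master (POLL_FW_DOWN/RESET_FW), re-enable the PCI
 * device (ENABLE_DEV), poll HWRM_VER_GET until firmware responds (POLL_FW),
 * then reopen the NIC (OPENING).  Each state re-queues the work until it
 * completes.
 */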
10721 static void bnxt_fw_reset_task(struct work_struct *work)
10722 {
10723         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
10724         int rc;
10725
10726         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10727                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
10728                 return;
10729         }
10730
10731         switch (bp->fw_reset_state) {
10732         case BNXT_FW_RESET_STATE_POLL_VF: {
10733                 int n = bnxt_get_registered_vfs(bp);
10734                 int tmo;
10735
10736                 if (n < 0) {
10737                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
10738                                    n, jiffies_to_msecs(jiffies -
10739                                    bp->fw_reset_timestamp));
10740                         goto fw_reset_abort;
10741                 } else if (n > 0) {
10742                         if (time_after(jiffies, bp->fw_reset_timestamp +
10743                                        (bp->fw_reset_max_dsecs * HZ / 10))) {
10744                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10745                                 bp->fw_reset_state = 0;
10746                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
10747                                            n);
10748                                 return;
10749                         }
10750                         bnxt_queue_fw_reset_work(bp, HZ / 10);
10751                         return;
10752                 }
10753                 bp->fw_reset_timestamp = jiffies;
10754                 rtnl_lock();
10755                 bnxt_fw_reset_close(bp);
10756                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10757                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10758                         tmo = HZ / 10;
10759                 } else {
10760                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10761                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
10762                 }
10763                 rtnl_unlock();
10764                 bnxt_queue_fw_reset_work(bp, tmo);
10765                 return;
10766         }
10767         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
10768                 u32 val;
10769
10770                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
10771                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
10772                     !time_after(jiffies, bp->fw_reset_timestamp +
10773                     (bp->fw_reset_max_dsecs * HZ / 10))) {
10774                         bnxt_queue_fw_reset_work(bp, HZ / 5);
10775                         return;
10776                 }
10777
10778                 if (!bp->fw_health->master) {
10779                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
10780
10781                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10782                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10783                         return;
10784                 }
10785                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10786         }
10787         /* fall through */
10788         case BNXT_FW_RESET_STATE_RESET_FW:
10789                 bnxt_reset_all(bp);
10790                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10791                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
10792                 return;
10793         case BNXT_FW_RESET_STATE_ENABLE_DEV:
10794                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
10795                         u32 val;
10796
10797                         val = bnxt_fw_health_readl(bp,
10798                                                    BNXT_FW_RESET_INPROG_REG);
10799                         if (val)
10800                                 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
10801                                             val);
10802                 }
10803                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10804                 if (pci_enable_device(bp->pdev)) {
10805                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
10806                         goto fw_reset_abort;
10807                 }
10808                 pci_set_master(bp->pdev);
10809                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
10810                 /* fall through */
10811         case BNXT_FW_RESET_STATE_POLL_FW:
10812                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
10813                 rc = __bnxt_hwrm_ver_get(bp, true);
10814                 if (rc) {
10815                         if (time_after(jiffies, bp->fw_reset_timestamp +
10816                                        (bp->fw_reset_max_dsecs * HZ / 10))) {
10817                                 netdev_err(bp->dev, "Firmware reset aborted\n");
10818                                 goto fw_reset_abort;
10819                         }
10820                         bnxt_queue_fw_reset_work(bp, HZ / 5);
10821                         return;
10822                 }
10823                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10824                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
10825                 /* fall through */
10826         case BNXT_FW_RESET_STATE_OPENING:
10827                 while (!rtnl_trylock()) {
10828                         bnxt_queue_fw_reset_work(bp, HZ / 10);
10829                         return;
10830                 }
10831                 rc = bnxt_open(bp->dev);
10832                 if (rc) {
10833                         netdev_err(bp->dev, "bnxt_open_nic() failed\n");
10834                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10835                         dev_close(bp->dev);
10836                 }
10837
10838                 bp->fw_reset_state = 0;
10839                 /* Make sure fw_reset_state is 0 before clearing the flag */
10840                 smp_mb__before_atomic();
10841                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10842                 bnxt_ulp_start(bp, rc);
10843                 if (!rc)
10844                         bnxt_reenable_sriov(bp);
10845                 bnxt_dl_health_recovery_done(bp);
10846                 bnxt_dl_health_status_update(bp, true);
10847                 rtnl_unlock();
10848                 break;
10849         }
10850         return;
10851
10852 fw_reset_abort:
10853         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10854         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
10855                 bnxt_dl_health_status_update(bp, false);
10856         bp->fw_reset_state = 0;
10857         rtnl_lock();
10858         dev_close(bp->dev);
10859         rtnl_unlock();
10860 }
10861
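/* One-time PCI bring-up for the device: enable it, claim the regions, set the
 * DMA mask, map BARs 0, 2 and 4, and initialize the slow-path work items,
 * locks, default ring sizes, coalescing parameters and the periodic timer.
 */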
10862 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
10863 {
10864         int rc;
10865         struct bnxt *bp = netdev_priv(dev);
10866
10867         SET_NETDEV_DEV(dev, &pdev->dev);
10868
10869         /* enable device (incl. PCI PM wakeup) and bus-mastering */
10870         rc = pci_enable_device(pdev);
10871         if (rc) {
10872                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
10873                 goto init_err;
10874         }
10875
10876         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10877                 dev_err(&pdev->dev,
10878                         "Cannot find PCI device base address, aborting\n");
10879                 rc = -ENODEV;
10880                 goto init_err_disable;
10881         }
10882
10883         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10884         if (rc) {
10885                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
10886                 goto init_err_disable;
10887         }
10888
10889         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
10890             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
10891                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
10892                 goto init_err_disable;
10893         }
10894
10895         pci_set_master(pdev);
10896
10897         bp->dev = dev;
10898         bp->pdev = pdev;
10899
10900         bp->bar0 = pci_ioremap_bar(pdev, 0);
10901         if (!bp->bar0) {
10902                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
10903                 rc = -ENOMEM;
10904                 goto init_err_release;
10905         }
10906
10907         bp->bar1 = pci_ioremap_bar(pdev, 2);
10908         if (!bp->bar1) {
10909                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
10910                 rc = -ENOMEM;
10911                 goto init_err_release;
10912         }
10913
10914         bp->bar2 = pci_ioremap_bar(pdev, 4);
10915         if (!bp->bar2) {
10916                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
10917                 rc = -ENOMEM;
10918                 goto init_err_release;
10919         }
10920
10921         pci_enable_pcie_error_reporting(pdev);
10922
10923         INIT_WORK(&bp->sp_task, bnxt_sp_task);
10924         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
10925
10926         spin_lock_init(&bp->ntp_fltr_lock);
10927 #if BITS_PER_LONG == 32
10928         spin_lock_init(&bp->db_lock);
10929 #endif
10930
10931         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
10932         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
10933
10934         bnxt_init_dflt_coal(bp);
10935
10936         timer_setup(&bp->timer, bnxt_timer, 0);
10937         bp->current_interval = BNXT_TIMER_INTERVAL;
10938
10939         clear_bit(BNXT_STATE_OPEN, &bp->state);
10940         return 0;
10941
10942 init_err_release:
10943         bnxt_unmap_bars(bp, pdev);
10944         pci_release_regions(pdev);
10945
10946 init_err_disable:
10947         pci_disable_device(pdev);
10948
10949 init_err:
10950         return rc;
10951 }
10952
10953 /* rtnl_lock held */
10954 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
10955 {
10956         struct sockaddr *addr = p;
10957         struct bnxt *bp = netdev_priv(dev);
10958         int rc = 0;
10959
10960         if (!is_valid_ether_addr(addr->sa_data))
10961                 return -EADDRNOTAVAIL;
10962
10963         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
10964                 return 0;
10965
10966         rc = bnxt_approve_mac(bp, addr->sa_data, true);
10967         if (rc)
10968                 return rc;
10969
10970         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10971         if (netif_running(dev)) {
10972                 bnxt_close_nic(bp, false, false);
10973                 rc = bnxt_open_nic(bp, false, false);
10974         }
10975
10976         return rc;
10977 }
10978
10979 /* rtnl_lock held */
10980 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
10981 {
10982         struct bnxt *bp = netdev_priv(dev);
10983
10984         if (netif_running(dev))
10985                 bnxt_close_nic(bp, false, false);
10986
10987         dev->mtu = new_mtu;
10988         bnxt_set_ring_params(bp);
10989
10990         if (netif_running(dev))
10991                 return bnxt_open_nic(bp, false, false);
10992
10993         return 0;
10994 }
10995
10996 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
10997 {
10998         struct bnxt *bp = netdev_priv(dev);
10999         bool sh = false;
11000         int rc;
11001
11002         if (tc > bp->max_tc) {
11003                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
11004                            tc, bp->max_tc);
11005                 return -EINVAL;
11006         }
11007
11008         if (netdev_get_num_tc(dev) == tc)
11009                 return 0;
11010
11011         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11012                 sh = true;
11013
11014         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
11015                               sh, tc, bp->tx_nr_rings_xdp);
11016         if (rc)
11017                 return rc;
11018
11019         /* Need to close the device and do hw resource re-allocations */
11020         if (netif_running(bp->dev))
11021                 bnxt_close_nic(bp, true, false);
11022
11023         if (tc) {
11024                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
11025                 netdev_set_num_tc(dev, tc);
11026         } else {
11027                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11028                 netdev_reset_tc(dev);
11029         }
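              /* For example: 2 TCs with 4 TX rings per TC, 4 RX rings and no
               * XDP give tx_nr_rings = 8; with shared rings cp_nr_rings =
               * max(8, 4) = 8, otherwise cp_nr_rings = 8 + 4 = 12.
               */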
11030         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
11031         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
11032                                bp->tx_nr_rings + bp->rx_nr_rings;
11033
11034         if (netif_running(bp->dev))
11035                 return bnxt_open_nic(bp, true, false);
11036
11037         return 0;
11038 }
11039
11040 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
11041                                   void *cb_priv)
11042 {
11043         struct bnxt *bp = cb_priv;
11044
11045         if (!bnxt_tc_flower_enabled(bp) ||
11046             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
11047                 return -EOPNOTSUPP;
11048
11049         switch (type) {
11050         case TC_SETUP_CLSFLOWER:
11051                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
11052         default:
11053                 return -EOPNOTSUPP;
11054         }
11055 }
11056
11057 LIST_HEAD(bnxt_block_cb_list);
11058
11059 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
11060                          void *type_data)
11061 {
11062         struct bnxt *bp = netdev_priv(dev);
11063
11064         switch (type) {
11065         case TC_SETUP_BLOCK:
11066                 return flow_block_cb_setup_simple(type_data,
11067                                                   &bnxt_block_cb_list,
11068                                                   bnxt_setup_tc_block_cb,
11069                                                   bp, bp, true);
11070         case TC_SETUP_QDISC_MQPRIO: {
11071                 struct tc_mqprio_qopt *mqprio = type_data;
11072
11073                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
11074
11075                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
11076         }
11077         default:
11078                 return -EOPNOTSUPP;
11079         }
11080 }
11081
11082 #ifdef CONFIG_RFS_ACCEL
11083 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
11084                             struct bnxt_ntuple_filter *f2)
11085 {
11086         struct flow_keys *keys1 = &f1->fkeys;
11087         struct flow_keys *keys2 = &f2->fkeys;
11088
11089         if (keys1->basic.n_proto != keys2->basic.n_proto ||
11090             keys1->basic.ip_proto != keys2->basic.ip_proto)
11091                 return false;
11092
11093         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
11094                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
11095                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
11096                         return false;
11097         } else {
11098                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
11099                            sizeof(keys1->addrs.v6addrs.src)) ||
11100                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
11101                            sizeof(keys1->addrs.v6addrs.dst)))
11102                         return false;
11103         }
11104
11105         if (keys1->ports.ports == keys2->ports.ports &&
11106             keys1->control.flags == keys2->control.flags &&
11107             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
11108             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
11109                 return true;
11110
11111         return false;
11112 }
11113
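      /* ndo_rx_flow_steer() handler (aRFS): dissect the flow keys from the skb,
       * bail out if an identical ntuple filter is already installed, otherwise
       * allocate a software ID from the ntp_fltr bitmap, add the filter to the
       * hash table and defer the HWRM programming to the slow-path work
       * (see bnxt_cfg_ntp_filters() below).
       */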
11114 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
11115                               u16 rxq_index, u32 flow_id)
11116 {
11117         struct bnxt *bp = netdev_priv(dev);
11118         struct bnxt_ntuple_filter *fltr, *new_fltr;
11119         struct flow_keys *fkeys;
11120         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
11121         int rc = 0, idx, bit_id, l2_idx = 0;
11122         struct hlist_head *head;
11123         u32 flags;
11124
11125         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
11126                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11127                 int off = 0, j;
11128
11129                 netif_addr_lock_bh(dev);
11130                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
11131                         if (ether_addr_equal(eth->h_dest,
11132                                              vnic->uc_list + off)) {
11133                                 l2_idx = j + 1;
11134                                 break;
11135                         }
11136                 }
11137                 netif_addr_unlock_bh(dev);
11138                 if (!l2_idx)
11139                         return -EINVAL;
11140         }
11141         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
11142         if (!new_fltr)
11143                 return -ENOMEM;
11144
11145         fkeys = &new_fltr->fkeys;
11146         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
11147                 rc = -EPROTONOSUPPORT;
11148                 goto err_free;
11149         }
11150
11151         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
11152              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
11153             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
11154              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
11155                 rc = -EPROTONOSUPPORT;
11156                 goto err_free;
11157         }
11158         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
11159             bp->hwrm_spec_code < 0x10601) {
11160                 rc = -EPROTONOSUPPORT;
11161                 goto err_free;
11162         }
11163         flags = fkeys->control.flags;
11164         if (((flags & FLOW_DIS_ENCAPSULATION) &&
11165              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
11166                 rc = -EPROTONOSUPPORT;
11167                 goto err_free;
11168         }
11169
11170         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
11171         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
11172
11173         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
11174         head = &bp->ntp_fltr_hash_tbl[idx];
11175         rcu_read_lock();
11176         hlist_for_each_entry_rcu(fltr, head, hash) {
11177                 if (bnxt_fltr_match(fltr, new_fltr)) {
11178                         rcu_read_unlock();
11179                         rc = 0;
11180                         goto err_free;
11181                 }
11182         }
11183         rcu_read_unlock();
11184
11185         spin_lock_bh(&bp->ntp_fltr_lock);
11186         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
11187                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
11188         if (bit_id < 0) {
11189                 spin_unlock_bh(&bp->ntp_fltr_lock);
11190                 rc = -ENOMEM;
11191                 goto err_free;
11192         }
11193
11194         new_fltr->sw_id = (u16)bit_id;
11195         new_fltr->flow_id = flow_id;
11196         new_fltr->l2_fltr_idx = l2_idx;
11197         new_fltr->rxq = rxq_index;
11198         hlist_add_head_rcu(&new_fltr->hash, head);
11199         bp->ntp_fltr_count++;
11200         spin_unlock_bh(&bp->ntp_fltr_lock);
11201
11202         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11203         bnxt_queue_sp_work(bp);
11204
11205         return new_fltr->sw_id;
11206
11207 err_free:
11208         kfree(new_fltr);
11209         return rc;
11210 }
11211
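      /* Runs from the slow-path work: walk the ntuple filter hash table,
       * program filters not yet marked valid via HWRM, and free filters that
       * the RPS core reports as expired (rps_may_expire_flow()).
       */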
11212 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11213 {
11214         int i;
11215
11216         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
11217                 struct hlist_head *head;
11218                 struct hlist_node *tmp;
11219                 struct bnxt_ntuple_filter *fltr;
11220                 int rc;
11221
11222                 head = &bp->ntp_fltr_hash_tbl[i];
11223                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
11224                         bool del = false;
11225
11226                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
11227                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
11228                                                         fltr->flow_id,
11229                                                         fltr->sw_id)) {
11230                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
11231                                                                          fltr);
11232                                         del = true;
11233                                 }
11234                         } else {
11235                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
11236                                                                        fltr);
11237                                 if (rc)
11238                                         del = true;
11239                                 else
11240                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
11241                         }
11242
11243                         if (del) {
11244                                 spin_lock_bh(&bp->ntp_fltr_lock);
11245                                 hlist_del_rcu(&fltr->hash);
11246                                 bp->ntp_fltr_count--;
11247                                 spin_unlock_bh(&bp->ntp_fltr_lock);
11248                                 synchronize_rcu();
11249                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
11250                                 kfree(fltr);
11251                         }
11252                 }
11253         }
11254         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
11255                 netdev_info(bp->dev, "Received PF driver unload event!\n");
11256 }
11257
11258 #else
11259
11260 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11261 {
11262 }
11263
11264 #endif /* CONFIG_RFS_ACCEL */
11265
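      /* UDP tunnel (VXLAN/GENEVE) port add/delete handlers.  Only the first
       * add and the last delete of a given port set an sp_event bit; the
       * firmware tunnel port configuration itself is done from the slow-path
       * work.
       */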
11266 static void bnxt_udp_tunnel_add(struct net_device *dev,
11267                                 struct udp_tunnel_info *ti)
11268 {
11269         struct bnxt *bp = netdev_priv(dev);
11270
11271         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
11272                 return;
11273
11274         if (!netif_running(dev))
11275                 return;
11276
11277         switch (ti->type) {
11278         case UDP_TUNNEL_TYPE_VXLAN:
11279                 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
11280                         return;
11281
11282                 bp->vxlan_port_cnt++;
11283                 if (bp->vxlan_port_cnt == 1) {
11284                         bp->vxlan_port = ti->port;
11285                         set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
11287                 }
11288                 break;
11289         case UDP_TUNNEL_TYPE_GENEVE:
11290                 if (bp->nge_port_cnt && bp->nge_port != ti->port)
11291                         return;
11292
11293                 bp->nge_port_cnt++;
11294                 if (bp->nge_port_cnt == 1) {
11295                         bp->nge_port = ti->port;
11296                         set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
11297                 }
11298                 break;
11299         default:
11300                 return;
11301         }
11302
11303         bnxt_queue_sp_work(bp);
11304 }
11305
11306 static void bnxt_udp_tunnel_del(struct net_device *dev,
11307                                 struct udp_tunnel_info *ti)
11308 {
11309         struct bnxt *bp = netdev_priv(dev);
11310
11311         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
11312                 return;
11313
11314         if (!netif_running(dev))
11315                 return;
11316
11317         switch (ti->type) {
11318         case UDP_TUNNEL_TYPE_VXLAN:
11319                 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
11320                         return;
11321                 bp->vxlan_port_cnt--;
11322
11323                 if (bp->vxlan_port_cnt != 0)
11324                         return;
11325
11326                 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
11327                 break;
11328         case UDP_TUNNEL_TYPE_GENEVE:
11329                 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
11330                         return;
11331                 bp->nge_port_cnt--;
11332
11333                 if (bp->nge_port_cnt != 0)
11334                         return;
11335
11336                 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
11337                 break;
11338         default:
11339                 return;
11340         }
11341
11342         bnxt_queue_sp_work(bp);
11343 }
11344
11345 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11346                                struct net_device *dev, u32 filter_mask,
11347                                int nlflags)
11348 {
11349         struct bnxt *bp = netdev_priv(dev);
11350
11351         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
11352                                        nlflags, filter_mask, NULL);
11353 }
11354
11355 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
11356                                u16 flags, struct netlink_ext_ack *extack)
11357 {
11358         struct bnxt *bp = netdev_priv(dev);
11359         struct nlattr *attr, *br_spec;
11360         int rem, rc = 0;
11361
11362         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
11363                 return -EOPNOTSUPP;
11364
11365         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11366         if (!br_spec)
11367                 return -EINVAL;
11368
11369         nla_for_each_nested(attr, br_spec, rem) {
11370                 u16 mode;
11371
11372                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11373                         continue;
11374
11375                 if (nla_len(attr) < sizeof(mode))
11376                         return -EINVAL;
11377
11378                 mode = nla_get_u16(attr);
11379                 if (mode == bp->br_mode)
11380                         break;
11381
11382                 rc = bnxt_hwrm_set_br_mode(bp, mode);
11383                 if (!rc)
11384                         bp->br_mode = mode;
11385                 break;
11386         }
11387         return rc;
11388 }
11389
11390 int bnxt_get_port_parent_id(struct net_device *dev,
11391                             struct netdev_phys_item_id *ppid)
11392 {
11393         struct bnxt *bp = netdev_priv(dev);
11394
11395         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
11396                 return -EOPNOTSUPP;
11397
11398         /* The PF and its VF-reps only support the switchdev framework */
11399         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
11400                 return -EOPNOTSUPP;
11401
11402         ppid->id_len = sizeof(bp->dsn);
11403         memcpy(ppid->id, bp->dsn, ppid->id_len);
11404
11405         return 0;
11406 }
11407
11408 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
11409 {
11410         struct bnxt *bp = netdev_priv(dev);
11411
11412         return &bp->dl_port;
11413 }
11414
11415 static const struct net_device_ops bnxt_netdev_ops = {
11416         .ndo_open               = bnxt_open,
11417         .ndo_start_xmit         = bnxt_start_xmit,
11418         .ndo_stop               = bnxt_close,
11419         .ndo_get_stats64        = bnxt_get_stats64,
11420         .ndo_set_rx_mode        = bnxt_set_rx_mode,
11421         .ndo_do_ioctl           = bnxt_ioctl,
11422         .ndo_validate_addr      = eth_validate_addr,
11423         .ndo_set_mac_address    = bnxt_change_mac_addr,
11424         .ndo_change_mtu         = bnxt_change_mtu,
11425         .ndo_fix_features       = bnxt_fix_features,
11426         .ndo_set_features       = bnxt_set_features,
11427         .ndo_tx_timeout         = bnxt_tx_timeout,
11428 #ifdef CONFIG_BNXT_SRIOV
11429         .ndo_get_vf_config      = bnxt_get_vf_config,
11430         .ndo_set_vf_mac         = bnxt_set_vf_mac,
11431         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
11432         .ndo_set_vf_rate        = bnxt_set_vf_bw,
11433         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
11434         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
11435         .ndo_set_vf_trust       = bnxt_set_vf_trust,
11436 #endif
11437         .ndo_setup_tc           = bnxt_setup_tc,
11438 #ifdef CONFIG_RFS_ACCEL
11439         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
11440 #endif
11441         .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
11442         .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
11443         .ndo_bpf                = bnxt_xdp,
11444         .ndo_xdp_xmit           = bnxt_xdp_xmit,
11445         .ndo_bridge_getlink     = bnxt_bridge_getlink,
11446         .ndo_bridge_setlink     = bnxt_bridge_setlink,
11447         .ndo_get_devlink_port   = bnxt_get_devlink_port,
11448 };
11449
11450 static void bnxt_remove_one(struct pci_dev *pdev)
11451 {
11452         struct net_device *dev = pci_get_drvdata(pdev);
11453         struct bnxt *bp = netdev_priv(dev);
11454
11455         if (BNXT_PF(bp))
11456                 bnxt_sriov_disable(bp);
11457
11458         bnxt_dl_fw_reporters_destroy(bp, true);
11459         pci_disable_pcie_error_reporting(pdev);
11460         unregister_netdev(dev);
11461         bnxt_dl_unregister(bp);
11462         bnxt_shutdown_tc(bp);
11463         bnxt_cancel_sp_work(bp);
11464         bp->sp_event = 0;
11465
11466         bnxt_clear_int_mode(bp);
11467         bnxt_hwrm_func_drv_unrgtr(bp);
11468         bnxt_free_hwrm_resources(bp);
11469         bnxt_free_hwrm_short_cmd_req(bp);
11470         bnxt_ethtool_free(bp);
11471         bnxt_dcb_free(bp);
11472         kfree(bp->edev);
11473         bp->edev = NULL;
11474         kfree(bp->fw_health);
11475         bp->fw_health = NULL;
11476         bnxt_cleanup_pci(bp);
11477         bnxt_free_ctx_mem(bp);
11478         kfree(bp->ctx);
11479         bp->ctx = NULL;
11480         bnxt_free_port_stats(bp);
11481         free_netdev(dev);
11482 }
11483
11484 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
11485 {
11486         int rc = 0;
11487         struct bnxt_link_info *link_info = &bp->link_info;
11488
11489         rc = bnxt_hwrm_phy_qcaps(bp);
11490         if (rc) {
11491                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
11492                            rc);
11493                 return rc;
11494         }
11495         if (!fw_dflt)
11496                 return 0;
11497
11498         rc = bnxt_update_link(bp, false);
11499         if (rc) {
11500                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
11501                            rc);
11502                 return rc;
11503         }
11504
11505         /* Older firmware does not have supported_auto_speeds, so assume
11506          * that all supported speeds can be autonegotiated.
11507          */
11508         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
11509                 link_info->support_auto_speeds = link_info->support_speeds;
11510
11511         bnxt_init_ethtool_link_settings(bp);
11512         return 0;
11513 }
11514
11515 static int bnxt_get_max_irq(struct pci_dev *pdev)
11516 {
11517         u16 ctrl;
11518
11519         if (!pdev->msix_cap)
11520                 return 1;
11521
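              /* MSI-X Message Control: the Table Size field encodes N - 1 vectors */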
11522         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
11523         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
11524 }
11525
11526 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11527                                 int *max_cp)
11528 {
11529         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11530         int max_ring_grps = 0, max_irq;
11531
11532         *max_tx = hw_resc->max_tx_rings;
11533         *max_rx = hw_resc->max_rx_rings;
11534         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
11535         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
11536                         bnxt_get_ulp_msix_num(bp),
11537                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
11538         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11539                 *max_cp = min_t(int, *max_cp, max_irq);
11540         max_ring_grps = hw_resc->max_hw_ring_grps;
11541         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
11542                 *max_cp -= 1;
11543                 *max_rx -= 2;
11544         }
11545         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11546                 *max_rx >>= 1;
11547         if (bp->flags & BNXT_FLAG_CHIP_P5) {
11548                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
11549                 /* On P5 chips, max_cp output param should be available NQs */
11550                 *max_cp = max_irq;
11551         }
11552         *max_rx = min_t(int, *max_rx, max_ring_grps);
11553 }
11554
11555 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
11556 {
11557         int rx, tx, cp;
11558
11559         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
11560         *max_rx = rx;
11561         *max_tx = tx;
11562         if (!rx || !tx || !cp)
11563                 return -ENOMEM;
11564
11565         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
11566 }
11567
11568 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11569                                bool shared)
11570 {
11571         int rc;
11572
11573         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11574         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
11575                 /* Not enough rings, try disabling agg rings. */
11576                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
11577                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11578                 if (rc) {
11579                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
11580                         bp->flags |= BNXT_FLAG_AGG_RINGS;
11581                         return rc;
11582                 }
11583                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
11584                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11585                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11586                 bnxt_set_ring_params(bp);
11587         }
11588
11589         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
11590                 int max_cp, max_stat, max_irq;
11591
11592                 /* Reserve minimum resources for RoCE */
11593                 max_cp = bnxt_get_max_func_cp_rings(bp);
11594                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
11595                 max_irq = bnxt_get_max_func_irqs(bp);
11596                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
11597                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
11598                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
11599                         return 0;
11600
11601                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
11602                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
11603                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
11604                 max_cp = min_t(int, max_cp, max_irq);
11605                 max_cp = min_t(int, max_cp, max_stat);
11606                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
11607                 if (rc)
11608                         rc = 0;
11609         }
11610         return rc;
11611 }
11612
11613 /* In initial default shared ring setting, each shared ring must have a
11614  * RX/TX ring pair.
11615  */
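      /* For example, a default of 8 TX rings per TC with only 4 RX rings is
       * trimmed to 4 completion / 4 RX / 4 TX rings below.
       */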
11616 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
11617 {
11618         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
11619         bp->rx_nr_rings = bp->cp_nr_rings;
11620         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
11621         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11622 }
11623
11624 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
11625 {
11626         int dflt_rings, max_rx_rings, max_tx_rings, rc;
11627
11628         if (!bnxt_can_reserve_rings(bp))
11629                 return 0;
11630
11631         if (sh)
11632                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
11633         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
11634         /* Reduce default rings on multi-port cards so that total default
11635          * rings do not exceed CPU count.
11636          */
11637         if (bp->port_count > 1) {
11638                 int max_rings =
11639                         max_t(int, num_online_cpus() / bp->port_count, 1);
11640
11641                 dflt_rings = min_t(int, dflt_rings, max_rings);
11642         }
11643         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
11644         if (rc)
11645                 return rc;
11646         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
11647         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
11648         if (sh)
11649                 bnxt_trim_dflt_sh_rings(bp);
11650         else
11651                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
11652         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11653
11654         rc = __bnxt_reserve_rings(bp);
11655         if (rc)
11656                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
11657         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11658         if (sh)
11659                 bnxt_trim_dflt_sh_rings(bp);
11660
11661         /* Rings may have been trimmed, re-reserve the trimmed rings. */
11662         if (bnxt_need_reserve_rings(bp)) {
11663                 rc = __bnxt_reserve_rings(bp);
11664                 if (rc)
11665                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
11666                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11667         }
11668         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11669                 bp->rx_nr_rings++;
11670                 bp->cp_nr_rings++;
11671         }
11672         return rc;
11673 }
11674
11675 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
11676 {
11677         int rc;
11678
11679         if (bp->tx_nr_rings)
11680                 return 0;
11681
11682         bnxt_ulp_irq_stop(bp);
11683         bnxt_clear_int_mode(bp);
11684         rc = bnxt_set_dflt_rings(bp, true);
11685         if (rc) {
11686                 netdev_err(bp->dev, "Not enough rings available.\n");
11687                 goto init_dflt_ring_err;
11688         }
11689         rc = bnxt_init_int_mode(bp);
11690         if (rc)
11691                 goto init_dflt_ring_err;
11692
11693         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11694         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
11695                 bp->flags |= BNXT_FLAG_RFS;
11696                 bp->dev->features |= NETIF_F_NTUPLE;
11697         }
11698 init_dflt_ring_err:
11699         bnxt_ulp_irq_restart(bp, rc);
11700         return rc;
11701 }
11702
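      /* Re-query firmware resource caps and re-initialize the interrupt mode
       * after a resource change (e.g. after SR-IOV configuration), reopening
       * the netdev if it was running.
       */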
11703 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
11704 {
11705         int rc;
11706
11707         ASSERT_RTNL();
11708         bnxt_hwrm_func_qcaps(bp);
11709
11710         if (netif_running(bp->dev))
11711                 __bnxt_close_nic(bp, true, false);
11712
11713         bnxt_ulp_irq_stop(bp);
11714         bnxt_clear_int_mode(bp);
11715         rc = bnxt_init_int_mode(bp);
11716         bnxt_ulp_irq_restart(bp, rc);
11717
11718         if (netif_running(bp->dev)) {
11719                 if (rc)
11720                         dev_close(bp->dev);
11721                 else
11722                         rc = bnxt_open_nic(bp, true, false);
11723         }
11724
11725         return rc;
11726 }
11727
11728 static int bnxt_init_mac_addr(struct bnxt *bp)
11729 {
11730         int rc = 0;
11731
11732         if (BNXT_PF(bp)) {
11733                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
11734         } else {
11735 #ifdef CONFIG_BNXT_SRIOV
11736                 struct bnxt_vf_info *vf = &bp->vf;
11737                 bool strict_approval = true;
11738
11739                 if (is_valid_ether_addr(vf->mac_addr)) {
11740                         /* overwrite netdev dev_addr with admin VF MAC */
11741                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
11742                         /* Older PF driver or firmware may not approve this
11743                          * correctly.
11744                          */
11745                         strict_approval = false;
11746                 } else {
11747                         eth_hw_addr_random(bp->dev);
11748                 }
11749                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
11750 #endif
11751         }
11752         return rc;
11753 }
11754
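      /* Read the 64-bit PCIe Device Serial Number extended capability into
       * dsn[] (lower dword first) and mark it valid; the DSN is later used as
       * the eswitch switch ID.
       */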
11755 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
11756 {
11757         struct pci_dev *pdev = bp->pdev;
11758         int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
11759         u32 dw;
11760
11761         if (!pos) {
11762                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
11763                 return -EOPNOTSUPP;
11764         }
11765
11766         /* DSN (two dw) is at an offset of 4 from the cap pos */
11767         pos += 4;
11768         pci_read_config_dword(pdev, pos, &dw);
11769         put_unaligned_le32(dw, &dsn[0]);
11770         pci_read_config_dword(pdev, pos + 4, &dw);
11771         put_unaligned_le32(dw, &dsn[4]);
11772         bp->flags |= BNXT_FLAG_DSN_VALID;
11773         return 0;
11774 }
11775
11776 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
11777 {
11778         static int version_printed;
11779         struct net_device *dev;
11780         struct bnxt *bp;
11781         int rc, max_irqs;
11782
11783         if (pci_is_bridge(pdev))
11784                 return -ENODEV;
11785
11786         if (version_printed++ == 0)
11787                 pr_info("%s", version);
11788
11789         max_irqs = bnxt_get_max_irq(pdev);
11790         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
11791         if (!dev)
11792                 return -ENOMEM;
11793
11794         bp = netdev_priv(dev);
11795         bnxt_set_max_func_irqs(bp, max_irqs);
11796
11797         if (bnxt_vf_pciid(ent->driver_data))
11798                 bp->flags |= BNXT_FLAG_VF;
11799
11800         if (pdev->msix_cap)
11801                 bp->flags |= BNXT_FLAG_MSIX_CAP;
11802
11803         rc = bnxt_init_board(pdev, dev);
11804         if (rc < 0)
11805                 goto init_err_free;
11806
11807         dev->netdev_ops = &bnxt_netdev_ops;
11808         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
11809         dev->ethtool_ops = &bnxt_ethtool_ops;
11810         pci_set_drvdata(pdev, dev);
11811
11812         rc = bnxt_alloc_hwrm_resources(bp);
11813         if (rc)
11814                 goto init_err_pci_clean;
11815
11816         mutex_init(&bp->hwrm_cmd_lock);
11817         mutex_init(&bp->link_lock);
11818
11819         rc = bnxt_fw_init_one_p1(bp);
11820         if (rc)
11821                 goto init_err_pci_clean;
11822
11823         if (BNXT_CHIP_P5(bp))
11824                 bp->flags |= BNXT_FLAG_CHIP_P5;
11825
11826         rc = bnxt_fw_init_one_p2(bp);
11827         if (rc)
11828                 goto init_err_pci_clean;
11829
11830         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11831                            NETIF_F_TSO | NETIF_F_TSO6 |
11832                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
11833                            NETIF_F_GSO_IPXIP4 |
11834                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
11835                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
11836                            NETIF_F_RXCSUM | NETIF_F_GRO;
11837
11838         if (BNXT_SUPPORTS_TPA(bp))
11839                 dev->hw_features |= NETIF_F_LRO;
11840
11841         dev->hw_enc_features =
11842                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11843                         NETIF_F_TSO | NETIF_F_TSO6 |
11844                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
11845                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
11846                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
11847         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
11848                                     NETIF_F_GSO_GRE_CSUM;
11849         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
11850         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
11851                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
11852         if (BNXT_SUPPORTS_TPA(bp))
11853                 dev->hw_features |= NETIF_F_GRO_HW;
11854         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
11855         if (dev->features & NETIF_F_GRO_HW)
11856                 dev->features &= ~NETIF_F_LRO;
11857         dev->priv_flags |= IFF_UNICAST_FLT;
11858
11859 #ifdef CONFIG_BNXT_SRIOV
11860         init_waitqueue_head(&bp->sriov_cfg_wait);
11861         mutex_init(&bp->sriov_lock);
11862 #endif
11863         if (BNXT_SUPPORTS_TPA(bp)) {
11864                 bp->gro_func = bnxt_gro_func_5730x;
11865                 if (BNXT_CHIP_P4(bp))
11866                         bp->gro_func = bnxt_gro_func_5731x;
11867                 else if (BNXT_CHIP_P5(bp))
11868                         bp->gro_func = bnxt_gro_func_5750x;
11869         }
11870         if (!BNXT_CHIP_P4_PLUS(bp))
11871                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
11872
11873         bp->ulp_probe = bnxt_ulp_probe;
11874
11875         rc = bnxt_init_mac_addr(bp);
11876         if (rc) {
11877                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
11878                 rc = -EADDRNOTAVAIL;
11879                 goto init_err_pci_clean;
11880         }
11881
11882         if (BNXT_PF(bp)) {
11883                 /* Read the adapter's DSN to use as the eswitch switch_id */
11884                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
11885         }
11886
11887         /* MTU range: 60 - FW defined max */
11888         dev->min_mtu = ETH_ZLEN;
11889         dev->max_mtu = bp->max_mtu;
11890
11891         rc = bnxt_probe_phy(bp, true);
11892         if (rc)
11893                 goto init_err_pci_clean;
11894
11895         bnxt_set_rx_skb_mode(bp, false);
11896         bnxt_set_tpa_flags(bp);
11897         bnxt_set_ring_params(bp);
11898         rc = bnxt_set_dflt_rings(bp, true);
11899         if (rc) {
11900                 netdev_err(bp->dev, "Not enough rings available.\n");
11901                 rc = -ENOMEM;
11902                 goto init_err_pci_clean;
11903         }
11904
11905         bnxt_fw_init_one_p3(bp);
11906
11907         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
11908                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
11909
11910         rc = bnxt_init_int_mode(bp);
11911         if (rc)
11912                 goto init_err_pci_clean;
11913
11914         /* No TC has been set yet and rings may have been trimmed due to
11915          * limited MSIX, so we re-initialize the TX rings per TC.
11916          */
11917         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11918
11919         if (BNXT_PF(bp)) {
11920                 if (!bnxt_pf_wq) {
11921                         bnxt_pf_wq =
11922                                 create_singlethread_workqueue("bnxt_pf_wq");
11923                         if (!bnxt_pf_wq) {
11924                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
                                      rc = -ENOMEM;
11925                                 goto init_err_pci_clean;
11926                         }
11927                 }
11928                 bnxt_init_tc(bp);
11929         }
11930
11931         bnxt_dl_register(bp);
11932
11933         rc = register_netdev(dev);
11934         if (rc)
11935                 goto init_err_cleanup;
11936
11937         if (BNXT_PF(bp))
11938                 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
11939         bnxt_dl_fw_reporters_create(bp);
11940
11941         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
11942                     board_info[ent->driver_data].name,
11943                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
11944         pcie_print_link_status(pdev);
11945
11946         return 0;
11947
11948 init_err_cleanup:
11949         bnxt_dl_unregister(bp);
11950         bnxt_shutdown_tc(bp);
11951         bnxt_clear_int_mode(bp);
11952
11953 init_err_pci_clean:
11954         bnxt_hwrm_func_drv_unrgtr(bp);
11955         bnxt_free_hwrm_short_cmd_req(bp);
11956         bnxt_free_hwrm_resources(bp);
11957         bnxt_free_ctx_mem(bp);
11958         kfree(bp->ctx);
11959         bp->ctx = NULL;
11960         kfree(bp->fw_health);
11961         bp->fw_health = NULL;
11962         bnxt_cleanup_pci(bp);
11963
11964 init_err_free:
11965         free_netdev(dev);
11966         return rc;
11967 }
11968
11969 static void bnxt_shutdown(struct pci_dev *pdev)
11970 {
11971         struct net_device *dev = pci_get_drvdata(pdev);
11972         struct bnxt *bp;
11973
11974         if (!dev)
11975                 return;
11976
11977         rtnl_lock();
11978         bp = netdev_priv(dev);
11979         if (!bp)
11980                 goto shutdown_exit;
11981
11982         if (netif_running(dev))
11983                 dev_close(dev);
11984
11985         bnxt_ulp_shutdown(bp);
11986         bnxt_clear_int_mode(bp);
11987         pci_disable_device(pdev);
11988
11989         if (system_state == SYSTEM_POWER_OFF) {
11990                 pci_wake_from_d3(pdev, bp->wol);
11991                 pci_set_power_state(pdev, PCI_D3hot);
11992         }
11993
11994 shutdown_exit:
11995         rtnl_unlock();
11996 }
11997
11998 #ifdef CONFIG_PM_SLEEP
11999 static int bnxt_suspend(struct device *device)
12000 {
12001         struct net_device *dev = dev_get_drvdata(device);
12002         struct bnxt *bp = netdev_priv(dev);
12003         int rc = 0;
12004
12005         rtnl_lock();
12006         bnxt_ulp_stop(bp);
12007         if (netif_running(dev)) {
12008                 netif_device_detach(dev);
12009                 rc = bnxt_close(dev);
12010         }
12011         bnxt_hwrm_func_drv_unrgtr(bp);
12012         pci_disable_device(bp->pdev);
12013         bnxt_free_ctx_mem(bp);
12014         kfree(bp->ctx);
12015         bp->ctx = NULL;
12016         rtnl_unlock();
12017         return rc;
12018 }
12019
12020 static int bnxt_resume(struct device *device)
12021 {
12022         struct net_device *dev = dev_get_drvdata(device);
12023         struct bnxt *bp = netdev_priv(dev);
12024         int rc = 0;
12025
12026         rtnl_lock();
12027         rc = pci_enable_device(bp->pdev);
12028         if (rc) {
12029                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
12030                            rc);
12031                 goto resume_exit;
12032         }
12033         pci_set_master(bp->pdev);
12034         if (bnxt_hwrm_ver_get(bp)) {
12035                 rc = -ENODEV;
12036                 goto resume_exit;
12037         }
12038         rc = bnxt_hwrm_func_reset(bp);
12039         if (rc) {
12040                 rc = -EBUSY;
12041                 goto resume_exit;
12042         }
12043
12044         if (bnxt_hwrm_queue_qportcfg(bp)) {
12045                 rc = -ENODEV;
12046                 goto resume_exit;
12047         }
12048
12049         if (bp->hwrm_spec_code >= 0x10803) {
12050                 if (bnxt_alloc_ctx_mem(bp)) {
12051                         rc = -ENODEV;
12052                         goto resume_exit;
12053                 }
12054         }
12055         if (BNXT_NEW_RM(bp))
12056                 bnxt_hwrm_func_resc_qcaps(bp, false);
12057
12058         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
12059                 rc = -ENODEV;
12060                 goto resume_exit;
12061         }
12062
12063         bnxt_get_wol_settings(bp);
12064         if (netif_running(dev)) {
12065                 rc = bnxt_open(dev);
12066                 if (!rc)
12067                         netif_device_attach(dev);
12068         }
12069
12070 resume_exit:
12071         bnxt_ulp_start(bp, rc);
12072         rtnl_unlock();
12073         return rc;
12074 }
12075
12076 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
12077 #define BNXT_PM_OPS (&bnxt_pm_ops)
12078
12079 #else
12080
12081 #define BNXT_PM_OPS NULL
12082
12083 #endif /* CONFIG_PM_SLEEP */
12084
12085 /**
12086  * bnxt_io_error_detected - called when PCI error is detected
12087  * @pdev: Pointer to PCI device
12088  * @state: The current pci connection state
12089  *
12090  * This function is called after a PCI bus error affecting
12091  * this device has been detected.
12092  */
12093 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
12094                                                pci_channel_state_t state)
12095 {
12096         struct net_device *netdev = pci_get_drvdata(pdev);
12097         struct bnxt *bp = netdev_priv(netdev);
12098
12099         netdev_info(netdev, "PCI I/O error detected\n");
12100
12101         rtnl_lock();
12102         netif_device_detach(netdev);
12103
12104         bnxt_ulp_stop(bp);
12105
12106         if (state == pci_channel_io_perm_failure) {
12107                 rtnl_unlock();
12108                 return PCI_ERS_RESULT_DISCONNECT;
12109         }
12110
12111         if (netif_running(netdev))
12112                 bnxt_close(netdev);
12113
12114         pci_disable_device(pdev);
12115         rtnl_unlock();
12116
12117         /* Request a slot reset. */
12118         return PCI_ERS_RESULT_NEED_RESET;
12119 }
12120
12121 /**
12122  * bnxt_io_slot_reset - called after the pci bus has been reset.
12123  * @pdev: Pointer to PCI device
12124  *
12125  * Restart the card from scratch, as if from a cold-boot.
12126  * At this point, the card has experienced a hard reset,
12127  * followed by fixups by BIOS, and has its config space
12128  * set up identically to what it was at cold boot.
12129  */
12130 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
12131 {
12132         struct net_device *netdev = pci_get_drvdata(pdev);
12133         struct bnxt *bp = netdev_priv(netdev);
12134         int err = 0;
12135         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
12136
12137         netdev_info(bp->dev, "PCI Slot Reset\n");
12138
12139         rtnl_lock();
12140
12141         if (pci_enable_device(pdev)) {
12142                 dev_err(&pdev->dev,
12143                         "Cannot re-enable PCI device after reset.\n");
12144         } else {
12145                 pci_set_master(pdev);
12146
12147                 err = bnxt_hwrm_func_reset(bp);
12148                 if (!err && netif_running(netdev))
12149                         err = bnxt_open(netdev);
12150
12151                 if (!err)
12152                         result = PCI_ERS_RESULT_RECOVERED;
12153                 bnxt_ulp_start(bp, err);
12154         }
12155
12156         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
12157                 dev_close(netdev);
12158
12159         rtnl_unlock();
12160
12161         return result;
12162 }
12163
12164 /**
12165  * bnxt_io_resume - called when traffic can start flowing again.
12166  * @pdev: Pointer to PCI device
12167  *
12168  * This callback is called when the error recovery driver tells
12169  * us that it's OK to resume normal operation.
12170  */
12171 static void bnxt_io_resume(struct pci_dev *pdev)
12172 {
12173         struct net_device *netdev = pci_get_drvdata(pdev);
12174
12175         rtnl_lock();
12176
12177         netif_device_attach(netdev);
12178
12179         rtnl_unlock();
12180 }
12181
12182 static const struct pci_error_handlers bnxt_err_handler = {
12183         .error_detected = bnxt_io_error_detected,
12184         .slot_reset     = bnxt_io_slot_reset,
12185         .resume         = bnxt_io_resume
12186 };
12187
12188 static struct pci_driver bnxt_pci_driver = {
12189         .name           = DRV_MODULE_NAME,
12190         .id_table       = bnxt_pci_tbl,
12191         .probe          = bnxt_init_one,
12192         .remove         = bnxt_remove_one,
12193         .shutdown       = bnxt_shutdown,
12194         .driver.pm      = BNXT_PM_OPS,
12195         .err_handler    = &bnxt_err_handler,
12196 #if defined(CONFIG_BNXT_SRIOV)
12197         .sriov_configure = bnxt_sriov_configure,
12198 #endif
12199 };
12200
12201 static int __init bnxt_init(void)
12202 {
12203         bnxt_debug_init();
12204         return pci_register_driver(&bnxt_pci_driver);
12205 }
12206
12207 static void __exit bnxt_exit(void)
12208 {
12209         pci_unregister_driver(&bnxt_pci_driver);
12210         if (bnxt_pf_wq)
12211                 destroy_workqueue(bnxt_pf_wq);
12212         bnxt_debug_exit();
12213 }
12214
12215 module_init(bnxt_init);
12216 module_exit(bnxt_exit);