/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)

static const char version[] =
        "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        BCM5745x_NPAR,
        BCM57508,
        BCM57504,
        BCM57502,
        BCM57508_NPAR,
        BCM57504_NPAR,
        BCM57502_NPAR,
        BCM58802,
        BCM58804,
        BCM58808,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
        NETXTREME_S_VF,
        NETXTREME_E_P5_VF,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
        [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
        [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
        [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
        [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
        { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
        { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

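/* HWRM request types sent by VFs that the PF driver examines ("sniffs")
 * before forwarding to firmware (presumably consumed by the SR-IOV code
 * in bnxt_sriov.c).
 */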
static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_FUNC_VF_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}

#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)                                             \
        writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)                                          \
        writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)                                         \
        writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
        writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

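/* Ring a completion-queue or notification-queue doorbell.  P5 chips use
 * the 64-bit doorbell format keyed by db_key64; older chips use the
 * legacy 32-bit CP doorbell written via the macros above.
 */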
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_P5(db, idx);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_ARM_P5(db, idx);
        else
                BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
                       db->doorbell);
        else
                BNXT_DB_CQ(db, idx);
}

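/* TX packet length hints, indexed by the packet length in 512-byte
 * units (see the "length >>= 9" in bnxt_start_xmit()); the selected
 * hint is OR'ed into the TX BD flags.
 */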
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

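/* Main transmit routine.  If the ring is empty and the packet is small
 * enough (length <= bp->tx_push_thresh), the BDs and packet data are
 * written directly into the doorbell BAR ("TX push"); otherwise the
 * packet takes the normal DMA-mapped descriptor path at normal_tx.
 */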
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 8021Q, 8021AD vlan offloads
                 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void __iomem *db = txr->tx_db.doorbell;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(db, tx_push_buf, 16);
                        __iowrite32_copy(db + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(db, tx_push_buf, push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad)) {
                        /* SKB already freed. */
                        tx_buf->skb = NULL;
                        return NETDEV_TX_OK;
                }
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
                dev_kfree_skb_any(skb);
                tx_buf->skb = NULL;
                return NETDEV_TX_OK;
        }

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = 0;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags =
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
                dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                                     skb->len);
                i = 0;
                goto tx_dma_error;
        }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push)
                        bnxt_db_write(bp, &txr->tx_db, prod);

                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnxt_tx_avail() below, because in
                 * bnxt_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

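/* TX completion handling: unmap and free up to nr_pkts completed
 * packets, update BQL accounting, and re-wake the TX queue if it was
 * stopped and enough descriptors have been reclaimed.
 */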
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
                    txr->dev_state != BNXT_DEV_STATE_CLOSING)
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}

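/* Allocate an RX buffer page from the ring's page_pool and DMA-map it.
 * The returned mapping is advanced by bp->rx_dma_offset.
 */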
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = page_pool_dev_alloc_pages(rxr->page_pool);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

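/* Fill the RX descriptor at @prod with a fresh buffer: a full page in
 * page mode, otherwise a kmalloc'ed data buffer.
 */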
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page =
                        __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

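/* Allocate and DMA-map a buffer for the RX aggregation ring.  When the
 * system page size exceeds BNXT_RX_PAGE_SIZE, one page is carved into
 * several BNXT_RX_PAGE_SIZE aggregation buffers.
 */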
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
                                       struct bnxt_cp_ring_info *cpr,
                                       u16 cp_cons, u16 curr)
{
        struct rx_agg_cmp *agg;

        cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
        return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 agg_id, u16 curr)
{
        struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

        return &tpa_info->agg_arr[curr];
}

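/* Recycle @agg_bufs aggregation buffers back to the producer side of
 * the aggregation ring (e.g. after an error) instead of freeing and
 * reallocating them.
 */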
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
                                   u16 start, u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, start + i);
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

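/* Build an skb for a page-mode RX buffer: the header portion is copied
 * into the skb's linear area and the remaining payload stays in the
 * page fragment.
 */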
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);

        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        skb_frag_off_add(frag, payload);
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                                     struct bnxt_cp_ring_info *cpr,
                                     struct sk_buff *skb, u16 idx,
                                     u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, i);
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

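/* Return true if all @agg_bufs aggregation completions for the current
 * packet are present in the completion ring.
 */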
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                           u32 *raw_cons, void *cmp)
{
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        return 0;

                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

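/* Queue the slow-path task: PFs use the dedicated bnxt_pf_wq
 * workqueue, VFs use the system workqueue.
 */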
static void bnxt_queue_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                queue_work(bnxt_pf_wq, &bp->sp_task);
        else
                schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                flush_workqueue(bnxt_pf_wq);
        else
                cancel_work_sync(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
        u16 idx = agg_id & MAX_TPA_P5_MASK;

        if (test_bit(idx, map->agg_idx_bmap))
                idx = find_first_zero_bit(map->agg_idx_bmap,
                                          BNXT_AGG_IDX_BMAP_SIZE);
        __set_bit(idx, map->agg_idx_bmap);
        map->agg_id_tbl[agg_id] = idx;
        return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        __clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        return map->agg_id_tbl[agg_id];
}

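/* Handle a TPA_START completion: move the selected RX buffer into
 * tpa_info, record the hash/GSO/metadata needed at TPA_END time, and
 * recycle the consumer buffer back onto the RX ring.
 */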
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
{
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct bnxt_tpa_info *tpa_info;
        u16 cons, prod, agg_id;
        struct rx_bd *prod_bd;
        dma_addr_t mapping;

        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                agg_id = TPA_START_AGG_ID_P5(tpa_start);
                agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
        } else {
                agg_id = TPA_START_AGG_ID(tpa_start);
        }
        cons = tpa_start->rx_tpa_start_cmp_opaque;
        prod = rxr->rx_prod;
        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];

        if (unlikely(cons != rxr->rx_next_cons ||
                     TPA_START_ERROR(tpa_start))) {
                netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
                            cons, rxr->rx_next_cons,
                            TPA_START_ERROR_CODE(tpa_start1));
                bnxt_sched_reset(bp, rxr);
                return;
        }
        /* Store cfa_code in tpa_info to use in tpa_end
         * completion processing.
         */
        tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
        prod_rx_buf->data = tpa_info->data;
        prod_rx_buf->data_ptr = tpa_info->data_ptr;

        mapping = tpa_info->mapping;
        prod_rx_buf->mapping = mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

        tpa_info->data = cons_rx_buf->data;
        tpa_info->data_ptr = cons_rx_buf->data_ptr;
        cons_rx_buf->data = NULL;
        tpa_info->mapping = cons_rx_buf->mapping;

        tpa_info->len =
                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
                                RX_TPA_START_CMP_LEN_SHIFT;
        if (likely(TPA_START_HASH_VALID(tpa_start))) {
                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

                tpa_info->hash_type = PKT_HASH_TYPE_L4;
                tpa_info->gso_type = SKB_GSO_TCPV4;
                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
                        tpa_info->gso_type = SKB_GSO_TCPV6;
                tpa_info->rss_hash =
                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
        } else {
                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
                tpa_info->gso_type = 0;
                if (netif_msg_rx_err(bp))
                        netdev_warn(bp->dev, "TPA packet without valid hash\n");
        }
        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
        tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
        tpa_info->agg_count = 0;

        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
        rxr->rx_next_cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
        cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
        if (agg_bufs)
                bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
        struct udphdr *uh = NULL;

        if (ip_proto == htons(ETH_P_IP)) {
                struct iphdr *iph = (struct iphdr *)skb->data;

                if (iph->protocol == IPPROTO_UDP)
                        uh = (struct udphdr *)(iph + 1);
        } else {
                struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                if (iph->nexthdr == IPPROTO_UDP)
                        uh = (struct udphdr *)(iph + 1);
        }
        if (uh) {
                if (uh->check)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
                else
                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
        }
}
#endif

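/* Per-chip GRO fixups for hardware-aggregated (TPA) packets.  These
 * helpers set the network/transport header offsets and recompute the
 * TCP pseudo-header checksum on the merged skb.
 */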
1308 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1309                                            int payload_off, int tcp_ts,
1310                                            struct sk_buff *skb)
1311 {
1312 #ifdef CONFIG_INET
1313         struct tcphdr *th;
1314         int len, nw_off;
1315         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1316         u32 hdr_info = tpa_info->hdr_info;
1317         bool loopback = false;
1318
1319         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1320         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1321         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1322
1323         /* If the packet is an internal loopback packet, the offsets will
1324          * have an extra 4 bytes.
1325          */
1326         if (inner_mac_off == 4) {
1327                 loopback = true;
1328         } else if (inner_mac_off > 4) {
1329                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1330                                             ETH_HLEN - 2));
1331
1332                 /* We only support inner iPv4/ipv6.  If we don't see the
1333                  * correct protocol ID, it must be a loopback packet where
1334                  * the offsets are off by 4.
1335                  */
1336                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1337                         loopback = true;
1338         }
1339         if (loopback) {
1340                 /* internal loopback packet, subtract all offsets by 4 */
1341                 inner_ip_off -= 4;
1342                 inner_mac_off -= 4;
1343                 outer_ip_off -= 4;
1344         }
1345
1346         nw_off = inner_ip_off - ETH_HLEN;
1347         skb_set_network_header(skb, nw_off);
1348         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1349                 struct ipv6hdr *iph = ipv6_hdr(skb);
1350
1351                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1352                 len = skb->len - skb_transport_offset(skb);
1353                 th = tcp_hdr(skb);
1354                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1355         } else {
1356                 struct iphdr *iph = ip_hdr(skb);
1357
1358                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1359                 len = skb->len - skb_transport_offset(skb);
1360                 th = tcp_hdr(skb);
1361                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1362         }
1363
1364         if (inner_mac_off) { /* tunnel */
1365                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1366                                             ETH_HLEN - 2));
1367
1368                 bnxt_gro_tunnel(skb, proto);
1369         }
1370 #endif
1371         return skb;
1372 }
1373
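/* GRO completion for P5 (5750X) chips.  Unlike the 5731X path, this only
 * sets the network/transport header offsets from hdr_info and the tunnel
 * GSO type; the TCP pseudo checksum is not recomputed here.
 */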
1374 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1375                                            int payload_off, int tcp_ts,
1376                                            struct sk_buff *skb)
1377 {
1378 #ifdef CONFIG_INET
1379         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1380         u32 hdr_info = tpa_info->hdr_info;
1381         int iphdr_len, nw_off;
1382
1383         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1384         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1385         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1386
1387         nw_off = inner_ip_off - ETH_HLEN;
1388         skb_set_network_header(skb, nw_off);
1389         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1390                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1391         skb_set_transport_header(skb, nw_off + iphdr_len);
1392
1393         if (inner_mac_off) { /* tunnel */
1394                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1395                                             ETH_HLEN - 2));
1396
1397                 bnxt_gro_tunnel(skb, proto);
1398         }
1399 #endif
1400         return skb;
1401 }
1402
1403 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1404 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1405
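/* GRO completion for 5730X chips: work back from the reported payload
 * offset (and TCP timestamp option length) to locate the inner headers,
 * then seed the TCP pseudo checksum.  A non-zero network offset implies
 * an encapsulated packet.
 */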
1406 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1407                                            int payload_off, int tcp_ts,
1408                                            struct sk_buff *skb)
1409 {
1410 #ifdef CONFIG_INET
1411         struct tcphdr *th;
1412         int len, nw_off, tcp_opt_len = 0;
1413
1414         if (tcp_ts)
1415                 tcp_opt_len = 12;
1416
1417         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1418                 struct iphdr *iph;
1419
1420                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1421                          ETH_HLEN;
1422                 skb_set_network_header(skb, nw_off);
1423                 iph = ip_hdr(skb);
1424                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1425                 len = skb->len - skb_transport_offset(skb);
1426                 th = tcp_hdr(skb);
1427                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1428         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1429                 struct ipv6hdr *iph;
1430
1431                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1432                          ETH_HLEN;
1433                 skb_set_network_header(skb, nw_off);
1434                 iph = ipv6_hdr(skb);
1435                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1436                 len = skb->len - skb_transport_offset(skb);
1437                 th = tcp_hdr(skb);
1438                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1439         } else {
1440                 dev_kfree_skb_any(skb);
1441                 return NULL;
1442         }
1443
1444         if (nw_off) /* tunnel */
1445                 bnxt_gro_tunnel(skb, skb->protocol);
1446 #endif
1447         return skb;
1448 }
1449
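/* Finish GRO processing of an aggregated packet: program the segment
 * count, gso_size and gso_type from the TPA end completion, fix up the
 * headers via the chip-specific gro_func, and complete the skb with
 * tcp_gro_complete().
 */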
1450 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1451                                            struct bnxt_tpa_info *tpa_info,
1452                                            struct rx_tpa_end_cmp *tpa_end,
1453                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1454                                            struct sk_buff *skb)
1455 {
1456 #ifdef CONFIG_INET
1457         int payload_off;
1458         u16 segs;
1459
1460         segs = TPA_END_TPA_SEGS(tpa_end);
1461         if (segs == 1)
1462                 return skb;
1463
1464         NAPI_GRO_CB(skb)->count = segs;
1465         skb_shinfo(skb)->gso_size =
1466                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1467         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1468         if (bp->flags & BNXT_FLAG_CHIP_P5)
1469                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1470         else
1471                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1472         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1473         if (likely(skb))
1474                 tcp_gro_complete(skb);
1475 #endif
1476         return skb;
1477 }
1478
1479 /* Given the cfa_code of a received packet, determine which
1480  * netdev (vf-rep or PF) the packet is destined for.
1481  */
1482 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1483 {
1484         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1485
1486         /* if vf-rep dev is NULL, the packet must belong to the PF */
1487         return dev ? dev : bp->dev;
1488 }
1489
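/* Handle a TPA end completion: recover the buffer saved at TPA start,
 * copy small packets or convert the data buffer into a new skb, attach
 * any aggregation pages, then apply the RSS hash, VLAN tag, checksum
 * state and optional GRO fixups.  Returns the completed skb, NULL if the
 * packet was aborted, or ERR_PTR(-EBUSY) if the completion ring does not
 * yet hold all the aggregation entries.
 */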
1490 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1491                                            struct bnxt_cp_ring_info *cpr,
1492                                            u32 *raw_cons,
1493                                            struct rx_tpa_end_cmp *tpa_end,
1494                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1495                                            u8 *event)
1496 {
1497         struct bnxt_napi *bnapi = cpr->bnapi;
1498         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1499         u8 *data_ptr, agg_bufs;
1500         unsigned int len;
1501         struct bnxt_tpa_info *tpa_info;
1502         dma_addr_t mapping;
1503         struct sk_buff *skb;
1504         u16 idx = 0, agg_id;
1505         void *data;
1506         bool gro;
1507
1508         if (unlikely(bnapi->in_reset)) {
1509                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1510
1511                 if (rc < 0)
1512                         return ERR_PTR(-EBUSY);
1513                 return NULL;
1514         }
1515
1516         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1517                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1518                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1519                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1520                 tpa_info = &rxr->rx_tpa[agg_id];
1521                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1522                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1523                                     agg_bufs, tpa_info->agg_count);
1524                         agg_bufs = tpa_info->agg_count;
1525                 }
1526                 tpa_info->agg_count = 0;
1527                 *event |= BNXT_AGG_EVENT;
1528                 bnxt_free_agg_idx(rxr, agg_id);
1529                 idx = agg_id;
1530                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1531         } else {
1532                 agg_id = TPA_END_AGG_ID(tpa_end);
1533                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1534                 tpa_info = &rxr->rx_tpa[agg_id];
1535                 idx = RING_CMP(*raw_cons);
1536                 if (agg_bufs) {
1537                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1538                                 return ERR_PTR(-EBUSY);
1539
1540                         *event |= BNXT_AGG_EVENT;
1541                         idx = NEXT_CMP(idx);
1542                 }
1543                 gro = !!TPA_END_GRO(tpa_end);
1544         }
1545         data = tpa_info->data;
1546         data_ptr = tpa_info->data_ptr;
1547         prefetch(data_ptr);
1548         len = tpa_info->len;
1549         mapping = tpa_info->mapping;
1550
1551         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1552                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1553                 if (agg_bufs > MAX_SKB_FRAGS)
1554                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1555                                     agg_bufs, (int)MAX_SKB_FRAGS);
1556                 return NULL;
1557         }
1558
1559         if (len <= bp->rx_copy_thresh) {
1560                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1561                 if (!skb) {
1562                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1563                         return NULL;
1564                 }
1565         } else {
1566                 u8 *new_data;
1567                 dma_addr_t new_mapping;
1568
1569                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1570                 if (!new_data) {
1571                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1572                         return NULL;
1573                 }
1574
1575                 tpa_info->data = new_data;
1576                 tpa_info->data_ptr = new_data + bp->rx_offset;
1577                 tpa_info->mapping = new_mapping;
1578
1579                 skb = build_skb(data, 0);
1580                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1581                                        bp->rx_buf_use_size, bp->rx_dir,
1582                                        DMA_ATTR_WEAK_ORDERING);
1583
1584                 if (!skb) {
1585                         kfree(data);
1586                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1587                         return NULL;
1588                 }
1589                 skb_reserve(skb, bp->rx_offset);
1590                 skb_put(skb, len);
1591         }
1592
1593         if (agg_bufs) {
1594                 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1595                 if (!skb) {
1596                         /* Page reuse already handled by bnxt_rx_pages(). */
1597                         return NULL;
1598                 }
1599         }
1600
1601         skb->protocol =
1602                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1603
1604         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1605                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1606
1607         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1608             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1609                 u16 vlan_proto = tpa_info->metadata >>
1610                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1611                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1612
1613                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1614         }
1615
1616         skb_checksum_none_assert(skb);
1617         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1618                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1619                 skb->csum_level =
1620                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1621         }
1622
1623         if (gro)
1624                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1625
1626         return skb;
1627 }
1628
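/* Stage a TPA aggregation completion (P5 chips) in its TPA context so
 * that bnxt_tpa_end() can process all the buffers in one pass.
 */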
1629 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1630                          struct rx_agg_cmp *rx_agg)
1631 {
1632         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1633         struct bnxt_tpa_info *tpa_info;
1634
1635         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1636         tpa_info = &rxr->rx_tpa[agg_id];
1637         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1638         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1639 }
1640
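/* Deliver a completed skb either to its VF representor or up the normal
 * NAPI GRO receive path on the PF.
 */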
1641 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1642                              struct sk_buff *skb)
1643 {
1644         if (skb->dev != bp->dev) {
1645                 /* this packet belongs to a vf-rep */
1646                 bnxt_vf_rep_rx(bp, skb);
1647                 return;
1648         }
1649         skb_record_rx_queue(skb, bnapi->index);
1650         napi_gro_receive(&bnapi->napi, skb);
1651 }
1652
1653 /* returns the following:
1654  * 1       - 1 packet successfully received
1655  * 0       - successful TPA_START, packet not completed yet
1656  * -EBUSY  - completion ring does not have all the agg buffers yet
1657  * -ENOMEM - packet aborted due to out of memory
1658  * -EIO    - packet aborted due to hw error indicated in BD
1659  */
1660 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1661                        u32 *raw_cons, u8 *event)
1662 {
1663         struct bnxt_napi *bnapi = cpr->bnapi;
1664         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1665         struct net_device *dev = bp->dev;
1666         struct rx_cmp *rxcmp;
1667         struct rx_cmp_ext *rxcmp1;
1668         u32 tmp_raw_cons = *raw_cons;
1669         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1670         struct bnxt_sw_rx_bd *rx_buf;
1671         unsigned int len;
1672         u8 *data_ptr, agg_bufs, cmp_type;
1673         dma_addr_t dma_addr;
1674         struct sk_buff *skb;
1675         void *data;
1676         int rc = 0;
1677         u32 misc;
1678
1679         rxcmp = (struct rx_cmp *)
1680                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1681
1682         cmp_type = RX_CMP_TYPE(rxcmp);
1683
1684         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1685                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1686                 goto next_rx_no_prod_no_len;
1687         }
1688
1689         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1690         cp_cons = RING_CMP(tmp_raw_cons);
1691         rxcmp1 = (struct rx_cmp_ext *)
1692                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1693
1694         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1695                 return -EBUSY;
1696
1697         prod = rxr->rx_prod;
1698
1699         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1700                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1701                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1702
1703                 *event |= BNXT_RX_EVENT;
1704                 goto next_rx_no_prod_no_len;
1705
1706         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1707                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1708                                    (struct rx_tpa_end_cmp *)rxcmp,
1709                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1710
1711                 if (IS_ERR(skb))
1712                         return -EBUSY;
1713
1714                 rc = -ENOMEM;
1715                 if (likely(skb)) {
1716                         bnxt_deliver_skb(bp, bnapi, skb);
1717                         rc = 1;
1718                 }
1719                 *event |= BNXT_RX_EVENT;
1720                 goto next_rx_no_prod_no_len;
1721         }
1722
1723         cons = rxcmp->rx_cmp_opaque;
1724         if (unlikely(cons != rxr->rx_next_cons)) {
1725                 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1726
1727                 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1728                             cons, rxr->rx_next_cons);
1729                 bnxt_sched_reset(bp, rxr);
1730                 return rc1;
1731         }
1732         rx_buf = &rxr->rx_buf_ring[cons];
1733         data = rx_buf->data;
1734         data_ptr = rx_buf->data_ptr;
1735         prefetch(data_ptr);
1736
1737         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1738         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1739
1740         if (agg_bufs) {
1741                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1742                         return -EBUSY;
1743
1744                 cp_cons = NEXT_CMP(cp_cons);
1745                 *event |= BNXT_AGG_EVENT;
1746         }
1747         *event |= BNXT_RX_EVENT;
1748
1749         rx_buf->data = NULL;
1750         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1751                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1752
1753                 bnxt_reuse_rx_data(rxr, cons, data);
1754                 if (agg_bufs)
1755                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1756                                                false);
1757
1758                 rc = -EIO;
1759                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1760                         netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1761                         bnxt_sched_reset(bp, rxr);
1762                 }
1763                 goto next_rx_no_len;
1764         }
1765
1766         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1767         dma_addr = rx_buf->mapping;
1768
1769         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1770                 rc = 1;
1771                 goto next_rx;
1772         }
1773
1774         if (len <= bp->rx_copy_thresh) {
1775                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1776                 bnxt_reuse_rx_data(rxr, cons, data);
1777                 if (!skb) {
1778                         if (agg_bufs)
1779                                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1780                                                        agg_bufs, false);
1781                         rc = -ENOMEM;
1782                         goto next_rx;
1783                 }
1784         } else {
1785                 u32 payload;
1786
1787                 if (rx_buf->data_ptr == data_ptr)
1788                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1789                 else
1790                         payload = 0;
1791                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1792                                       payload | len);
1793                 if (!skb) {
1794                         rc = -ENOMEM;
1795                         goto next_rx;
1796                 }
1797         }
1798
1799         if (agg_bufs) {
1800                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1801                 if (!skb) {
1802                         rc = -ENOMEM;
1803                         goto next_rx;
1804                 }
1805         }
1806
1807         if (RX_CMP_HASH_VALID(rxcmp)) {
1808                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1809                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1810
1811                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1812                 if (hash_type != 1 && hash_type != 3)
1813                         type = PKT_HASH_TYPE_L3;
1814                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1815         }
1816
1817         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1818         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1819
1820         if ((rxcmp1->rx_cmp_flags2 &
1821              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1822             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1823                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1824                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1825                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1826
1827                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1828         }
1829
1830         skb_checksum_none_assert(skb);
1831         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1832                 if (dev->features & NETIF_F_RXCSUM) {
1833                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1834                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1835                 }
1836         } else {
1837                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1838                         if (dev->features & NETIF_F_RXCSUM)
1839                                 bnapi->cp_ring.rx_l4_csum_errors++;
1840                 }
1841         }
1842
1843         bnxt_deliver_skb(bp, bnapi, skb);
1844         rc = 1;
1845
1846 next_rx:
1847         cpr->rx_packets += 1;
1848         cpr->rx_bytes += len;
1849
1850 next_rx_no_len:
1851         rxr->rx_prod = NEXT_RX(prod);
1852         rxr->rx_next_cons = NEXT_RX(cons);
1853
1854 next_rx_no_prod_no_len:
1855         *raw_cons = tmp_raw_cons;
1856
1857         return rc;
1858 }
1859
1860 /* In netpoll mode, if we are using a combined completion ring, we need to
1861  * discard the rx packets and recycle the buffers.
1862  */
1863 static int bnxt_force_rx_discard(struct bnxt *bp,
1864                                  struct bnxt_cp_ring_info *cpr,
1865                                  u32 *raw_cons, u8 *event)
1866 {
1867         u32 tmp_raw_cons = *raw_cons;
1868         struct rx_cmp_ext *rxcmp1;
1869         struct rx_cmp *rxcmp;
1870         u16 cp_cons;
1871         u8 cmp_type;
1872
1873         cp_cons = RING_CMP(tmp_raw_cons);
1874         rxcmp = (struct rx_cmp *)
1875                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1876
1877         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1878         cp_cons = RING_CMP(tmp_raw_cons);
1879         rxcmp1 = (struct rx_cmp_ext *)
1880                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1881
1882         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1883                 return -EBUSY;
1884
1885         cmp_type = RX_CMP_TYPE(rxcmp);
1886         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1887                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1888                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1889         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1890                 struct rx_tpa_end_cmp_ext *tpa_end1;
1891
1892                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1893                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1894                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1895         }
1896         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1897 }
1898
1899 #define BNXT_GET_EVENT_PORT(data)       \
1900         ((data) &                       \
1901          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1902
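/* Decode a firmware async event completion and schedule the matching
 * slow-path work (link change, PF driver unload, port module event,
 * VF config change), then notify the ULPs.
 */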
1903 static int bnxt_async_event_process(struct bnxt *bp,
1904                                     struct hwrm_async_event_cmpl *cmpl)
1905 {
1906         u16 event_id = le16_to_cpu(cmpl->event_id);
1907
1908         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1909         switch (event_id) {
1910         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1911                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1912                 struct bnxt_link_info *link_info = &bp->link_info;
1913
1914                 if (BNXT_VF(bp))
1915                         goto async_event_process_exit;
1916
1917                 /* print unsupported speed warning in forced speed mode only */
1918                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1919                     (data1 & 0x20000)) {
1920                         u16 fw_speed = link_info->force_link_speed;
1921                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1922
1923                         if (speed != SPEED_UNKNOWN)
1924                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1925                                             speed);
1926                 }
1927                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1928         }
1929         /* fall through */
1930         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1931                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1932                 break;
1933         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1934                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1935                 break;
1936         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1937                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1938                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1939
1940                 if (BNXT_VF(bp))
1941                         break;
1942
1943                 if (bp->pf.port_id != port_id)
1944                         break;
1945
1946                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1947                 break;
1948         }
1949         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1950                 if (BNXT_PF(bp))
1951                         goto async_event_process_exit;
1952                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1953                 break;
1954         default:
1955                 goto async_event_process_exit;
1956         }
1957         bnxt_queue_sp_work(bp);
1958 async_event_process_exit:
1959         bnxt_ulp_async_events(bp, cmpl);
1960         return 0;
1961 }
1962
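/* Dispatch HWRM-related completions found on the ring: DONE completions
 * for commands issued in interrupt mode, forwarded VF requests, and
 * async events.
 */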
1963 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1964 {
1965         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1966         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1967         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1968                                 (struct hwrm_fwd_req_cmpl *)txcmp;
1969
1970         switch (cmpl_type) {
1971         case CMPL_BASE_TYPE_HWRM_DONE:
1972                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1973                 if (seq_id == bp->hwrm_intr_seq_id)
1974                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
1975                 else
1976                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1977                 break;
1978
1979         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1980                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1981
1982                 if ((vf_id < bp->pf.first_vf_id) ||
1983                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1984                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1985                                    vf_id);
1986                         return -EINVAL;
1987                 }
1988
1989                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1990                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1991                 bnxt_queue_sp_work(bp);
1992                 break;
1993
1994         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1995                 bnxt_async_event_process(bp,
1996                                          (struct hwrm_async_event_cmpl *)txcmp);
1997                 /* fall through */
1998         default:
1999                 break;
2000         }
2001
2002         return 0;
2003 }
2004
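/* MSI-X handler: one vector per completion ring.  Prefetch the next
 * completion entry and hand the rest of the work to NAPI.
 */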
2005 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2006 {
2007         struct bnxt_napi *bnapi = dev_instance;
2008         struct bnxt *bp = bnapi->bp;
2009         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2010         u32 cons = RING_CMP(cpr->cp_raw_cons);
2011
2012         cpr->event_ctr++;
2013         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2014         napi_schedule(&bnapi->napi);
2015         return IRQ_HANDLED;
2016 }
2017
2018 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2019 {
2020         u32 raw_cons = cpr->cp_raw_cons;
2021         u16 cons = RING_CMP(raw_cons);
2022         struct tx_cmp *txcmp;
2023
2024         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2025
2026         return TX_CMP_VALID(txcmp, raw_cons);
2027 }
2028
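/* Legacy INTx handler: verify the interrupt is really ours, mask the
 * ring IRQ through the doorbell, and schedule NAPI unless interrupts
 * are globally disabled (intr_sem held).
 */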
2029 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2030 {
2031         struct bnxt_napi *bnapi = dev_instance;
2032         struct bnxt *bp = bnapi->bp;
2033         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2034         u32 cons = RING_CMP(cpr->cp_raw_cons);
2035         u32 int_status;
2036
2037         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2038
2039         if (!bnxt_has_work(bp, cpr)) {
2040                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2041                 /* return if erroneous interrupt */
2042                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2043                         return IRQ_NONE;
2044         }
2045
2046         /* disable ring IRQ */
2047         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2048
2049         /* Return here if interrupt is shared and is disabled. */
2050         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2051                 return IRQ_HANDLED;
2052
2053         napi_schedule(&bnapi->napi);
2054         return IRQ_HANDLED;
2055 }
2056
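/* Core poll loop for one completion ring: walk the valid entries,
 * counting TX completions, processing RX packets against the budget and
 * dispatching HWRM completions, until the budget is exhausted or the
 * ring runs dry.  TX buffers are reclaimed and RX doorbells written
 * later in __bnxt_poll_work_done().
 */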
2057 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2058                             int budget)
2059 {
2060         struct bnxt_napi *bnapi = cpr->bnapi;
2061         u32 raw_cons = cpr->cp_raw_cons;
2062         u32 cons;
2063         int tx_pkts = 0;
2064         int rx_pkts = 0;
2065         u8 event = 0;
2066         struct tx_cmp *txcmp;
2067
2068         cpr->has_more_work = 0;
2069         while (1) {
2070                 int rc;
2071
2072                 cons = RING_CMP(raw_cons);
2073                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2074
2075                 if (!TX_CMP_VALID(txcmp, raw_cons))
2076                         break;
2077
2078                 /* The validity test of the entry must be done before
2079                  * reading any further.
2080                  */
2081                 dma_rmb();
2082                 cpr->had_work_done = 1;
2083                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2084                         tx_pkts++;
2085                         /* return full budget so NAPI will complete. */
2086                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2087                                 rx_pkts = budget;
2088                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2089                                 if (budget)
2090                                         cpr->has_more_work = 1;
2091                                 break;
2092                         }
2093                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2094                         if (likely(budget))
2095                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2096                         else
2097                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2098                                                            &event);
2099                         if (likely(rc >= 0))
2100                                 rx_pkts += rc;
2101                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2102                          * the NAPI budget.  Otherwise, we may potentially loop
2103                          * here forever if we consistently cannot allocate
2104                          * buffers.
2105                          */
2106                         else if (rc == -ENOMEM && budget)
2107                                 rx_pkts++;
2108                         else if (rc == -EBUSY)  /* partial completion */
2109                                 break;
2110                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2111                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2112                                     (TX_CMP_TYPE(txcmp) ==
2113                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2114                                     (TX_CMP_TYPE(txcmp) ==
2115                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2116                         bnxt_hwrm_handler(bp, txcmp);
2117                 }
2118                 raw_cons = NEXT_RAW_CMP(raw_cons);
2119
2120                 if (rx_pkts && rx_pkts == budget) {
2121                         cpr->has_more_work = 1;
2122                         break;
2123                 }
2124         }
2125
2126         if (event & BNXT_REDIRECT_EVENT)
2127                 xdp_do_flush_map();
2128
2129         if (event & BNXT_TX_EVENT) {
2130                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2131                 u16 prod = txr->tx_prod;
2132
2133                 /* Sync BD data before updating doorbell */
2134                 wmb();
2135
2136                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2137         }
2138
2139         cpr->cp_raw_cons = raw_cons;
2140         bnapi->tx_pkts += tx_pkts;
2141         bnapi->events |= event;
2142         return rx_pkts;
2143 }
2144
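/* Post-poll cleanup: reclaim completed TX buffers and write the RX/agg
 * doorbells for any buffers replenished during the poll.
 */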
2145 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2146 {
2147         if (bnapi->tx_pkts) {
2148                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2149                 bnapi->tx_pkts = 0;
2150         }
2151
2152         if (bnapi->events & BNXT_RX_EVENT) {
2153                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2154
2155                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2156                 if (bnapi->events & BNXT_AGG_EVENT)
2157                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2158         }
2159         bnapi->events = 0;
2160 }
2161
2162 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2163                           int budget)
2164 {
2165         struct bnxt_napi *bnapi = cpr->bnapi;
2166         int rx_pkts;
2167
2168         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2169
2170         /* ACK completion ring before freeing tx ring and producing new
2171          * buffers in rx/agg rings to prevent overflowing the completion
2172          * ring.
2173          */
2174         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2175
2176         __bnxt_poll_work_done(bp, bnapi);
2177         return rx_pkts;
2178 }
2179
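/* NAPI poll for the special Nitro A0 ring: RX completions on this ring
 * are forced into an error state so that the buffers are recycled
 * rather than delivered up the stack.
 */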
2180 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2181 {
2182         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2183         struct bnxt *bp = bnapi->bp;
2184         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2185         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2186         struct tx_cmp *txcmp;
2187         struct rx_cmp_ext *rxcmp1;
2188         u32 cp_cons, tmp_raw_cons;
2189         u32 raw_cons = cpr->cp_raw_cons;
2190         u32 rx_pkts = 0;
2191         u8 event = 0;
2192
2193         while (1) {
2194                 int rc;
2195
2196                 cp_cons = RING_CMP(raw_cons);
2197                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2198
2199                 if (!TX_CMP_VALID(txcmp, raw_cons))
2200                         break;
2201
2202                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2203                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2204                         cp_cons = RING_CMP(tmp_raw_cons);
2205                         rxcmp1 = (struct rx_cmp_ext *)
2206                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2207
2208                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2209                                 break;
2210
2211                         /* force an error to recycle the buffer */
2212                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2213                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2214
2215                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2216                         if (likely(rc == -EIO) && budget)
2217                                 rx_pkts++;
2218                         else if (rc == -EBUSY)  /* partial completion */
2219                                 break;
2220                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2221                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2222                         bnxt_hwrm_handler(bp, txcmp);
2223                 } else {
2224                         netdev_err(bp->dev,
2225                                    "Invalid completion received on special ring\n");
2226                 }
2227                 raw_cons = NEXT_RAW_CMP(raw_cons);
2228
2229                 if (rx_pkts == budget)
2230                         break;
2231         }
2232
2233         cpr->cp_raw_cons = raw_cons;
2234         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2235         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2236
2237         if (event & BNXT_AGG_EVENT)
2238                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2239
2240         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2241                 napi_complete_done(napi, rx_pkts);
2242                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2243         }
2244         return rx_pkts;
2245 }
2246
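/* Standard NAPI poll: drain the completion ring until the budget is met
 * or no work remains, then feed the ring statistics to dynamic interrupt
 * moderation (DIM) if it is enabled.
 */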
2247 static int bnxt_poll(struct napi_struct *napi, int budget)
2248 {
2249         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2250         struct bnxt *bp = bnapi->bp;
2251         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2252         int work_done = 0;
2253
2254         while (1) {
2255                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2256
2257                 if (work_done >= budget) {
2258                         if (!budget)
2259                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2260                         break;
2261                 }
2262
2263                 if (!bnxt_has_work(bp, cpr)) {
2264                         if (napi_complete_done(napi, work_done))
2265                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2266                         break;
2267                 }
2268         }
2269         if (bp->flags & BNXT_FLAG_DIM) {
2270                 struct dim_sample dim_sample = {};
2271
2272                 dim_update_sample(cpr->event_ctr,
2273                                   cpr->rx_packets,
2274                                   cpr->rx_bytes,
2275                                   &dim_sample);
2276                 net_dim(&cpr->dim, dim_sample);
2277         }
2278         return work_done;
2279 }
2280
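/* Poll the child completion rings (up to one RX and one TX CQ) hanging
 * off this NQ-based NAPI context.
 */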
2281 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2282 {
2283         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2284         int i, work_done = 0;
2285
2286         for (i = 0; i < 2; i++) {
2287                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2288
2289                 if (cpr2) {
2290                         work_done += __bnxt_poll_work(bp, cpr2,
2291                                                       budget - work_done);
2292                         cpr->has_more_work |= cpr2->has_more_work;
2293                 }
2294         }
2295         return work_done;
2296 }
2297
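/* Write a CQ doorbell of the given type for each child completion ring
 * that did work (or for all of them), then flush pending TX/RX events.
 */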
2298 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2299                                  u64 dbr_type, bool all)
2300 {
2301         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2302         int i;
2303
2304         for (i = 0; i < 2; i++) {
2305                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2306                 struct bnxt_db_info *db;
2307
2308                 if (cpr2 && (all || cpr2->had_work_done)) {
2309                         db = &cpr2->cp_db;
2310                         writeq(db->db_key64 | dbr_type |
2311                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2312                         cpr2->had_work_done = 0;
2313                 }
2314         }
2315         __bnxt_poll_work_done(bp, bnapi);
2316 }
2317
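/* NAPI poll for P5 chips: the vector services a notification queue (NQ);
 * each CQ_NOTIFICATION entry names the child completion ring that has
 * work, which is then polled with __bnxt_poll_work().
 */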
2318 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2319 {
2320         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2321         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2322         u32 raw_cons = cpr->cp_raw_cons;
2323         struct bnxt *bp = bnapi->bp;
2324         struct nqe_cn *nqcmp;
2325         int work_done = 0;
2326         u32 cons;
2327
2328         if (cpr->has_more_work) {
2329                 cpr->has_more_work = 0;
2330                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2331                 if (cpr->has_more_work) {
2332                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2333                         return work_done;
2334                 }
2335                 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2336                 if (napi_complete_done(napi, work_done))
2337                         BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2338                 return work_done;
2339         }
2340         while (1) {
2341                 cons = RING_CMP(raw_cons);
2342                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2343
2344                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2345                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2346                                              false);
2347                         cpr->cp_raw_cons = raw_cons;
2348                         if (napi_complete_done(napi, work_done))
2349                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2350                                                   cpr->cp_raw_cons);
2351                         return work_done;
2352                 }
2353
2354                 /* The validity test of the entry must be done before
2355                  * reading any further.
2356                  */
2357                 dma_rmb();
2358
2359                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2360                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2361                         struct bnxt_cp_ring_info *cpr2;
2362
2363                         cpr2 = cpr->cp_ring_arr[idx];
2364                         work_done += __bnxt_poll_work(bp, cpr2,
2365                                                       budget - work_done);
2366                         cpr->has_more_work = cpr2->has_more_work;
2367                 } else {
2368                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2369                 }
2370                 raw_cons = NEXT_RAW_CMP(raw_cons);
2371                 if (cpr->has_more_work)
2372                         break;
2373         }
2374         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2375         cpr->cp_raw_cons = raw_cons;
2376         return work_done;
2377 }
2378
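/* Unmap and free every pending TX buffer on all rings: XDP_REDIRECT
 * frames, push-mode skbs, and normally mapped head and fragment
 * buffers, then reset each netdev TX queue.
 */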
2379 static void bnxt_free_tx_skbs(struct bnxt *bp)
2380 {
2381         int i, max_idx;
2382         struct pci_dev *pdev = bp->pdev;
2383
2384         if (!bp->tx_ring)
2385                 return;
2386
2387         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2388         for (i = 0; i < bp->tx_nr_rings; i++) {
2389                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2390                 int j;
2391
2392                 for (j = 0; j < max_idx;) {
2393                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2394                         struct sk_buff *skb;
2395                         int k, last;
2396
2397                         if (i < bp->tx_nr_rings_xdp &&
2398                             tx_buf->action == XDP_REDIRECT) {
2399                                 dma_unmap_single(&pdev->dev,
2400                                         dma_unmap_addr(tx_buf, mapping),
2401                                         dma_unmap_len(tx_buf, len),
2402                                         PCI_DMA_TODEVICE);
2403                                 xdp_return_frame(tx_buf->xdpf);
2404                                 tx_buf->action = 0;
2405                                 tx_buf->xdpf = NULL;
2406                                 j++;
2407                                 continue;
2408                         }
2409
2410                         skb = tx_buf->skb;
2411                         if (!skb) {
2412                                 j++;
2413                                 continue;
2414                         }
2415
2416                         tx_buf->skb = NULL;
2417
2418                         if (tx_buf->is_push) {
2419                                 dev_kfree_skb(skb);
2420                                 j += 2;
2421                                 continue;
2422                         }
2423
2424                         dma_unmap_single(&pdev->dev,
2425                                          dma_unmap_addr(tx_buf, mapping),
2426                                          skb_headlen(skb),
2427                                          PCI_DMA_TODEVICE);
2428
2429                         last = tx_buf->nr_frags;
2430                         j += 2;
2431                         for (k = 0; k < last; k++, j++) {
2432                                 int ring_idx = j & bp->tx_ring_mask;
2433                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2434
2435                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2436                                 dma_unmap_page(
2437                                         &pdev->dev,
2438                                         dma_unmap_addr(tx_buf, mapping),
2439                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2440                         }
2441                         dev_kfree_skb(skb);
2442                 }
2443                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2444         }
2445 }
2446
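/* Unmap and free all RX-side buffers: TPA data buffers, the RX buffer
 * ring (recycling pages to the page_pool in page mode), the aggregation
 * pages and the partial rx_page, then clear the TPA agg_id bitmap.
 */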
2447 static void bnxt_free_rx_skbs(struct bnxt *bp)
2448 {
2449         int i, max_idx, max_agg_idx;
2450         struct pci_dev *pdev = bp->pdev;
2451
2452         if (!bp->rx_ring)
2453                 return;
2454
2455         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2456         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2457         for (i = 0; i < bp->rx_nr_rings; i++) {
2458                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2459                 struct bnxt_tpa_idx_map *map;
2460                 int j;
2461
2462                 if (rxr->rx_tpa) {
2463                         for (j = 0; j < bp->max_tpa; j++) {
2464                                 struct bnxt_tpa_info *tpa_info =
2465                                                         &rxr->rx_tpa[j];
2466                                 u8 *data = tpa_info->data;
2467
2468                                 if (!data)
2469                                         continue;
2470
2471                                 dma_unmap_single_attrs(&pdev->dev,
2472                                                        tpa_info->mapping,
2473                                                        bp->rx_buf_use_size,
2474                                                        bp->rx_dir,
2475                                                        DMA_ATTR_WEAK_ORDERING);
2476
2477                                 tpa_info->data = NULL;
2478
2479                                 kfree(data);
2480                         }
2481                 }
2482
2483                 for (j = 0; j < max_idx; j++) {
2484                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2485                         dma_addr_t mapping = rx_buf->mapping;
2486                         void *data = rx_buf->data;
2487
2488                         if (!data)
2489                                 continue;
2490
2491                         rx_buf->data = NULL;
2492
2493                         if (BNXT_RX_PAGE_MODE(bp)) {
2494                                 mapping -= bp->rx_dma_offset;
2495                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2496                                                      PAGE_SIZE, bp->rx_dir,
2497                                                      DMA_ATTR_WEAK_ORDERING);
2498                                 page_pool_recycle_direct(rxr->page_pool, data);
2499                         } else {
2500                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2501                                                        bp->rx_buf_use_size,
2502                                                        bp->rx_dir,
2503                                                        DMA_ATTR_WEAK_ORDERING);
2504                                 kfree(data);
2505                         }
2506                 }
2507
2508                 for (j = 0; j < max_agg_idx; j++) {
2509                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2510                                 &rxr->rx_agg_ring[j];
2511                         struct page *page = rx_agg_buf->page;
2512
2513                         if (!page)
2514                                 continue;
2515
2516                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2517                                              BNXT_RX_PAGE_SIZE,
2518                                              PCI_DMA_FROMDEVICE,
2519                                              DMA_ATTR_WEAK_ORDERING);
2520
2521                         rx_agg_buf->page = NULL;
2522                         __clear_bit(j, rxr->rx_agg_bmap);
2523
2524                         __free_page(page);
2525                 }
2526                 if (rxr->rx_page) {
2527                         __free_page(rxr->rx_page);
2528                         rxr->rx_page = NULL;
2529                 }
2530                 map = rxr->rx_tpa_idx_map;
2531                 if (map)
2532                         memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2533         }
2534 }
2535
2536 static void bnxt_free_skbs(struct bnxt *bp)
2537 {
2538         bnxt_free_tx_skbs(bp);
2539         bnxt_free_rx_skbs(bp);
2540 }
2541
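/* Free the coherent DMA pages, the optional page table and the
 * vmalloc'ed shadow area of a ring allocated by bnxt_alloc_ring().
 */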
2542 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2543 {
2544         struct pci_dev *pdev = bp->pdev;
2545         int i;
2546
2547         for (i = 0; i < rmem->nr_pages; i++) {
2548                 if (!rmem->pg_arr[i])
2549                         continue;
2550
2551                 dma_free_coherent(&pdev->dev, rmem->page_size,
2552                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2553
2554                 rmem->pg_arr[i] = NULL;
2555         }
2556         if (rmem->pg_tbl) {
2557                 size_t pg_tbl_size = rmem->nr_pages * 8;
2558
2559                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2560                         pg_tbl_size = rmem->page_size;
2561                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2562                                   rmem->pg_tbl, rmem->pg_tbl_map);
2563                 rmem->pg_tbl = NULL;
2564         }
2565         if (rmem->vmem_size && *rmem->vmem) {
2566                 vfree(*rmem->vmem);
2567                 *rmem->vmem = NULL;
2568         }
2569 }
2570
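/* Allocate the coherent DMA pages backing a ring, an optional page table
 * (for multi-page or indirect rings) tagged with the PTU valid/last
 * bits, and any vmalloc'ed shadow area described by vmem_size.
 */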
2571 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2572 {
2573         struct pci_dev *pdev = bp->pdev;
2574         u64 valid_bit = 0;
2575         int i;
2576
2577         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2578                 valid_bit = PTU_PTE_VALID;
2579         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2580                 size_t pg_tbl_size = rmem->nr_pages * 8;
2581
2582                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2583                         pg_tbl_size = rmem->page_size;
2584                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2585                                                   &rmem->pg_tbl_map,
2586                                                   GFP_KERNEL);
2587                 if (!rmem->pg_tbl)
2588                         return -ENOMEM;
2589         }
2590
2591         for (i = 0; i < rmem->nr_pages; i++) {
2592                 u64 extra_bits = valid_bit;
2593
2594                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2595                                                      rmem->page_size,
2596                                                      &rmem->dma_arr[i],
2597                                                      GFP_KERNEL);
2598                 if (!rmem->pg_arr[i])
2599                         return -ENOMEM;
2600
2601                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2602                         if (i == rmem->nr_pages - 2 &&
2603                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2604                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2605                         else if (i == rmem->nr_pages - 1 &&
2606                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2607                                 extra_bits |= PTU_PTE_LAST;
2608                         rmem->pg_tbl[i] =
2609                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2610                 }
2611         }
2612
2613         if (rmem->vmem_size) {
2614                 *rmem->vmem = vzalloc(rmem->vmem_size);
2615                 if (!(*rmem->vmem))
2616                         return -ENOMEM;
2617         }
2618         return 0;
2619 }
2620
2621 static void bnxt_free_tpa_info(struct bnxt *bp)
2622 {
2623         int i;
2624
2625         for (i = 0; i < bp->rx_nr_rings; i++) {
2626                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2627
2628                 kfree(rxr->rx_tpa_idx_map);
2629                 rxr->rx_tpa_idx_map = NULL;
2630                 if (rxr->rx_tpa) {
2631                         kfree(rxr->rx_tpa[0].agg_arr);
2632                         rxr->rx_tpa[0].agg_arr = NULL;
2633                 }
2634                 kfree(rxr->rx_tpa);
2635                 rxr->rx_tpa = NULL;
2636         }
2637 }
2638
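/* Allocate per-ring TPA state.  On P5 chips the TPA ID space is sized
 * from the firmware-reported max_tpa_v2, and each TPA context gets a
 * slice of a shared array for staging aggregation completions plus an
 * agg_id lookup map.
 */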
2639 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2640 {
2641         int i, j, total_aggs = 0;
2642
2643         bp->max_tpa = MAX_TPA;
2644         if (bp->flags & BNXT_FLAG_CHIP_P5) {
2645                 if (!bp->max_tpa_v2)
2646                         return 0;
2647                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2648                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2649         }
2650
2651         for (i = 0; i < bp->rx_nr_rings; i++) {
2652                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2653                 struct rx_agg_cmp *agg;
2654
2655                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2656                                       GFP_KERNEL);
2657                 if (!rxr->rx_tpa)
2658                         return -ENOMEM;
2659
2660                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2661                         continue;
2662                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2663                 rxr->rx_tpa[0].agg_arr = agg;
2664                 if (!agg)
2665                         return -ENOMEM;
2666                 for (j = 1; j < bp->max_tpa; j++)
2667                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2668                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2669                                               GFP_KERNEL);
2670                 if (!rxr->rx_tpa_idx_map)
2671                         return -ENOMEM;
2672         }
2673         return 0;
2674 }
2675
2676 static void bnxt_free_rx_rings(struct bnxt *bp)
2677 {
2678         int i;
2679
2680         if (!bp->rx_ring)
2681                 return;
2682
2683         bnxt_free_tpa_info(bp);
2684         for (i = 0; i < bp->rx_nr_rings; i++) {
2685                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2686                 struct bnxt_ring_struct *ring;
2687
2688                 if (rxr->xdp_prog)
2689                         bpf_prog_put(rxr->xdp_prog);
2690
2691                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2692                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2693
2694                 page_pool_destroy(rxr->page_pool);
2695                 rxr->page_pool = NULL;
2696
2697                 kfree(rxr->rx_agg_bmap);
2698                 rxr->rx_agg_bmap = NULL;
2699
2700                 ring = &rxr->rx_ring_struct;
2701                 bnxt_free_ring(bp, &ring->ring_mem);
2702
2703                 ring = &rxr->rx_agg_ring_struct;
2704                 bnxt_free_ring(bp, &ring->ring_mem);
2705         }
2706 }
2707
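/* Create the page_pool that backs RX buffers for this ring, sized to
 * match the RX ring and allocated NUMA-local to the device.
 */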
2708 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2709                                    struct bnxt_rx_ring_info *rxr)
2710 {
2711         struct page_pool_params pp = { 0 };
2712
2713         pp.pool_size = bp->rx_ring_size;
2714         pp.nid = dev_to_node(&bp->pdev->dev);
2715         pp.dev = &bp->pdev->dev;
2716         pp.dma_dir = DMA_BIDIRECTIONAL;
2717
2718         rxr->page_pool = page_pool_create(&pp);
2719         if (IS_ERR(rxr->page_pool)) {
2720                 int err = PTR_ERR(rxr->page_pool);
2721
2722                 rxr->page_pool = NULL;
2723                 return err;
2724         }
2725         return 0;
2726 }
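/* The pool is sized to the RX ring, bound to the NIC's NUMA node, and
 * mapped DMA_BIDIRECTIONAL so that XDP programs may rewrite packet data
 * in place.  The RX fill path then draws pages from this pool (via
 * page_pool_dev_alloc_pages()) and recycles them, instead of hitting
 * the page allocator on every packet.
 */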
2727
2728 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2729 {
2730         int i, rc = 0, agg_rings = 0;
2731
2732         if (!bp->rx_ring)
2733                 return -ENOMEM;
2734
2735         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2736                 agg_rings = 1;
2737
2738         for (i = 0; i < bp->rx_nr_rings; i++) {
2739                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2740                 struct bnxt_ring_struct *ring;
2741
2742                 ring = &rxr->rx_ring_struct;
2743
2744                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2745                 if (rc)
2746                         return rc;
2747
2748                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2749                 if (rc < 0)
2750                         return rc;
2751
2752                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2753                                                 MEM_TYPE_PAGE_POOL,
2754                                                 rxr->page_pool);
2755                 if (rc) {
2756                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2757                         return rc;
2758                 }
2759
2760                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2761                 if (rc)
2762                         return rc;
2763
2764                 ring->grp_idx = i;
2765                 if (agg_rings) {
2766                         u16 mem_size;
2767
2768                         ring = &rxr->rx_agg_ring_struct;
2769                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2770                         if (rc)
2771                                 return rc;
2772
2773                         ring->grp_idx = i;
2774                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2775                         mem_size = rxr->rx_agg_bmap_size / 8;
2776                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2777                         if (!rxr->rx_agg_bmap)
2778                                 return -ENOMEM;
2779                 }
2780         }
2781         if (bp->flags & BNXT_FLAG_TPA)
2782                 rc = bnxt_alloc_tpa_info(bp);
2783         return rc;
2784 }
2785
2786 static void bnxt_free_tx_rings(struct bnxt *bp)
2787 {
2788         int i;
2789         struct pci_dev *pdev = bp->pdev;
2790
2791         if (!bp->tx_ring)
2792                 return;
2793
2794         for (i = 0; i < bp->tx_nr_rings; i++) {
2795                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2796                 struct bnxt_ring_struct *ring;
2797
2798                 if (txr->tx_push) {
2799                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2800                                           txr->tx_push, txr->tx_push_mapping);
2801                         txr->tx_push = NULL;
2802                 }
2803
2804                 ring = &txr->tx_ring_struct;
2805
2806                 bnxt_free_ring(bp, &ring->ring_mem);
2807         }
2808 }
2809
2810 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2811 {
2812         int i, j, rc;
2813         struct pci_dev *pdev = bp->pdev;
2814
2815         bp->tx_push_size = 0;
2816         if (bp->tx_push_thresh) {
2817                 int push_size;
2818
2819                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2820                                         bp->tx_push_thresh);
2821
2822                 if (push_size > 256) {
2823                         push_size = 0;
2824                         bp->tx_push_thresh = 0;
2825                 }
2826
2827                 bp->tx_push_size = push_size;
2828         }
2829
2830         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2831                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2832                 struct bnxt_ring_struct *ring;
2833                 u8 qidx;
2834
2835                 ring = &txr->tx_ring_struct;
2836
2837                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2838                 if (rc)
2839                         return rc;
2840
2841                 ring->grp_idx = txr->bnapi->index;
2842                 if (bp->tx_push_size) {
2843                         dma_addr_t mapping;
2844
2845                         /* One pre-allocated DMA buffer to back up
2846                          * the TX push operation
2847                          */
2848                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2849                                                 bp->tx_push_size,
2850                                                 &txr->tx_push_mapping,
2851                                                 GFP_KERNEL);
2852
2853                         if (!txr->tx_push)
2854                                 return -ENOMEM;
2855
2856                         mapping = txr->tx_push_mapping +
2857                                 sizeof(struct tx_push_bd);
2858                         txr->data_mapping = cpu_to_le64(mapping);
2859                 }
2860                 qidx = bp->tc_to_qidx[j];
2861                 ring->queue_id = bp->q_info[qidx].queue_id;
2862                 if (i < bp->tx_nr_rings_xdp)
2863                         continue;
2864                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2865                         j++;
2866         }
2867         return 0;
2868 }
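/* Note on the push buffer set up above: each TX ring gets one coherent
 * DMA area laid out as a struct tx_push_bd header immediately followed
 * by the packet bytes.  data_mapping is precomputed to point at the
 * data portion, so the pre-mapped copy serves as a DMA-visible backup
 * of the pushed bytes and small packets need no per-packet DMA mapping.
 */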
2869
2870 static void bnxt_free_cp_rings(struct bnxt *bp)
2871 {
2872         int i;
2873
2874         if (!bp->bnapi)
2875                 return;
2876
2877         for (i = 0; i < bp->cp_nr_rings; i++) {
2878                 struct bnxt_napi *bnapi = bp->bnapi[i];
2879                 struct bnxt_cp_ring_info *cpr;
2880                 struct bnxt_ring_struct *ring;
2881                 int j;
2882
2883                 if (!bnapi)
2884                         continue;
2885
2886                 cpr = &bnapi->cp_ring;
2887                 ring = &cpr->cp_ring_struct;
2888
2889                 bnxt_free_ring(bp, &ring->ring_mem);
2890
2891                 for (j = 0; j < 2; j++) {
2892                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2893
2894                         if (cpr2) {
2895                                 ring = &cpr2->cp_ring_struct;
2896                                 bnxt_free_ring(bp, &ring->ring_mem);
2897                                 kfree(cpr2);
2898                                 cpr->cp_ring_arr[j] = NULL;
2899                         }
2900                 }
2901         }
2902 }
2903
2904 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2905 {
2906         struct bnxt_ring_mem_info *rmem;
2907         struct bnxt_ring_struct *ring;
2908         struct bnxt_cp_ring_info *cpr;
2909         int rc;
2910
2911         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2912         if (!cpr)
2913                 return NULL;
2914
2915         ring = &cpr->cp_ring_struct;
2916         rmem = &ring->ring_mem;
2917         rmem->nr_pages = bp->cp_nr_pages;
2918         rmem->page_size = HW_CMPD_RING_SIZE;
2919         rmem->pg_arr = (void **)cpr->cp_desc_ring;
2920         rmem->dma_arr = cpr->cp_desc_mapping;
2921         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
2922         rc = bnxt_alloc_ring(bp, rmem);
2923         if (rc) {
2924                 bnxt_free_ring(bp, rmem);
2925                 kfree(cpr);
2926                 cpr = NULL;
2927         }
2928         return cpr;
2929 }
2930
2931 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2932 {
2933         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
2934         int i, rc, ulp_base_vec, ulp_msix;
2935
2936         ulp_msix = bnxt_get_ulp_msix_num(bp);
2937         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
2938         for (i = 0; i < bp->cp_nr_rings; i++) {
2939                 struct bnxt_napi *bnapi = bp->bnapi[i];
2940                 struct bnxt_cp_ring_info *cpr;
2941                 struct bnxt_ring_struct *ring;
2942
2943                 if (!bnapi)
2944                         continue;
2945
2946                 cpr = &bnapi->cp_ring;
2947                 cpr->bnapi = bnapi;
2948                 ring = &cpr->cp_ring_struct;
2949
2950                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2951                 if (rc)
2952                         return rc;
2953
2954                 if (ulp_msix && i >= ulp_base_vec)
2955                         ring->map_idx = i + ulp_msix;
2956                 else
2957                         ring->map_idx = i;
2958
2959                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2960                         continue;
2961
2962                 if (i < bp->rx_nr_rings) {
2963                         struct bnxt_cp_ring_info *cpr2 =
2964                                 bnxt_alloc_cp_sub_ring(bp);
2965
2966                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
2967                         if (!cpr2)
2968                                 return -ENOMEM;
2969                         cpr2->bnapi = bnapi;
2970                 }
2971                 if ((sh && i < bp->tx_nr_rings) ||
2972                     (!sh && i >= bp->rx_nr_rings)) {
2973                         struct bnxt_cp_ring_info *cpr2 =
2974                                 bnxt_alloc_cp_sub_ring(bp);
2975
2976                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
2977                         if (!cpr2)
2978                                 return -ENOMEM;
2979                         cpr2->bnapi = bnapi;
2980                 }
2981         }
2982         return 0;
2983 }
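/* On P5 chips the per-NAPI completion ring allocated above really acts
 * as a notification queue (NQ); the actual RX and TX completions land
 * in the child rings installed in cp_ring_arr[BNXT_RX_HDL] and
 * cp_ring_arr[BNXT_TX_HDL].  With shared rings the first tx_nr_rings
 * NAPIs carry a TX child alongside their RX child; otherwise the TX
 * children occupy the vectors after the RX-only ones.
 */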
2984
2985 static void bnxt_init_ring_struct(struct bnxt *bp)
2986 {
2987         int i;
2988
2989         for (i = 0; i < bp->cp_nr_rings; i++) {
2990                 struct bnxt_napi *bnapi = bp->bnapi[i];
2991                 struct bnxt_ring_mem_info *rmem;
2992                 struct bnxt_cp_ring_info *cpr;
2993                 struct bnxt_rx_ring_info *rxr;
2994                 struct bnxt_tx_ring_info *txr;
2995                 struct bnxt_ring_struct *ring;
2996
2997                 if (!bnapi)
2998                         continue;
2999
3000                 cpr = &bnapi->cp_ring;
3001                 ring = &cpr->cp_ring_struct;
3002                 rmem = &ring->ring_mem;
3003                 rmem->nr_pages = bp->cp_nr_pages;
3004                 rmem->page_size = HW_CMPD_RING_SIZE;
3005                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3006                 rmem->dma_arr = cpr->cp_desc_mapping;
3007                 rmem->vmem_size = 0;
3008
3009                 rxr = bnapi->rx_ring;
3010                 if (!rxr)
3011                         goto skip_rx;
3012
3013                 ring = &rxr->rx_ring_struct;
3014                 rmem = &ring->ring_mem;
3015                 rmem->nr_pages = bp->rx_nr_pages;
3016                 rmem->page_size = HW_RXBD_RING_SIZE;
3017                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3018                 rmem->dma_arr = rxr->rx_desc_mapping;
3019                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3020                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3021
3022                 ring = &rxr->rx_agg_ring_struct;
3023                 rmem = &ring->ring_mem;
3024                 rmem->nr_pages = bp->rx_agg_nr_pages;
3025                 rmem->page_size = HW_RXBD_RING_SIZE;
3026                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3027                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3028                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3029                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3030
3031 skip_rx:
3032                 txr = bnapi->tx_ring;
3033                 if (!txr)
3034                         continue;
3035
3036                 ring = &txr->tx_ring_struct;
3037                 rmem = &ring->ring_mem;
3038                 rmem->nr_pages = bp->tx_nr_pages;
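                /* TX BDs have the same size and per-page count as RX
                 * BDs, so the RX BD page size is reused here.
                 */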
3039                 rmem->page_size = HW_RXBD_RING_SIZE;
3040                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3041                 rmem->dma_arr = txr->tx_desc_mapping;
3042                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3043                 rmem->vmem = (void **)&txr->tx_buf_ring;
3044         }
3045 }
3046
3047 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3048 {
3049         int i;
3050         u32 prod;
3051         struct rx_bd **rx_buf_ring;
3052
3053         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3054         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3055                 int j;
3056                 struct rx_bd *rxbd;
3057
3058                 rxbd = rx_buf_ring[i];
3059                 if (!rxbd)
3060                         continue;
3061
3062                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3063                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3064                         rxbd->rx_bd_opaque = prod;
3065                 }
3066         }
3067 }
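/* Each BD's rx_bd_opaque is stamped with its absolute ring index here
 * ('prod' runs across page boundaries), so a completion can identify
 * the software buffer slot it refers to without any extra lookup.
 */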
3068
3069 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3070 {
3071         struct net_device *dev = bp->dev;
3072         struct bnxt_rx_ring_info *rxr;
3073         struct bnxt_ring_struct *ring;
3074         u32 prod, type;
3075         int i;
3076
3077         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3078                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3079
3080         if (NET_IP_ALIGN == 2)
3081                 type |= RX_BD_FLAGS_SOP;
3082
3083         rxr = &bp->rx_ring[ring_nr];
3084         ring = &rxr->rx_ring_struct;
3085         bnxt_init_rxbd_pages(ring, type);
3086
3087         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3088                 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
3089                 if (IS_ERR(rxr->xdp_prog)) {
3090                         int rc = PTR_ERR(rxr->xdp_prog);
3091
3092                         rxr->xdp_prog = NULL;
3093                         return rc;
3094                 }
3095         }
3096         prod = rxr->rx_prod;
3097         for (i = 0; i < bp->rx_ring_size; i++) {
3098                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3099                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3100                                     ring_nr, i, bp->rx_ring_size);
3101                         break;
3102                 }
3103                 prod = NEXT_RX(prod);
3104         }
3105         rxr->rx_prod = prod;
3106         ring->fw_ring_id = INVALID_HW_RING_ID;
3107
3108         ring = &rxr->rx_agg_ring_struct;
3109         ring->fw_ring_id = INVALID_HW_RING_ID;
3110
3111         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3112                 return 0;
3113
3114         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3115                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3116
3117         bnxt_init_rxbd_pages(ring, type);
3118
3119         prod = rxr->rx_agg_prod;
3120         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3121                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3122                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3123                                     ring_nr, i, bp->rx_agg_ring_size);
3124                         break;
3125                 }
3126                 prod = NEXT_RX_AGG(prod);
3127         }
3128         rxr->rx_agg_prod = prod;
3129
3130         if (bp->flags & BNXT_FLAG_TPA) {
3131                 if (rxr->rx_tpa) {
3132                         u8 *data;
3133                         dma_addr_t mapping;
3134
3135                         for (i = 0; i < bp->max_tpa; i++) {
3136                                 data = __bnxt_alloc_rx_data(bp, &mapping,
3137                                                             GFP_KERNEL);
3138                                 if (!data)
3139                                         return -ENOMEM;
3140
3141                                 rxr->rx_tpa[i].data = data;
3142                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3143                                 rxr->rx_tpa[i].mapping = mapping;
3144                         }
3145                 } else {
3146                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3147                         return -ENOMEM;
3148                 }
3149         }
3150
3151         return 0;
3152 }
3153
3154 static void bnxt_init_cp_rings(struct bnxt *bp)
3155 {
3156         int i, j;
3157
3158         for (i = 0; i < bp->cp_nr_rings; i++) {
3159                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3160                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3161
3162                 ring->fw_ring_id = INVALID_HW_RING_ID;
3163                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3164                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3165                 for (j = 0; j < 2; j++) {
3166                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3167
3168                         if (!cpr2)
3169                                 continue;
3170
3171                         ring = &cpr2->cp_ring_struct;
3172                         ring->fw_ring_id = INVALID_HW_RING_ID;
3173                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3174                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3175                 }
3176         }
3177 }
3178
3179 static int bnxt_init_rx_rings(struct bnxt *bp)
3180 {
3181         int i, rc = 0;
3182
3183         if (BNXT_RX_PAGE_MODE(bp)) {
3184                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3185                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3186         } else {
3187                 bp->rx_offset = BNXT_RX_OFFSET;
3188                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3189         }
3190
3191         for (i = 0; i < bp->rx_nr_rings; i++) {
3192                 rc = bnxt_init_one_rx_ring(bp, i);
3193                 if (rc)
3194                         break;
3195         }
3196
3197         return rc;
3198 }
3199
3200 static int bnxt_init_tx_rings(struct bnxt *bp)
3201 {
3202         u16 i;
3203
3204         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3205                                    MAX_SKB_FRAGS + 1);
3206
3207         for (i = 0; i < bp->tx_nr_rings; i++) {
3208                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3209                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3210
3211                 ring->fw_ring_id = INVALID_HW_RING_ID;
3212         }
3213
3214         return 0;
3215 }
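/* tx_wake_thresh: a stopped queue is only restarted once at least half
 * the ring is free, and never with fewer free slots than a maximally
 * fragmented packet needs (MAX_SKB_FRAGS + 1 BDs).  E.g. a 511-entry
 * ring wakes at 255 free descriptors.
 */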
3216
3217 static void bnxt_free_ring_grps(struct bnxt *bp)
3218 {
3219         kfree(bp->grp_info);
3220         bp->grp_info = NULL;
3221 }
3222
3223 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3224 {
3225         int i;
3226
3227         if (irq_re_init) {
3228                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3229                                        sizeof(struct bnxt_ring_grp_info),
3230                                        GFP_KERNEL);
3231                 if (!bp->grp_info)
3232                         return -ENOMEM;
3233         }
3234         for (i = 0; i < bp->cp_nr_rings; i++) {
3235                 if (irq_re_init)
3236                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3237                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3238                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3239                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3240                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3241         }
3242         return 0;
3243 }
3244
3245 static void bnxt_free_vnics(struct bnxt *bp)
3246 {
3247         kfree(bp->vnic_info);
3248         bp->vnic_info = NULL;
3249         bp->nr_vnics = 0;
3250 }
3251
3252 static int bnxt_alloc_vnics(struct bnxt *bp)
3253 {
3254         int num_vnics = 1;
3255
3256 #ifdef CONFIG_RFS_ACCEL
3257         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3258                 num_vnics += bp->rx_nr_rings;
3259 #endif
3260
3261         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3262                 num_vnics++;
3263
3264         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3265                                 GFP_KERNEL);
3266         if (!bp->vnic_info)
3267                 return -ENOMEM;
3268
3269         bp->nr_vnics = num_vnics;
3270         return 0;
3271 }
3272
3273 static void bnxt_init_vnics(struct bnxt *bp)
3274 {
3275         int i;
3276
3277         for (i = 0; i < bp->nr_vnics; i++) {
3278                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3279                 int j;
3280
3281                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3282                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3283                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3284
3285                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3286
3287                 if (bp->vnic_info[i].rss_hash_key) {
3288                         if (i == 0)
3289                                 prandom_bytes(vnic->rss_hash_key,
3290                                               HW_HASH_KEY_SIZE);
3291                         else
3292                                 memcpy(vnic->rss_hash_key,
3293                                        bp->vnic_info[0].rss_hash_key,
3294                                        HW_HASH_KEY_SIZE);
3295                 }
3296         }
3297 }
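/* Only VNIC 0 gets a freshly randomized RSS hash key; every other VNIC
 * copies it, so a given flow hashes identically on all of them.
 */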
3298
3299 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3300 {
3301         int pages;
3302
3303         pages = ring_size / desc_per_pg;
3304
3305         if (!pages)
3306                 return 1;
3307
3308         pages++;
3309
3310         while (pages & (pages - 1))
3311                 pages++;
3312
3313         return pages;
3314 }
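/* Worked examples: the loop above rounds the page count up to a power
 * of two so that (nr_pages * desc_per_pg - 1) can serve as an index
 * mask.  With desc_per_pg = 256: ring_size 511 -> 2 pages (512 slots,
 * mask 511); ring_size 1023 -> 4 pages.  An exact multiple such as 512
 * still grows to 4 pages because of the unconditional pages++.
 */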
3315
3316 void bnxt_set_tpa_flags(struct bnxt *bp)
3317 {
3318         bp->flags &= ~BNXT_FLAG_TPA;
3319         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3320                 return;
3321         if (bp->dev->features & NETIF_F_LRO)
3322                 bp->flags |= BNXT_FLAG_LRO;
3323         else if (bp->dev->features & NETIF_F_GRO_HW)
3324                 bp->flags |= BNXT_FLAG_GRO;
3325 }
3326
3327 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3328  * be set on entry.
3329  */
3330 void bnxt_set_ring_params(struct bnxt *bp)
3331 {
3332         u32 ring_size, rx_size, rx_space;
3333         u32 agg_factor = 0, agg_ring_size = 0;
3334
3335         /* 8 for CRC and VLAN */
3336         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3337
3338         rx_space = rx_size + NET_SKB_PAD +
3339                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3340
3341         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3342         ring_size = bp->rx_ring_size;
3343         bp->rx_agg_ring_size = 0;
3344         bp->rx_agg_nr_pages = 0;
3345
3346         if (bp->flags & BNXT_FLAG_TPA)
3347                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3348
3349         bp->flags &= ~BNXT_FLAG_JUMBO;
3350         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3351                 u32 jumbo_factor;
3352
3353                 bp->flags |= BNXT_FLAG_JUMBO;
3354                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3355                 if (jumbo_factor > agg_factor)
3356                         agg_factor = jumbo_factor;
3357         }
3358         agg_ring_size = ring_size * agg_factor;
3359
3360         if (agg_ring_size) {
3361                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3362                                                         RX_DESC_CNT);
3363                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3364                         u32 tmp = agg_ring_size;
3365
3366                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3367                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3368                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3369                                     tmp, agg_ring_size);
3370                 }
3371                 bp->rx_agg_ring_size = agg_ring_size;
3372                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3373                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3374                 rx_space = rx_size + NET_SKB_PAD +
3375                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3376         }
3377
3378         bp->rx_buf_use_size = rx_size;
3379         bp->rx_buf_size = rx_space;
3380
3381         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3382         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3383
3384         ring_size = bp->tx_ring_size;
3385         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3386         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3387
3388         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3389         bp->cp_ring_size = ring_size;
3390
3391         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3392         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3393                 bp->cp_nr_pages = MAX_CP_PAGES;
3394                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3395                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3396                             ring_size, bp->cp_ring_size);
3397         }
3398         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3399         bp->cp_ring_mask = bp->cp_bit - 1;
3400 }
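/* Completion ring sizing sketch: cp_ring_size = rx * (2 + agg_factor)
 * + tx above leaves room for RX, RX-agg and TX completions.  For the
 * default rx = tx = 511 with TPA (agg_factor = 4 on 4 KiB pages) that
 * is 511 * 6 + 511 = 3577 entries, which bnxt_calc_nr_ring_pages()
 * rounds up to 16 pages of 256 completion descriptors (4096 slots).
 */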
3401
3402 /* Changing allocation mode of RX rings.
3403  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3404  */
3405 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3406 {
3407         if (page_mode) {
3408                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3409                         return -EOPNOTSUPP;
3410                 bp->dev->max_mtu =
3411                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3412                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3413                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3414                 bp->rx_dir = DMA_BIDIRECTIONAL;
3415                 bp->rx_skb_func = bnxt_rx_page_skb;
3416                 /* Disable LRO or GRO_HW */
3417                 netdev_update_features(bp->dev);
3418         } else {
3419                 bp->dev->max_mtu = bp->max_mtu;
3420                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3421                 bp->rx_dir = DMA_FROM_DEVICE;
3422                 bp->rx_skb_func = bnxt_rx_skb;
3423         }
3424         return 0;
3425 }
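/* In page mode each RX buffer is a full page with XDP headroom, which
 * is why aggregation rings are disabled and the MTU is capped: an XDP
 * program must be able to see the whole frame linearly.  DMA becomes
 * bidirectional so the program may modify the packet before XDP_TX.
 */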
3426
3427 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3428 {
3429         int i;
3430         struct bnxt_vnic_info *vnic;
3431         struct pci_dev *pdev = bp->pdev;
3432
3433         if (!bp->vnic_info)
3434                 return;
3435
3436         for (i = 0; i < bp->nr_vnics; i++) {
3437                 vnic = &bp->vnic_info[i];
3438
3439                 kfree(vnic->fw_grp_ids);
3440                 vnic->fw_grp_ids = NULL;
3441
3442                 kfree(vnic->uc_list);
3443                 vnic->uc_list = NULL;
3444
3445                 if (vnic->mc_list) {
3446                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3447                                           vnic->mc_list, vnic->mc_list_mapping);
3448                         vnic->mc_list = NULL;
3449                 }
3450
3451                 if (vnic->rss_table) {
3452                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
3453                                           vnic->rss_table,
3454                                           vnic->rss_table_dma_addr);
3455                         vnic->rss_table = NULL;
3456                 }
3457
3458                 vnic->rss_hash_key = NULL;
3459                 vnic->flags = 0;
3460         }
3461 }
3462
3463 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3464 {
3465         int i, rc = 0, size;
3466         struct bnxt_vnic_info *vnic;
3467         struct pci_dev *pdev = bp->pdev;
3468         int max_rings;
3469
3470         for (i = 0; i < bp->nr_vnics; i++) {
3471                 vnic = &bp->vnic_info[i];
3472
3473                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3474                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3475
3476                         if (mem_size > 0) {
3477                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3478                                 if (!vnic->uc_list) {
3479                                         rc = -ENOMEM;
3480                                         goto out;
3481                                 }
3482                         }
3483                 }
3484
3485                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3486                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3487                         vnic->mc_list =
3488                                 dma_alloc_coherent(&pdev->dev,
3489                                                    vnic->mc_list_size,
3490                                                    &vnic->mc_list_mapping,
3491                                                    GFP_KERNEL);
3492                         if (!vnic->mc_list) {
3493                                 rc = -ENOMEM;
3494                                 goto out;
3495                         }
3496                 }
3497
3498                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3499                         goto vnic_skip_grps;
3500
3501                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3502                         max_rings = bp->rx_nr_rings;
3503                 else
3504                         max_rings = 1;
3505
3506                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3507                 if (!vnic->fw_grp_ids) {
3508                         rc = -ENOMEM;
3509                         goto out;
3510                 }
3511 vnic_skip_grps:
3512                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3513                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3514                         continue;
3515
3516                 /* Allocate rss table and hash key */
3517                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3518                                                      &vnic->rss_table_dma_addr,
3519                                                      GFP_KERNEL);
3520                 if (!vnic->rss_table) {
3521                         rc = -ENOMEM;
3522                         goto out;
3523                 }
3524
3525                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3526
3527                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3528                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3529         }
3530         return 0;
3531
3532 out:
3533         return rc;
3534 }
3535
3536 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3537 {
3538         struct pci_dev *pdev = bp->pdev;
3539
3540         if (bp->hwrm_cmd_resp_addr) {
3541                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3542                                   bp->hwrm_cmd_resp_dma_addr);
3543                 bp->hwrm_cmd_resp_addr = NULL;
3544         }
3545
3546         if (bp->hwrm_cmd_kong_resp_addr) {
3547                 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3548                                   bp->hwrm_cmd_kong_resp_addr,
3549                                   bp->hwrm_cmd_kong_resp_dma_addr);
3550                 bp->hwrm_cmd_kong_resp_addr = NULL;
3551         }
3552 }
3553
3554 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3555 {
3556         struct pci_dev *pdev = bp->pdev;
3557
3558         bp->hwrm_cmd_kong_resp_addr =
3559                 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3560                                    &bp->hwrm_cmd_kong_resp_dma_addr,
3561                                    GFP_KERNEL);
3562         if (!bp->hwrm_cmd_kong_resp_addr)
3563                 return -ENOMEM;
3564
3565         return 0;
3566 }
3567
3568 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3569 {
3570         struct pci_dev *pdev = bp->pdev;
3571
3572         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3573                                                    &bp->hwrm_cmd_resp_dma_addr,
3574                                                    GFP_KERNEL);
3575         if (!bp->hwrm_cmd_resp_addr)
3576                 return -ENOMEM;
3577
3578         return 0;
3579 }
3580
3581 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3582 {
3583         if (bp->hwrm_short_cmd_req_addr) {
3584                 struct pci_dev *pdev = bp->pdev;
3585
3586                 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3587                                   bp->hwrm_short_cmd_req_addr,
3588                                   bp->hwrm_short_cmd_req_dma_addr);
3589                 bp->hwrm_short_cmd_req_addr = NULL;
3590         }
3591 }
3592
3593 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3594 {
3595         struct pci_dev *pdev = bp->pdev;
3596
3597         bp->hwrm_short_cmd_req_addr =
3598                 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3599                                    &bp->hwrm_short_cmd_req_dma_addr,
3600                                    GFP_KERNEL);
3601         if (!bp->hwrm_short_cmd_req_addr)
3602                 return -ENOMEM;
3603
3604         return 0;
3605 }
3606
3607 static void bnxt_free_port_stats(struct bnxt *bp)
3608 {
3609         struct pci_dev *pdev = bp->pdev;
3610
3611         bp->flags &= ~BNXT_FLAG_PORT_STATS;
3612         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3613
3614         if (bp->hw_rx_port_stats) {
3615                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3616                                   bp->hw_rx_port_stats,
3617                                   bp->hw_rx_port_stats_map);
3618                 bp->hw_rx_port_stats = NULL;
3619         }
3620
3621         if (bp->hw_tx_port_stats_ext) {
3622                 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3623                                   bp->hw_tx_port_stats_ext,
3624                                   bp->hw_tx_port_stats_ext_map);
3625                 bp->hw_tx_port_stats_ext = NULL;
3626         }
3627
3628         if (bp->hw_rx_port_stats_ext) {
3629                 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3630                                   bp->hw_rx_port_stats_ext,
3631                                   bp->hw_rx_port_stats_ext_map);
3632                 bp->hw_rx_port_stats_ext = NULL;
3633         }
3634
3635         if (bp->hw_pcie_stats) {
3636                 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3637                                   bp->hw_pcie_stats, bp->hw_pcie_stats_map);
3638                 bp->hw_pcie_stats = NULL;
3639         }
3640 }
3641
3642 static void bnxt_free_ring_stats(struct bnxt *bp)
3643 {
3644         struct pci_dev *pdev = bp->pdev;
3645         int size, i;
3646
3647         if (!bp->bnapi)
3648                 return;
3649
3650         size = bp->hw_ring_stats_size;
3651
3652         for (i = 0; i < bp->cp_nr_rings; i++) {
3653                 struct bnxt_napi *bnapi = bp->bnapi[i];
3654                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3655
3656                 if (cpr->hw_stats) {
3657                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3658                                           cpr->hw_stats_map);
3659                         cpr->hw_stats = NULL;
3660                 }
3661         }
3662 }
3663
3664 static int bnxt_alloc_stats(struct bnxt *bp)
3665 {
3666         u32 size, i;
3667         struct pci_dev *pdev = bp->pdev;
3668
3669         size = bp->hw_ring_stats_size;
3670
3671         for (i = 0; i < bp->cp_nr_rings; i++) {
3672                 struct bnxt_napi *bnapi = bp->bnapi[i];
3673                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3674
3675                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3676                                                    &cpr->hw_stats_map,
3677                                                    GFP_KERNEL);
3678                 if (!cpr->hw_stats)
3679                         return -ENOMEM;
3680
3681                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3682         }
3683
3684         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3685                 return 0;
3686
3687         if (bp->hw_rx_port_stats)
3688                 goto alloc_ext_stats;
3689
3690         bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3691                                  sizeof(struct tx_port_stats) + 1024;
3692
3693         bp->hw_rx_port_stats =
3694                 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3695                                    &bp->hw_rx_port_stats_map,
3696                                    GFP_KERNEL);
3697         if (!bp->hw_rx_port_stats)
3698                 return -ENOMEM;
3699
3700         bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
3701         bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3702                                    sizeof(struct rx_port_stats) + 512;
3703         bp->flags |= BNXT_FLAG_PORT_STATS;
3704
3705 alloc_ext_stats:
3706         /* Display extended statistics only if FW supports it */
3707         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
3708                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
3709                         return 0;
3710
3711         if (bp->hw_rx_port_stats_ext)
3712                 goto alloc_tx_ext_stats;
3713
3714         bp->hw_rx_port_stats_ext =
3715                 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3716                                    &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
3717         if (!bp->hw_rx_port_stats_ext)
3718                 return 0;
3719
3720 alloc_tx_ext_stats:
3721         if (bp->hw_tx_port_stats_ext)
3722                 goto alloc_pcie_stats;
3723
3724         if (bp->hwrm_spec_code >= 0x10902 ||
3725             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
3726                 bp->hw_tx_port_stats_ext =
3727                         dma_alloc_coherent(&pdev->dev,
3728                                            sizeof(struct tx_port_stats_ext),
3729                                            &bp->hw_tx_port_stats_ext_map,
3730                                            GFP_KERNEL);
3731         }
3732         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3733
3734 alloc_pcie_stats:
3735         if (bp->hw_pcie_stats ||
3736             !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
3737                 return 0;
3738
3739         bp->hw_pcie_stats =
3740                 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3741                                    &bp->hw_pcie_stats_map, GFP_KERNEL);
3742         if (!bp->hw_pcie_stats)
3743                 return 0;
3744
3745         bp->flags |= BNXT_FLAG_PCIE_STATS;
3746         return 0;
3747 }
3748
3749 static void bnxt_clear_ring_indices(struct bnxt *bp)
3750 {
3751         int i;
3752
3753         if (!bp->bnapi)
3754                 return;
3755
3756         for (i = 0; i < bp->cp_nr_rings; i++) {
3757                 struct bnxt_napi *bnapi = bp->bnapi[i];
3758                 struct bnxt_cp_ring_info *cpr;
3759                 struct bnxt_rx_ring_info *rxr;
3760                 struct bnxt_tx_ring_info *txr;
3761
3762                 if (!bnapi)
3763                         continue;
3764
3765                 cpr = &bnapi->cp_ring;
3766                 cpr->cp_raw_cons = 0;
3767
3768                 txr = bnapi->tx_ring;
3769                 if (txr) {
3770                         txr->tx_prod = 0;
3771                         txr->tx_cons = 0;
3772                 }
3773
3774                 rxr = bnapi->rx_ring;
3775                 if (rxr) {
3776                         rxr->rx_prod = 0;
3777                         rxr->rx_agg_prod = 0;
3778                         rxr->rx_sw_agg_prod = 0;
3779                         rxr->rx_next_cons = 0;
3780                 }
3781         }
3782 }
3783
3784 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3785 {
3786 #ifdef CONFIG_RFS_ACCEL
3787         int i;
3788
3789         /* We are under rtnl_lock and all our NAPIs have been disabled.
3790          * It's safe to delete the hash table.
3791          */
3792         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3793                 struct hlist_head *head;
3794                 struct hlist_node *tmp;
3795                 struct bnxt_ntuple_filter *fltr;
3796
3797                 head = &bp->ntp_fltr_hash_tbl[i];
3798                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3799                         hlist_del(&fltr->hash);
3800                         kfree(fltr);
3801                 }
3802         }
3803         if (irq_reinit) {
3804                 kfree(bp->ntp_fltr_bmap);
3805                 bp->ntp_fltr_bmap = NULL;
3806         }
3807         bp->ntp_fltr_count = 0;
3808 #endif
3809 }
3810
3811 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3812 {
3813 #ifdef CONFIG_RFS_ACCEL
3814         int i, rc = 0;
3815
3816         if (!(bp->flags & BNXT_FLAG_RFS))
3817                 return 0;
3818
3819         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3820                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3821
3822         bp->ntp_fltr_count = 0;
3823         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3824                                     sizeof(long),
3825                                     GFP_KERNEL);
3826
3827         if (!bp->ntp_fltr_bmap)
3828                 rc = -ENOMEM;
3829
3830         return rc;
3831 #else
3832         return 0;
3833 #endif
3834 }
3835
3836 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3837 {
3838         bnxt_free_vnic_attributes(bp);
3839         bnxt_free_tx_rings(bp);
3840         bnxt_free_rx_rings(bp);
3841         bnxt_free_cp_rings(bp);
3842         bnxt_free_ntp_fltrs(bp, irq_re_init);
3843         if (irq_re_init) {
3844                 bnxt_free_ring_stats(bp);
3845                 bnxt_free_ring_grps(bp);
3846                 bnxt_free_vnics(bp);
3847                 kfree(bp->tx_ring_map);
3848                 bp->tx_ring_map = NULL;
3849                 kfree(bp->tx_ring);
3850                 bp->tx_ring = NULL;
3851                 kfree(bp->rx_ring);
3852                 bp->rx_ring = NULL;
3853                 kfree(bp->bnapi);
3854                 bp->bnapi = NULL;
3855         } else {
3856                 bnxt_clear_ring_indices(bp);
3857         }
3858 }
3859
3860 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3861 {
3862         int i, j, rc, size, arr_size;
3863         void *bnapi;
3864
3865         if (irq_re_init) {
3866                 /* Allocate bnapi mem pointer array and mem block for
3867                  * all queues
3868                  */
3869                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3870                                 bp->cp_nr_rings);
3871                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3872                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3873                 if (!bnapi)
3874                         return -ENOMEM;
3875
3876                 bp->bnapi = bnapi;
3877                 bnapi += arr_size;
3878                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3879                         bp->bnapi[i] = bnapi;
3880                         bp->bnapi[i]->index = i;
3881                         bp->bnapi[i]->bp = bp;
3882                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3883                                 struct bnxt_cp_ring_info *cpr =
3884                                         &bp->bnapi[i]->cp_ring;
3885
3886                                 cpr->cp_ring_struct.ring_mem.flags =
3887                                         BNXT_RMEM_RING_PTE_FLAG;
3888                         }
3889                 }
3890
3891                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3892                                       sizeof(struct bnxt_rx_ring_info),
3893                                       GFP_KERNEL);
3894                 if (!bp->rx_ring)
3895                         return -ENOMEM;
3896
3897                 for (i = 0; i < bp->rx_nr_rings; i++) {
3898                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3899
3900                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3901                                 rxr->rx_ring_struct.ring_mem.flags =
3902                                         BNXT_RMEM_RING_PTE_FLAG;
3903                                 rxr->rx_agg_ring_struct.ring_mem.flags =
3904                                         BNXT_RMEM_RING_PTE_FLAG;
3905                         }
3906                         rxr->bnapi = bp->bnapi[i];
3907                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3908                 }
3909
3910                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3911                                       sizeof(struct bnxt_tx_ring_info),
3912                                       GFP_KERNEL);
3913                 if (!bp->tx_ring)
3914                         return -ENOMEM;
3915
3916                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3917                                           GFP_KERNEL);
3918
3919                 if (!bp->tx_ring_map)
3920                         return -ENOMEM;
3921
3922                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3923                         j = 0;
3924                 else
3925                         j = bp->rx_nr_rings;
3926
3927                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3928                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3929
3930                         if (bp->flags & BNXT_FLAG_CHIP_P5)
3931                                 txr->tx_ring_struct.ring_mem.flags =
3932                                         BNXT_RMEM_RING_PTE_FLAG;
3933                         txr->bnapi = bp->bnapi[j];
3934                         bp->bnapi[j]->tx_ring = txr;
3935                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
3936                         if (i >= bp->tx_nr_rings_xdp) {
3937                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
3938                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
3939                         } else {
3940                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
3941                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3942                         }
3943                 }
3944
3945                 rc = bnxt_alloc_stats(bp);
3946                 if (rc)
3947                         goto alloc_mem_err;
3948
3949                 rc = bnxt_alloc_ntp_fltrs(bp);
3950                 if (rc)
3951                         goto alloc_mem_err;
3952
3953                 rc = bnxt_alloc_vnics(bp);
3954                 if (rc)
3955                         goto alloc_mem_err;
3956         }
3957
3958         bnxt_init_ring_struct(bp);
3959
3960         rc = bnxt_alloc_rx_rings(bp);
3961         if (rc)
3962                 goto alloc_mem_err;
3963
3964         rc = bnxt_alloc_tx_rings(bp);
3965         if (rc)
3966                 goto alloc_mem_err;
3967
3968         rc = bnxt_alloc_cp_rings(bp);
3969         if (rc)
3970                 goto alloc_mem_err;
3971
3972         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3973                                   BNXT_VNIC_UCAST_FLAG;
3974         rc = bnxt_alloc_vnic_attributes(bp);
3975         if (rc)
3976                 goto alloc_mem_err;
3977         return 0;
3978
3979 alloc_mem_err:
3980         bnxt_free_mem(bp, true);
3981         return rc;
3982 }
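/* bnapi allocation sketch: a single kzalloc() holds the pointer array
 * followed by the bnxt_napi structs themselves,
 *
 *   [bnapi[0..n-1] ptrs][pad][napi 0][napi 1]...[napi n-1]
 *
 * with both the array and each struct L1-cache aligned, so one
 * allocation (and one kfree() in bnxt_free_mem()) covers them all.
 */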
3983
3984 static void bnxt_disable_int(struct bnxt *bp)
3985 {
3986         int i;
3987
3988         if (!bp->bnapi)
3989                 return;
3990
3991         for (i = 0; i < bp->cp_nr_rings; i++) {
3992                 struct bnxt_napi *bnapi = bp->bnapi[i];
3993                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3994                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3995
3996                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
3997                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3998         }
3999 }
4000
4001 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4002 {
4003         struct bnxt_napi *bnapi = bp->bnapi[n];
4004         struct bnxt_cp_ring_info *cpr;
4005
4006         cpr = &bnapi->cp_ring;
4007         return cpr->cp_ring_struct.map_idx;
4008 }
4009
4010 static void bnxt_disable_int_sync(struct bnxt *bp)
4011 {
4012         int i;
4013
4014         atomic_inc(&bp->intr_sem);
4015
4016         bnxt_disable_int(bp);
4017         for (i = 0; i < bp->cp_nr_rings; i++) {
4018                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4019
4020                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4021         }
4022 }
4023
4024 static void bnxt_enable_int(struct bnxt *bp)
4025 {
4026         int i;
4027
4028         atomic_set(&bp->intr_sem, 0);
4029         for (i = 0; i < bp->cp_nr_rings; i++) {
4030                 struct bnxt_napi *bnapi = bp->bnapi[i];
4031                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4032
4033                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4034         }
4035 }
4036
4037 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4038                             u16 cmpl_ring, u16 target_id)
4039 {
4040         struct input *req = request;
4041
4042         req->req_type = cpu_to_le16(req_type);
4043         req->cmpl_ring = cpu_to_le16(cmpl_ring);
4044         req->target_id = cpu_to_le16(target_id);
4045         if (bnxt_kong_hwrm_message(bp, req))
4046                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4047         else
4048                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4049 }
4050
4051 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4052                                  int timeout, bool silent)
4053 {
4054         int i, intr_process, rc, tmo_count;
4055         struct input *req = msg;
4056         u32 *data = msg;
4057         __le32 *resp_len;
4058         u8 *valid;
4059         u16 cp_ring_id, len = 0;
4060         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4061         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4062         struct hwrm_short_input short_input = {0};
4063         u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4064         u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
4065         u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4066         u16 dst = BNXT_HWRM_CHNL_CHIMP;
4067
4068         if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4069                 if (msg_len > bp->hwrm_max_ext_req_len ||
4070                     !bp->hwrm_short_cmd_req_addr)
4071                         return -EINVAL;
4072         }
4073
4074         if (bnxt_hwrm_kong_chnl(bp, req)) {
4075                 dst = BNXT_HWRM_CHNL_KONG;
4076                 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4077                 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4078                 resp = bp->hwrm_cmd_kong_resp_addr;
4079                 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
4080         }
4081
4082         memset(resp, 0, PAGE_SIZE);
4083         cp_ring_id = le16_to_cpu(req->cmpl_ring);
4084         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4085
4086         req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4087         /* currently supports only one outstanding message */
4088         if (intr_process)
4089                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4090
4091         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4092             msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4093                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4094                 u16 max_msg_len;
4095
4096                 /* Set the boundary for the maximum extended request length
4097                  * for the short cmd format: the device-supplied value, or
4098                  * the maximum internally supported request length.
4099                  */
4100                 max_msg_len = bp->hwrm_max_ext_req_len;
4101
4102                 memcpy(short_cmd_req, req, msg_len);
4103                 if (msg_len < max_msg_len)
4104                         memset(short_cmd_req + msg_len, 0,
4105                                max_msg_len - msg_len);
4106
4107                 short_input.req_type = req->req_type;
4108                 short_input.signature =
4109                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4110                 short_input.size = cpu_to_le16(msg_len);
4111                 short_input.req_addr =
4112                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4113
4114                 data = (u32 *)&short_input;
4115                 msg_len = sizeof(short_input);
4116
4117                 /* Sync memory write before updating doorbell */
4118                 wmb();
4119
4120                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4121         }
4122
4123         /* Write request msg to hwrm channel */
4124         __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4125
4126         for (i = msg_len; i < max_req_len; i += 4)
4127                 writel(0, bp->bar0 + bar_offset + i);
4128
4129         /* Ring channel doorbell */
4130         writel(1, bp->bar0 + doorbell_offset);
4131
4132         if (!timeout)
4133                 timeout = DFLT_HWRM_CMD_TIMEOUT;
4134         /* convert timeout to usec */
4135         timeout *= 1000;
4136
4137         i = 0;
4138         /* Short timeout for the first few iterations:
4139          * number of loops = number of loops for short timeout +
4140          * number of loops for standard timeout.
4141          */
4142         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4143         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4144         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4145         resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
4146
4147         if (intr_process) {
4148                 u16 seq_id = bp->hwrm_intr_seq_id;
4149
4150                 /* Wait until hwrm response cmpl interrupt is processed */
4151                 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4152                        i++ < tmo_count) {
4153                         /* on first few passes, just barely sleep */
4154                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4155                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4156                                              HWRM_SHORT_MAX_TIMEOUT);
4157                         else
4158                                 usleep_range(HWRM_MIN_TIMEOUT,
4159                                              HWRM_MAX_TIMEOUT);
4160                 }
4161
4162                 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4163                         netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4164                                    le16_to_cpu(req->req_type));
4165                         return -1;
4166                 }
4167                 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4168                       HWRM_RESP_LEN_SFT;
4169                 valid = resp_addr + len - 1;
4170         } else {
4171                 int j;
4172
4173                 /* Check if response len is updated */
4174                 for (i = 0; i < tmo_count; i++) {
4175                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4176                               HWRM_RESP_LEN_SFT;
4177                         if (len)
4178                                 break;
4179                         /* on first few passes, just barely sleep */
4180                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4181                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4182                                              HWRM_SHORT_MAX_TIMEOUT);
4183                         else
4184                                 usleep_range(HWRM_MIN_TIMEOUT,
4185                                              HWRM_MAX_TIMEOUT);
4186                 }
4187
4188                 if (i >= tmo_count) {
4189                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4190                                    HWRM_TOTAL_TIMEOUT(i),
4191                                    le16_to_cpu(req->req_type),
4192                                    le16_to_cpu(req->seq_id), len);
4193                         return -EBUSY;
4194                 }
4195
4196                 /* Last byte of resp contains valid bit */
4197                 valid = resp_addr + len - 1;
4198                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4199                         /* make sure we read from updated DMA memory */
4200                         dma_rmb();
4201                         if (*valid)
4202                                 break;
4203                         usleep_range(1, 5);
4204                 }
4205
4206                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4207                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4208                                    HWRM_TOTAL_TIMEOUT(i),
4209                                    le16_to_cpu(req->req_type),
4210                                    le16_to_cpu(req->seq_id), len, *valid);
4211                         return -EBUSY;
4212                 }
4213         }
4214
4215         /* Zero the valid bit for compatibility.  The valid bit in an
4216          * older spec may become a new field in a newer spec.  Ensure
4217          * that a new field not implemented by the old spec reads zero.
4218          */
4219         *valid = 0;
4220         rc = le16_to_cpu(resp->error_code);
4221         if (rc && !silent)
4222                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4223                            le16_to_cpu(resp->req_type),
4224                            le16_to_cpu(resp->seq_id), rc);
4225         return rc;
4226 }
4227
4228 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4229 {
4230         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4231 }
4232
4233 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4234                               int timeout)
4235 {
4236         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4237 }
4238
4239 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4240 {
4241         int rc;
4242
4243         mutex_lock(&bp->hwrm_cmd_lock);
4244         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4245         mutex_unlock(&bp->hwrm_cmd_lock);
4246         return rc;
4247 }
4248
4249 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4250                              int timeout)
4251 {
4252         int rc;
4253
4254         mutex_lock(&bp->hwrm_cmd_lock);
4255         rc = _hwrm_send_message_silent(bp, msg, msg_len, timeout);
4256         mutex_unlock(&bp->hwrm_cmd_lock);
4257         return rc;
4258 }
4259
4260 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
4261                                      int bmap_size)
4262 {
4263         struct hwrm_func_drv_rgtr_input req = {0};
4264         DECLARE_BITMAP(async_events_bmap, 256);
4265         u32 *events = (u32 *)async_events_bmap;
4266         int i;
4267
4268         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4269
4270         req.enables =
4271                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4272
4273         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4274         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
4275                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4276
4277         if (bmap && bmap_size) {
4278                 for (i = 0; i < bmap_size; i++) {
4279                         if (test_bit(i, bmap))
4280                                 __set_bit(i, async_events_bmap);
4281                 }
4282         }
4283
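             /* Copy the 256-bit event bitmap into the request as eight
              * 32-bit words.
              */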
4284         for (i = 0; i < 8; i++)
4285                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4286
4287         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4288 }
4289
4290 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
4291 {
4292         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4293         struct hwrm_func_drv_rgtr_input req = {0};
4294         int rc;
4295
4296         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4297
4298         req.enables =
4299                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4300                             FUNC_DRV_RGTR_REQ_ENABLES_VER);
4301
4302         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4303         req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
4304         req.ver_maj_8b = DRV_VER_MAJ;
4305         req.ver_min_8b = DRV_VER_MIN;
4306         req.ver_upd_8b = DRV_VER_UPD;
4307         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4308         req.ver_min = cpu_to_le16(DRV_VER_MIN);
4309         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4310
4311         if (BNXT_PF(bp)) {
4312                 u32 data[8];
4313                 int i;
4314
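                     /* Build a bitmap of the HWRM commands that firmware
                      * should forward from the VFs to the PF driver.
                      */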
4315                 memset(data, 0, sizeof(data));
4316                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4317                         u16 cmd = bnxt_vf_req_snif[i];
4318                         unsigned int bit, idx;
4319
4320                         idx = cmd / 32;
4321                         bit = cmd % 32;
4322                         data[idx] |= 1 << bit;
4323                 }
4324
4325                 for (i = 0; i < 8; i++)
4326                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4327
4328                 req.enables |=
4329                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4330         }
4331
4332         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4333                 req.flags |= cpu_to_le32(
4334                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4335
4336         mutex_lock(&bp->hwrm_cmd_lock);
4337         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4338         if (rc)
4339                 rc = -EIO;
4340         else if (resp->flags &
4341                  cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4342                 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4343         mutex_unlock(&bp->hwrm_cmd_lock);
4344         return rc;
4345 }
4346
4347 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4348 {
4349         struct hwrm_func_drv_unrgtr_input req = {0};
4350
4351         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4352         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4353 }
4354
4355 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4356 {
4357         int rc;
4358         struct hwrm_tunnel_dst_port_free_input req = {0};
4359
4360         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4361         req.tunnel_type = tunnel_type;
4362
4363         switch (tunnel_type) {
4364         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4365                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4366                 break;
4367         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4368                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4369                 break;
4370         default:
4371                 break;
4372         }
4373
4374         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4375         if (rc)
4376                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4377                            rc);
4378         return rc;
4379 }
4380
4381 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4382                                            u8 tunnel_type)
4383 {
4384         int rc;
4385         struct hwrm_tunnel_dst_port_alloc_input req = {0};
4386         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4387
4388         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4389
4390         req.tunnel_type = tunnel_type;
4391         req.tunnel_dst_port_val = port;
4392
4393         mutex_lock(&bp->hwrm_cmd_lock);
4394         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4395         if (rc) {
4396                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4397                            rc);
4398                 goto err_out;
4399         }
4400
4401         switch (tunnel_type) {
4402         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4403                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4404                 break;
4405         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4406                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
4407                 break;
4408         default:
4409                 break;
4410         }
4411
4412 err_out:
4413         mutex_unlock(&bp->hwrm_cmd_lock);
4414         return rc;
4415 }
4416
4417 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4418 {
4419         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4420         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4421
4422         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4423         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4424
4425         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4426         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4427         req.mask = cpu_to_le32(vnic->rx_mask);
4428         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4429 }
4430
4431 #ifdef CONFIG_RFS_ACCEL
4432 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4433                                             struct bnxt_ntuple_filter *fltr)
4434 {
4435         struct hwrm_cfa_ntuple_filter_free_input req = {0};
4436
4437         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4438         req.ntuple_filter_id = fltr->filter_id;
4439         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4440 }
4441
4442 #define BNXT_NTP_FLTR_FLAGS                                     \
4443         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4444          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4445          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4446          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4447          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4448          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4449          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4450          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4451          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4452          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4453          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4454          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4455          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4456          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4457
4458 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4459                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4460
4461 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4462                                              struct bnxt_ntuple_filter *fltr)
4463 {
4464         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4465         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4466         struct flow_keys *keys = &fltr->fkeys;
4467         struct bnxt_vnic_info *vnic;
4468         u32 dst_ena = 0;
4469         int rc = 0;
4470
4471         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4472         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4473
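             /* Newer firmware can steer by rx ring index on the default
              * vnic; older firmware needs a separate vnic per rx ring
              * (rxq + 1) as the filter destination.
              */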
4474         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
4475                 dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
4476                 req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
4477                 vnic = &bp->vnic_info[0];
4478         } else {
4479                 vnic = &bp->vnic_info[fltr->rxq + 1];
4480         }
4481         req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4482         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
4483
4484         req.ethertype = htons(ETH_P_IP);
4485         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4486         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4487         req.ip_protocol = keys->basic.ip_proto;
4488
4489         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4490                 int i;
4491
4492                 req.ethertype = htons(ETH_P_IPV6);
4493                 req.ip_addr_type =
4494                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4495                 *(struct in6_addr *)&req.src_ipaddr[0] =
4496                         keys->addrs.v6addrs.src;
4497                 *(struct in6_addr *)&req.dst_ipaddr[0] =
4498                         keys->addrs.v6addrs.dst;
4499                 for (i = 0; i < 4; i++) {
4500                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4501                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4502                 }
4503         } else {
4504                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4505                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4506                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4507                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4508         }
4509         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4510                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4511                 req.tunnel_type =
4512                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4513         }
4514
4515         req.src_port = keys->ports.src;
4516         req.src_port_mask = cpu_to_be16(0xffff);
4517         req.dst_port = keys->ports.dst;
4518         req.dst_port_mask = cpu_to_be16(0xffff);
4519
4520         mutex_lock(&bp->hwrm_cmd_lock);
4521         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4522         if (!rc) {
4523                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4524                 fltr->filter_id = resp->ntuple_filter_id;
4525         }
4526         mutex_unlock(&bp->hwrm_cmd_lock);
4527         return rc;
4528 }
4529 #endif
4530
4531 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4532                                      u8 *mac_addr)
4533 {
4534         int rc;
4535         struct hwrm_cfa_l2_filter_alloc_input req = {0};
4536         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4537
4538         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4539         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4540         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4541                 req.flags |=
4542                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4543         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4544         req.enables =
4545                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4546                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4547                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4548         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4549         eth_broadcast_addr(req.l2_addr_mask);
4555
4556         mutex_lock(&bp->hwrm_cmd_lock);
4557         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4558         if (!rc)
4559                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4560                                                         resp->l2_filter_id;
4561         mutex_unlock(&bp->hwrm_cmd_lock);
4562         return rc;
4563 }
4564
4565 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4566 {
4567         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4568         int rc = 0;
4569
4570         /* Any associated ntuple filters will also be cleared by firmware. */
4571         mutex_lock(&bp->hwrm_cmd_lock);
4572         for (i = 0; i < num_of_vnics; i++) {
4573                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4574
4575                 for (j = 0; j < vnic->uc_filter_count; j++) {
4576                         struct hwrm_cfa_l2_filter_free_input req = {0};
4577
4578                         bnxt_hwrm_cmd_hdr_init(bp, &req,
4579                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
4580
4581                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
4582
4583                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4584                                                 HWRM_CMD_TIMEOUT);
4585                 }
4586                 vnic->uc_filter_count = 0;
4587         }
4588         mutex_unlock(&bp->hwrm_cmd_lock);
4589
4590         return rc;
4591 }
4592
4593 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4594 {
4595         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4596         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4597         struct hwrm_vnic_tpa_cfg_input req = {0};
4598
4599         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4600                 return 0;
4601
4602         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4603
4604         if (tpa_flags) {
4605                 u16 mss = bp->dev->mtu - 40;
4606                 u32 nsegs, n, segs = 0, flags;
4607
4608                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4609                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4610                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4611                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4612                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4613                 if (tpa_flags & BNXT_FLAG_GRO)
4614                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4615
4616                 req.flags = cpu_to_le32(flags);
4617
4618                 req.enables =
4619                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4620                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4621                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4622
4623                 /* The number of segs is in log2 units, and the first
4624                  * packet is not counted in these units.
4625                  */
4626                 if (mss <= BNXT_RX_PAGE_SIZE) {
4627                         n = BNXT_RX_PAGE_SIZE / mss;
4628                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4629                 } else {
4630                         n = mss / BNXT_RX_PAGE_SIZE;
4631                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4632                                 n++;
4633                         nsegs = (MAX_SKB_FRAGS - n) / n;
4634                 }
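                     /* Worked example, assuming 4 KB rx pages and
                      * MAX_SKB_FRAGS == 17: a 1500-byte MTU gives mss = 1460,
                      * so n = 2 and nsegs = (17 - 1) * 2 = 32; ilog2(32) = 5
                      * below allows up to 32 aggregated segments after the
                      * first packet.
                      */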
4635
4636                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4637                         segs = MAX_TPA_SEGS_P5;
4638                         max_aggs = bp->max_tpa;
4639                 } else {
4640                         segs = ilog2(nsegs);
4641                 }
4642                 req.max_agg_segs = cpu_to_le16(segs);
4643                 req.max_aggs = cpu_to_le16(max_aggs);
4644
4645                 req.min_agg_len = cpu_to_le32(512);
4646         }
4647         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4648
4649         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4650 }
4651
4652 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4653 {
4654         struct bnxt_ring_grp_info *grp_info;
4655
4656         grp_info = &bp->grp_info[ring->grp_idx];
4657         return grp_info->cp_fw_ring_id;
4658 }
4659
4660 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4661 {
4662         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4663                 struct bnxt_napi *bnapi = rxr->bnapi;
4664                 struct bnxt_cp_ring_info *cpr;
4665
4666                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4667                 return cpr->cp_ring_struct.fw_ring_id;
4668         } else {
4669                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4670         }
4671 }
4672
4673 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4674 {
4675         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4676                 struct bnxt_napi *bnapi = txr->bnapi;
4677                 struct bnxt_cp_ring_info *cpr;
4678
4679                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4680                 return cpr->cp_ring_struct.fw_ring_id;
4681         } else {
4682                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4683         }
4684 }
4685
4686 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4687 {
4688         u32 i, j, max_rings;
4689         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4690         struct hwrm_vnic_rss_cfg_input req = {0};
4691
4692         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4693             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4694                 return 0;
4695
4696         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4697         if (set_rss) {
4698                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4699                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4700                 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4701                         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4702                                 max_rings = bp->rx_nr_rings - 1;
4703                         else
4704                                 max_rings = bp->rx_nr_rings;
4705                 } else {
4706                         max_rings = 1;
4707                 }
4708
4709                 /* Fill the RSS indirection table with ring group ids */
4710                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4711                         if (j == max_rings)
4712                                 j = 0;
4713                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4714                 }
4715
4716                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4717                 req.hash_key_tbl_addr =
4718                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
4719         }
4720         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4721         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4722 }
4723
4724 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4725 {
4726         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4727         u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4728         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4729         struct hwrm_vnic_rss_cfg_input req = {0};
4730
4731         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4732         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4733         if (!set_rss)
4734                 return hwrm_send_message(bp, &req, sizeof(req),
4735                                          HWRM_CMD_TIMEOUT);
4737         req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4738         req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4739         req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4740         req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4741         nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4742         for (i = 0, k = 0; i < nr_ctxs; i++) {
4743                 __le16 *ring_tbl = vnic->rss_table;
4744                 int rc;
4745
4746                 req.ring_table_pair_index = i;
4747                 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
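                     /* Each RSS context takes 64 (rx ring, cmpl ring) id
                      * pairs; wrap around the rx rings until the table for
                      * this context is full.
                      */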
4748                 for (j = 0; j < 64; j++) {
4749                         u16 ring_id;
4750
4751                         ring_id = rxr->rx_ring_struct.fw_ring_id;
4752                         *ring_tbl++ = cpu_to_le16(ring_id);
4753                         ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4754                         *ring_tbl++ = cpu_to_le16(ring_id);
4755                         rxr++;
4756                         k++;
4757                         if (k == max_rings) {
4758                                 k = 0;
4759                                 rxr = &bp->rx_ring[0];
4760                         }
4761                 }
4762                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4763                 if (rc)
4764                         return -EIO;
4765         }
4766         return 0;
4767 }
4768
4769 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4770 {
4771         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4772         struct hwrm_vnic_plcmodes_cfg_input req = {0};
4773
4774         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4775         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4776                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4777                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4778         req.enables =
4779                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4780                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4781         /* thresholds not implemented in firmware yet */
4782         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4783         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4784         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4785         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4786 }
4787
4788 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4789                                         u16 ctx_idx)
4790 {
4791         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4792
4793         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4794         req.rss_cos_lb_ctx_id =
4795                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4796
4797         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4798         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4799 }
4800
4801 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4802 {
4803         int i, j;
4804
4805         for (i = 0; i < bp->nr_vnics; i++) {
4806                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4807
4808                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4809                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4810                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4811                 }
4812         }
4813         bp->rsscos_nr_ctxs = 0;
4814 }
4815
4816 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4817 {
4818         int rc;
4819         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4820         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4821                                                 bp->hwrm_cmd_resp_addr;
4822
4823         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4824                                -1);
4825
4826         mutex_lock(&bp->hwrm_cmd_lock);
4827         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4828         if (!rc)
4829                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4830                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
4831         mutex_unlock(&bp->hwrm_cmd_lock);
4832
4833         return rc;
4834 }
4835
4836 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4837 {
4838         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4839                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4840         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4841 }
4842
4843 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
4844 {
4845         unsigned int ring = 0, grp_idx;
4846         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4847         struct hwrm_vnic_cfg_input req = {0};
4848         u16 def_vlan = 0;
4849
4850         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
4851
4852         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4853                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4854
4855                 req.default_rx_ring_id =
4856                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
4857                 req.default_cmpl_ring_id =
4858                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
4859                 req.enables =
4860                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
4861                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
4862                 goto vnic_mru;
4863         }
4864         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
4865         /* Only RSS is supported for now; COS and LB are TBD */
4866         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4867                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4868                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4869                                            VNIC_CFG_REQ_ENABLES_MRU);
4870         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4871                 req.rss_rule =
4872                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4873                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4874                                            VNIC_CFG_REQ_ENABLES_MRU);
4875                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
4876         } else {
4877                 req.rss_rule = cpu_to_le16(0xffff);
4878         }
4879
4880         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4881             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
4882                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4883                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4884         } else {
4885                 req.cos_rule = cpu_to_le16(0xffff);
4886         }
4887
4888         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4889                 ring = 0;
4890         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
4891                 ring = vnic_id - 1;
4892         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4893                 ring = bp->rx_nr_rings - 1;
4894
4895         grp_idx = bp->rx_ring[ring].bnapi->index;
4896         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
4897         req.lb_rule = cpu_to_le16(0xffff);
4898 vnic_mru:
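             /* The MRU covers the Ethernet header, one VLAN tag and the FCS
              * in addition to the MTU.
              */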
4899         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4900                               VLAN_HLEN);
4901
4902         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4903 #ifdef CONFIG_BNXT_SRIOV
4904         if (BNXT_VF(bp))
4905                 def_vlan = bp->vf.vlan;
4906 #endif
4907         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
4908                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
4909         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
4910                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
4911
4912         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4913 }
4914
4915 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4916 {
4917         int rc = 0;
4918
4919         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4920                 struct hwrm_vnic_free_input req = {0};
4921
4922                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4923                 req.vnic_id =
4924                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4925
4926                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4927                 if (rc)
4928                         return rc;
4929                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4930         }
4931         return rc;
4932 }
4933
4934 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4935 {
4936         u16 i;
4937
4938         for (i = 0; i < bp->nr_vnics; i++)
4939                 bnxt_hwrm_vnic_free_one(bp, i);
4940 }
4941
4942 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4943                                 unsigned int start_rx_ring_idx,
4944                                 unsigned int nr_rings)
4945 {
4946         int rc = 0;
4947         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
4948         struct hwrm_vnic_alloc_input req = {0};
4949         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4950         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4951
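             /* P5 chips have no ring groups; rx rings are associated with
              * the vnic through the RSS ring table instead.
              */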
4952         if (bp->flags & BNXT_FLAG_CHIP_P5)
4953                 goto vnic_no_ring_grps;
4954
4955         /* map ring groups to this vnic */
4956         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4957                 grp_idx = bp->rx_ring[i].bnapi->index;
4958                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
4959                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
4960                                    j, nr_rings);
4961                         break;
4962                 }
4963                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
4964         }
4965
4966 vnic_no_ring_grps:
4967         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
4968                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
4969         if (vnic_id == 0)
4970                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4971
4972         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4973
4974         mutex_lock(&bp->hwrm_cmd_lock);
4975         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4976         if (!rc)
4977                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
4978         mutex_unlock(&bp->hwrm_cmd_lock);
4979         return rc;
4980 }
4981
4982 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4983 {
4984         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4985         struct hwrm_vnic_qcaps_input req = {0};
4986         int rc;
4987
4988         if (bp->hwrm_spec_code < 0x10600)
4989                 return 0;
4990
4991         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4992         mutex_lock(&bp->hwrm_cmd_lock);
4993         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4994         if (!rc) {
4995                 u32 flags = le32_to_cpu(resp->flags);
4996
4997                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
4998                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
4999                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5000                 if (flags &
5001                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5002                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5003                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5004                 if (bp->max_tpa_v2)
5005                         bp->hw_ring_stats_size =
5006                                 sizeof(struct ctx_hw_stats_ext);
5007                 else
5008                         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5009         }
5010         mutex_unlock(&bp->hwrm_cmd_lock);
5011         return rc;
5012 }
5013
5014 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5015 {
5016         u16 i;
5017         int rc = 0;
5018
5019         if (bp->flags & BNXT_FLAG_CHIP_P5)
5020                 return 0;
5021
5022         mutex_lock(&bp->hwrm_cmd_lock);
5023         for (i = 0; i < bp->rx_nr_rings; i++) {
5024                 struct hwrm_ring_grp_alloc_input req = {0};
5025                 struct hwrm_ring_grp_alloc_output *resp =
5026                                         bp->hwrm_cmd_resp_addr;
5027                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5028
5029                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5030
5031                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5032                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5033                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5034                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5035
5036                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5037                                         HWRM_CMD_TIMEOUT);
5038                 if (rc)
5039                         break;
5040
5041                 bp->grp_info[grp_idx].fw_grp_id =
5042                         le32_to_cpu(resp->ring_group_id);
5043         }
5044         mutex_unlock(&bp->hwrm_cmd_lock);
5045         return rc;
5046 }
5047
5048 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5049 {
5050         u16 i;
5051         int rc = 0;
5052         struct hwrm_ring_grp_free_input req = {0};
5053
5054         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5055                 return 0;
5056
5057         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5058
5059         mutex_lock(&bp->hwrm_cmd_lock);
5060         for (i = 0; i < bp->cp_nr_rings; i++) {
5061                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5062                         continue;
5063                 req.ring_group_id =
5064                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5065
5066                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5067                                         HWRM_CMD_TIMEOUT);
5068                 if (rc)
5069                         break;
5070                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5071         }
5072         mutex_unlock(&bp->hwrm_cmd_lock);
5073         return rc;
5074 }
5075
5076 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5077                                     struct bnxt_ring_struct *ring,
5078                                     u32 ring_type, u32 map_index)
5079 {
5080         int rc = 0, err = 0;
5081         struct hwrm_ring_alloc_input req = {0};
5082         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5083         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5084         struct bnxt_ring_grp_info *grp_info;
5085         u16 ring_id;
5086
5087         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5088
5089         req.enables = 0;
5090         if (rmem->nr_pages > 1) {
5091                 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5092                 /* Page size is in log2 units */
5093                 req.page_size = BNXT_PAGE_SHIFT;
5094                 req.page_tbl_depth = 1;
5095         } else {
5096                 req.page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
5097         }
5098         req.fbo = 0;
5099         /* Association of ring index with doorbell index and MSIX number */
5100         req.logical_id = cpu_to_le16(map_index);
5101
5102         switch (ring_type) {
5103         case HWRM_RING_ALLOC_TX: {
5104                 struct bnxt_tx_ring_info *txr;
5105
5106                 txr = container_of(ring, struct bnxt_tx_ring_info,
5107                                    tx_ring_struct);
5108                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5109                 /* Association of transmit ring with completion ring */
5110                 grp_info = &bp->grp_info[ring->grp_idx];
5111                 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5112                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5113                 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5114                 req.queue_id = cpu_to_le16(ring->queue_id);
5115                 break;
5116         }
5117         case HWRM_RING_ALLOC_RX:
5118                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5119                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5120                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5121                         u16 flags = 0;
5122
5123                         /* Association of rx ring with stats context */
5124                         grp_info = &bp->grp_info[ring->grp_idx];
5125                         req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5126                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5127                         req.enables |= cpu_to_le32(
5128                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5129                         if (NET_IP_ALIGN == 2)
5130                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5131                         req.flags = cpu_to_le16(flags);
5132                 }
5133                 break;
5134         case HWRM_RING_ALLOC_AGG:
5135                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5136                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5137                         /* Association of agg ring with rx ring */
5138                         grp_info = &bp->grp_info[ring->grp_idx];
5139                         req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5140                         req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5141                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5142                         req.enables |= cpu_to_le32(
5143                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5144                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5145                 } else {
5146                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5147                 }
5148                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5149                 break;
5150         case HWRM_RING_ALLOC_CMPL:
5151                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5152                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5153                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5154                         /* Association of cp ring with nq */
5155                         grp_info = &bp->grp_info[map_index];
5156                         req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5157                         req.cq_handle = cpu_to_le64(ring->handle);
5158                         req.enables |= cpu_to_le32(
5159                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5160                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5161                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5162                 }
5163                 break;
5164         case HWRM_RING_ALLOC_NQ:
5165                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5166                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5167                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5168                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5169                 break;
5170         default:
5171                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5172                            ring_type);
5173                 return -EINVAL;
5174         }
5175
5176         mutex_lock(&bp->hwrm_cmd_lock);
5177         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5178         err = le16_to_cpu(resp->error_code);
5179         ring_id = le16_to_cpu(resp->ring_id);
5180         mutex_unlock(&bp->hwrm_cmd_lock);
5181
5182         if (rc || err) {
5183                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5184                            ring_type, rc, err);
5185                 return -EIO;
5186         }
5187         ring->fw_ring_id = ring_id;
5188         return rc;
5189 }
5190
5191 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5192 {
5193         int rc;
5194
5195         if (BNXT_PF(bp)) {
5196                 struct hwrm_func_cfg_input req = {0};
5197
5198                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5199                 req.fid = cpu_to_le16(0xffff);
5200                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5201                 req.async_event_cr = cpu_to_le16(idx);
5202                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5203         } else {
5204                 struct hwrm_func_vf_cfg_input req = {0};
5205
5206                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5207                 req.enables =
5208                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5209                 req.async_event_cr = cpu_to_le16(idx);
5210                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5211         }
5212         return rc;
5213 }
5214
5215 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5216                         u32 map_idx, u32 xid)
5217 {
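             /* P5 chips use a single 64-bit doorbell at a fixed BAR1 offset
              * (different for PFs and VFs) and encode the ring id (xid) in
              * the doorbell key; older chips use a 32-bit doorbell per ring
              * at a 0x80 stride.
              */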
5218         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5219                 if (BNXT_PF(bp))
5220                         db->doorbell = bp->bar1 + 0x10000;
5221                 else
5222                         db->doorbell = bp->bar1 + 0x4000;
5223                 switch (ring_type) {
5224                 case HWRM_RING_ALLOC_TX:
5225                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5226                         break;
5227                 case HWRM_RING_ALLOC_RX:
5228                 case HWRM_RING_ALLOC_AGG:
5229                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5230                         break;
5231                 case HWRM_RING_ALLOC_CMPL:
5232                         db->db_key64 = DBR_PATH_L2;
5233                         break;
5234                 case HWRM_RING_ALLOC_NQ:
5235                         db->db_key64 = DBR_PATH_L2;
5236                         break;
5237                 }
5238                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5239         } else {
5240                 db->doorbell = bp->bar1 + map_idx * 0x80;
5241                 switch (ring_type) {
5242                 case HWRM_RING_ALLOC_TX:
5243                         db->db_key32 = DB_KEY_TX;
5244                         break;
5245                 case HWRM_RING_ALLOC_RX:
5246                 case HWRM_RING_ALLOC_AGG:
5247                         db->db_key32 = DB_KEY_RX;
5248                         break;
5249                 case HWRM_RING_ALLOC_CMPL:
5250                         db->db_key32 = DB_KEY_CP;
5251                         break;
5252                 }
5253         }
5254 }
5255
5256 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5257 {
5258         int i, rc = 0;
5259         u32 type;
5260
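             /* On P5 chips each NAPI has a notification queue (NQ) with
              * separate tx and rx completion rings attached to it; older
              * chips use a single completion ring per NAPI.
              */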
5261         if (bp->flags & BNXT_FLAG_CHIP_P5)
5262                 type = HWRM_RING_ALLOC_NQ;
5263         else
5264                 type = HWRM_RING_ALLOC_CMPL;
5265         for (i = 0; i < bp->cp_nr_rings; i++) {
5266                 struct bnxt_napi *bnapi = bp->bnapi[i];
5267                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5268                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5269                 u32 map_idx = ring->map_idx;
5270                 unsigned int vector;
5271
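                     /* The ring can generate an interrupt as soon as firmware
                      * allocates it, so keep the vector disabled until the
                      * doorbell has been set up and armed below.
                      */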
5272                 vector = bp->irq_tbl[map_idx].vector;
5273                 disable_irq_nosync(vector);
5274                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5275                 if (rc) {
5276                         enable_irq(vector);
5277                         goto err_out;
5278                 }
5279                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5280                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5281                 enable_irq(vector);
5282                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5283
5284                 if (!i) {
5285                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5286                         if (rc)
5287                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5288                 }
5289         }
5290
5291         type = HWRM_RING_ALLOC_TX;
5292         for (i = 0; i < bp->tx_nr_rings; i++) {
5293                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5294                 struct bnxt_ring_struct *ring;
5295                 u32 map_idx;
5296
5297                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5298                         struct bnxt_napi *bnapi = txr->bnapi;
5299                         struct bnxt_cp_ring_info *cpr, *cpr2;
5300                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5301
5302                         cpr = &bnapi->cp_ring;
5303                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5304                         ring = &cpr2->cp_ring_struct;
5305                         ring->handle = BNXT_TX_HDL;
5306                         map_idx = bnapi->index;
5307                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5308                         if (rc)
5309                                 goto err_out;
5310                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5311                                     ring->fw_ring_id);
5312                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5313                 }
5314                 ring = &txr->tx_ring_struct;
5315                 map_idx = i;
5316                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5317                 if (rc)
5318                         goto err_out;
5319                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5320         }
5321
5322         type = HWRM_RING_ALLOC_RX;
5323         for (i = 0; i < bp->rx_nr_rings; i++) {
5324                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5325                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5326                 struct bnxt_napi *bnapi = rxr->bnapi;
5327                 u32 map_idx = bnapi->index;
5328
5329                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5330                 if (rc)
5331                         goto err_out;
5332                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5333                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5334                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5335                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5336                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5337                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5338                         struct bnxt_cp_ring_info *cpr2;
5339
5340                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5341                         ring = &cpr2->cp_ring_struct;
5342                         ring->handle = BNXT_RX_HDL;
5343                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5344                         if (rc)
5345                                 goto err_out;
5346                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5347                                     ring->fw_ring_id);
5348                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5349                 }
5350         }
5351
5352         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5353                 type = HWRM_RING_ALLOC_AGG;
5354                 for (i = 0; i < bp->rx_nr_rings; i++) {
5355                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5356                         struct bnxt_ring_struct *ring =
5357                                                 &rxr->rx_agg_ring_struct;
5358                         u32 grp_idx = ring->grp_idx;
5359                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5360
5361                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5362                         if (rc)
5363                                 goto err_out;
5364
5365                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5366                                     ring->fw_ring_id);
5367                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5368                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5369                 }
5370         }
5371 err_out:
5372         return rc;
5373 }
5374
5375 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5376                                    struct bnxt_ring_struct *ring,
5377                                    u32 ring_type, int cmpl_ring_id)
5378 {
5379         int rc;
5380         struct hwrm_ring_free_input req = {0};
5381         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5382         u16 error_code;
5383
5384         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5385         req.ring_type = ring_type;
5386         req.ring_id = cpu_to_le16(ring->fw_ring_id);
5387
5388         mutex_lock(&bp->hwrm_cmd_lock);
5389         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5390         error_code = le16_to_cpu(resp->error_code);
5391         mutex_unlock(&bp->hwrm_cmd_lock);
5392
5393         if (rc || error_code) {
5394                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5395                            ring_type, rc, error_code);
5396                 return -EIO;
5397         }
5398         return 0;
5399 }
5400
5401 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5402 {
5403         u32 type;
5404         int i;
5405
5406         if (!bp->bnapi)
5407                 return;
5408
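             /* When closing the path normally, pass the associated completion
              * ring id so that the HWRM_DONE completion is processed on that
              * ring behind any in-flight completions; otherwise no completion
              * ring is specified.
              */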
5409         for (i = 0; i < bp->tx_nr_rings; i++) {
5410                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5411                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5412
5413                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5414                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5415
5416                         hwrm_ring_free_send_msg(bp, ring,
5417                                                 RING_FREE_REQ_RING_TYPE_TX,
5418                                                 close_path ? cmpl_ring_id :
5419                                                 INVALID_HW_RING_ID);
5420                         ring->fw_ring_id = INVALID_HW_RING_ID;
5421                 }
5422         }
5423
5424         for (i = 0; i < bp->rx_nr_rings; i++) {
5425                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5426                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5427                 u32 grp_idx = rxr->bnapi->index;
5428
5429                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5430                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5431
5432                         hwrm_ring_free_send_msg(bp, ring,
5433                                                 RING_FREE_REQ_RING_TYPE_RX,
5434                                                 close_path ? cmpl_ring_id :
5435                                                 INVALID_HW_RING_ID);
5436                         ring->fw_ring_id = INVALID_HW_RING_ID;
5437                         bp->grp_info[grp_idx].rx_fw_ring_id =
5438                                 INVALID_HW_RING_ID;
5439                 }
5440         }
5441
5442         if (bp->flags & BNXT_FLAG_CHIP_P5)
5443                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5444         else
5445                 type = RING_FREE_REQ_RING_TYPE_RX;
5446         for (i = 0; i < bp->rx_nr_rings; i++) {
5447                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5448                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5449                 u32 grp_idx = rxr->bnapi->index;
5450
5451                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5452                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5453
5454                         hwrm_ring_free_send_msg(bp, ring, type,
5455                                                 close_path ? cmpl_ring_id :
5456                                                 INVALID_HW_RING_ID);
5457                         ring->fw_ring_id = INVALID_HW_RING_ID;
5458                         bp->grp_info[grp_idx].agg_fw_ring_id =
5459                                 INVALID_HW_RING_ID;
5460                 }
5461         }
5462
5463         /* The completion rings are about to be freed.  Once they are
5464          * gone, the IRQ doorbells will no longer work, so disable
5465          * interrupts now and wait for any in-flight handlers to finish.
5466          */
5467         bnxt_disable_int_sync(bp);
5468
5469         if (bp->flags & BNXT_FLAG_CHIP_P5)
5470                 type = RING_FREE_REQ_RING_TYPE_NQ;
5471         else
5472                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5473         for (i = 0; i < bp->cp_nr_rings; i++) {
5474                 struct bnxt_napi *bnapi = bp->bnapi[i];
5475                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5476                 struct bnxt_ring_struct *ring;
5477                 int j;
5478
5479                 for (j = 0; j < 2; j++) {
5480                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5481
5482                         if (cpr2) {
5483                                 ring = &cpr2->cp_ring_struct;
5484                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5485                                         continue;
5486                                 hwrm_ring_free_send_msg(bp, ring,
5487                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
5488                                         INVALID_HW_RING_ID);
5489                                 ring->fw_ring_id = INVALID_HW_RING_ID;
5490                         }
5491                 }
5492                 ring = &cpr->cp_ring_struct;
5493                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5494                         hwrm_ring_free_send_msg(bp, ring, type,
5495                                                 INVALID_HW_RING_ID);
5496                         ring->fw_ring_id = INVALID_HW_RING_ID;
5497                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5498                 }
5499         }
5500 }
5501
5502 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5503                            bool shared);
5504
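/* Read back the ring resources the firmware has actually reserved for
 * this function (fid 0xffff is the HWRM convention for "the calling
 * function itself").  On P5 chips the granted completion rings may be
 * fewer than rx + tx, in which case the RX/TX counts are trimmed to
 * fit and the MSI-X allocation is used as the IRQ reservation.
 */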
5505 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5506 {
5507         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5508         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5509         struct hwrm_func_qcfg_input req = {0};
5510         int rc;
5511
5512         if (bp->hwrm_spec_code < 0x10601)
5513                 return 0;
5514
5515         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5516         req.fid = cpu_to_le16(0xffff);
5517         mutex_lock(&bp->hwrm_cmd_lock);
5518         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5519         if (rc) {
5520                 mutex_unlock(&bp->hwrm_cmd_lock);
5521                 return -EIO;
5522         }
5523
5524         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5525         if (BNXT_NEW_RM(bp)) {
5526                 u16 cp, stats;
5527
5528                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5529                 hw_resc->resv_hw_ring_grps =
5530                         le32_to_cpu(resp->alloc_hw_ring_grps);
5531                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5532                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5533                 stats = le16_to_cpu(resp->alloc_stat_ctx);
5534                 hw_resc->resv_irqs = cp;
5535                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5536                         int rx = hw_resc->resv_rx_rings;
5537                         int tx = hw_resc->resv_tx_rings;
5538
5539                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5540                                 rx >>= 1;
5541                         if (cp < (rx + tx)) {
5542                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5543                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5544                                         rx <<= 1;
5545                                 hw_resc->resv_rx_rings = rx;
5546                                 hw_resc->resv_tx_rings = tx;
5547                         }
5548                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5549                         hw_resc->resv_hw_ring_grps = rx;
5550                 }
5551                 hw_resc->resv_cp_rings = cp;
5552                 hw_resc->resv_stat_ctxs = stats;
5553         }
5554         mutex_unlock(&bp->hwrm_cmd_lock);
5555         return 0;
5556 }
5557
5558 /* Caller must hold bp->hwrm_cmd_lock */
5559 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5560 {
5561         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5562         struct hwrm_func_qcfg_input req = {0};
5563         int rc;
5564
5565         if (bp->hwrm_spec_code < 0x10601)
5566                 return 0;
5567
5568         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5569         req.fid = cpu_to_le16(fid);
5570         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5571         if (!rc)
5572                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5573
5574         return rc;
5575 }
5576
5577 static bool bnxt_rfs_supported(struct bnxt *bp);
5578
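/* Build, but do not send, a HWRM_FUNC_CFG request reserving the given
 * set of PF rings and contexts.  The firmware only acts on fields whose
 * bits are set in req->enables, so a zero count leaves that resource
 * type untouched instead of releasing it.
 */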
5579 static void
5580 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5581                              int tx_rings, int rx_rings, int ring_grps,
5582                              int cp_rings, int stats, int vnics)
5583 {
5584         u32 enables = 0;
5585
5586         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5587         req->fid = cpu_to_le16(0xffff);
5588         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5589         req->num_tx_rings = cpu_to_le16(tx_rings);
5590         if (BNXT_NEW_RM(bp)) {
5591                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5592                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5593                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5594                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5595                         enables |= tx_rings + ring_grps ?
5596                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5597                         enables |= rx_rings ?
5598                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5599                 } else {
5600                         enables |= cp_rings ?
5601                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5602                         enables |= ring_grps ?
5603                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5604                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5605                 }
5606                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5607
5608                 req->num_rx_rings = cpu_to_le16(rx_rings);
5609                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5610                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5611                         req->num_msix = cpu_to_le16(cp_rings);
5612                         req->num_rsscos_ctxs =
5613                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5614                 } else {
5615                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
5616                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5617                         req->num_rsscos_ctxs = cpu_to_le16(1);
5618                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5619                             bnxt_rfs_supported(bp))
5620                                 req->num_rsscos_ctxs =
5621                                         cpu_to_le16(ring_grps + 1);
5622                 }
5623                 req->num_stat_ctxs = cpu_to_le16(stats);
5624                 req->num_vnics = cpu_to_le16(vnics);
5625         }
5626         req->enables = cpu_to_le32(enables);
5627 }
5628
5629 static void
5630 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5631                              struct hwrm_func_vf_cfg_input *req, int tx_rings,
5632                              int rx_rings, int ring_grps, int cp_rings,
5633                              int stats, int vnics)
5634 {
5635         u32 enables = 0;
5636
5637         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5638         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5639         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5640                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5641         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5642         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5643                 enables |= tx_rings + ring_grps ?
5644                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5645         } else {
5646                 enables |= cp_rings ?
5647                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5648                 enables |= ring_grps ?
5649                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5650         }
5651         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5652         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5653
5654         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5655         req->num_tx_rings = cpu_to_le16(tx_rings);
5656         req->num_rx_rings = cpu_to_le16(rx_rings);
5657         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5658                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5659                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5660         } else {
5661                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5662                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5663                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5664         }
5665         req->num_stat_ctxs = cpu_to_le16(stats);
5666         req->num_vnics = cpu_to_le16(vnics);
5667
5668         req->enables = cpu_to_le32(enables);
5669 }
5670
5671 static int
5672 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5673                            int ring_grps, int cp_rings, int stats, int vnics)
5674 {
5675         struct hwrm_func_cfg_input req = {0};
5676         int rc;
5677
5678         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5679                                      cp_rings, stats, vnics);
5680         if (!req.enables)
5681                 return 0;
5682
5683         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5684         if (rc)
5685                 return -ENOMEM;
5686
5687         if (bp->hwrm_spec_code < 0x10601)
5688                 bp->hw_resc.resv_tx_rings = tx_rings;
5689
5690         rc = bnxt_hwrm_get_rings(bp);
5691         return rc;
5692 }
5693
5694 static int
5695 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5696                            int ring_grps, int cp_rings, int stats, int vnics)
5697 {
5698         struct hwrm_func_vf_cfg_input req = {0};
5699         int rc;
5700
5701         if (!BNXT_NEW_RM(bp)) {
5702                 bp->hw_resc.resv_tx_rings = tx_rings;
5703                 return 0;
5704         }
5705
5706         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5707                                      cp_rings, stats, vnics);
5708         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5709         if (rc)
5710                 return -ENOMEM;
5711
5712         rc = bnxt_hwrm_get_rings(bp);
5713         return rc;
5714 }
5715
5716 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
5717                                    int cp, int stat, int vnic)
5718 {
5719         if (BNXT_PF(bp))
5720                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5721                                                   vnic);
5722         else
5723                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5724                                                   vnic);
5725 }
5726
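/* Number of notification queues in use: NQs on P5 chips, completion
 * rings on older chips, one per MSI-X vector.  Any vectors set aside
 * for the RDMA ULP are included; since the ULP range may be carved out
 * above the L2 range, the count is extended to cover the highest ULP
 * vector when necessary.
 */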
5727 int bnxt_nq_rings_in_use(struct bnxt *bp)
5728 {
5729         int cp = bp->cp_nr_rings;
5730         int ulp_msix, ulp_base;
5731
5732         ulp_msix = bnxt_get_ulp_msix_num(bp);
5733         if (ulp_msix) {
5734                 ulp_base = bnxt_get_ulp_msix_base(bp);
5735                 cp += ulp_msix;
5736                 if ((ulp_base + ulp_msix) > cp)
5737                         cp = ulp_base + ulp_msix;
5738         }
5739         return cp;
5740 }
5741
5742 static int bnxt_cp_rings_in_use(struct bnxt *bp)
5743 {
5744         int cp;
5745
5746         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5747                 return bnxt_nq_rings_in_use(bp);
5748
5749         cp = bp->tx_nr_rings + bp->rx_nr_rings;
5750         return cp;
5751 }
5752
5753 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5754 {
5755         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
5756         int cp = bp->cp_nr_rings;
5757
5758         if (!ulp_stat)
5759                 return cp;
5760
5761         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
5762                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
5763
5764         return cp + ulp_stat;
5765 }
5766
5767 static bool bnxt_need_reserve_rings(struct bnxt *bp)
5768 {
5769         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5770         int cp = bnxt_cp_rings_in_use(bp);
5771         int nq = bnxt_nq_rings_in_use(bp);
5772         int rx = bp->rx_nr_rings, stat;
5773         int vnic = 1, grp = rx;
5774
5775         if (bp->hwrm_spec_code < 0x10601)
5776                 return false;
5777
5778         if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5779                 return true;
5780
5781         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5782                 vnic = rx + 1;
5783         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5784                 rx <<= 1;
5785         stat = bnxt_get_func_stat_ctxs(bp);
5786         if (BNXT_NEW_RM(bp) &&
5787             (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
5788              hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
5789              (hw_resc->resv_hw_ring_grps != grp &&
5790               !(bp->flags & BNXT_FLAG_CHIP_P5))))
5791                 return true;
5792         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
5793             hw_resc->resv_irqs != nq)
5794                 return true;
5795         return false;
5796 }
5797
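/* Reserve rings with the firmware and adopt whatever it grants.  Note
 * the RX accounting: with aggregation (jumbo/LRO) enabled, each RX
 * queue consumes two hardware RX rings, hence "rx <<= 1" before the
 * reservation and the halving afterwards.  If the grant cannot support
 * even one aggregated pair, aggregation and LRO are turned off (when
 * the device is not yet up) rather than failing outright.
 */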
5798 static int __bnxt_reserve_rings(struct bnxt *bp)
5799 {
5800         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5801         int cp = bnxt_nq_rings_in_use(bp);
5802         int tx = bp->tx_nr_rings;
5803         int rx = bp->rx_nr_rings;
5804         int grp, rx_rings, rc;
5805         int vnic = 1, stat;
5806         bool sh = false;
5807
5808         if (!bnxt_need_reserve_rings(bp))
5809                 return 0;
5810
5811         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5812                 sh = true;
5813         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5814                 vnic = rx + 1;
5815         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5816                 rx <<= 1;
5817         grp = bp->rx_nr_rings;
5818         stat = bnxt_get_func_stat_ctxs(bp);
5819
5820         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
5821         if (rc)
5822                 return rc;
5823
5824         tx = hw_resc->resv_tx_rings;
5825         if (BNXT_NEW_RM(bp)) {
5826                 rx = hw_resc->resv_rx_rings;
5827                 cp = hw_resc->resv_irqs;
5828                 grp = hw_resc->resv_hw_ring_grps;
5829                 vnic = hw_resc->resv_vnics;
5830                 stat = hw_resc->resv_stat_ctxs;
5831         }
5832
5833         rx_rings = rx;
5834         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5835                 if (rx >= 2) {
5836                         rx_rings = rx >> 1;
5837                 } else {
5838                         if (netif_running(bp->dev))
5839                                 return -ENOMEM;
5840
5841                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5842                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5843                         bp->dev->hw_features &= ~NETIF_F_LRO;
5844                         bp->dev->features &= ~NETIF_F_LRO;
5845                         bnxt_set_ring_params(bp);
5846                 }
5847         }
5848         rx_rings = min_t(int, rx_rings, grp);
5849         cp = min_t(int, cp, bp->cp_nr_rings);
5850         if (stat > bnxt_get_ulp_stat_ctxs(bp))
5851                 stat -= bnxt_get_ulp_stat_ctxs(bp);
5852         cp = min_t(int, cp, stat);
5853         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
5854         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5855                 rx = rx_rings << 1;
5856         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
5857         bp->tx_nr_rings = tx;
5858         bp->rx_nr_rings = rx_rings;
5859         bp->cp_nr_rings = cp;
5860
5861         if (!tx || !rx || !cp || !grp || !vnic || !stat)
5862                 return -ENOMEM;
5863
5864         return rc;
5865 }
5866
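/* Dry-run a VF ring reservation.  The *_ASSETS_TEST flags ask the
 * firmware to check whether the requested resources could be granted
 * without actually committing them, which lets a new configuration
 * (e.g. a different channel count) be validated before anything is
 * torn down.
 */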
5867 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5868                                     int ring_grps, int cp_rings, int stats,
5869                                     int vnics)
5870 {
5871         struct hwrm_func_vf_cfg_input req = {0};
5872         u32 flags;
5873         int rc;
5874
5875         if (!BNXT_NEW_RM(bp))
5876                 return 0;
5877
5878         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5879                                      cp_rings, stats, vnics);
5880         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
5881                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5882                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
5883                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5884                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
5885                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
5886         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5887                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5888
5889         req.flags = cpu_to_le32(flags);
5890         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5891         if (rc)
5892                 return -ENOMEM;
5893         return 0;
5894 }
5895
5896 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5897                                     int ring_grps, int cp_rings, int stats,
5898                                     int vnics)
5899 {
5900         struct hwrm_func_cfg_input req = {0};
5901         u32 flags;
5902         int rc;
5903
5904         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5905                                      cp_rings, stats, vnics);
5906         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
5907         if (BNXT_NEW_RM(bp)) {
5908                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5909                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
5910                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5911                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
5912                 if (bp->flags & BNXT_FLAG_CHIP_P5)
5913                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
5914                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
5915                 else
5916                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5917         }
5918
5919         req.flags = cpu_to_le32(flags);
5920         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5921         if (rc)
5922                 return -ENOMEM;
5923         return 0;
5924 }
5925
5926 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5927                                  int ring_grps, int cp_rings, int stats,
5928                                  int vnics)
5929 {
5930         if (bp->hwrm_spec_code < 0x10801)
5931                 return 0;
5932
5933         if (BNXT_PF(bp))
5934                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
5935                                                 ring_grps, cp_rings, stats,
5936                                                 vnics);
5937
5938         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
5939                                         cp_rings, stats, vnics);
5940 }
5941
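/* Discover the chip's interrupt coalescing limits.  Conservative legacy
 * defaults are filled in first so that older firmware (spec < 0x10902,
 * which lacks HWRM_RING_AGGINT_QCAPS) still works; newer firmware then
 * overwrites them with its advertised capabilities.
 */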
5942 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
5943 {
5944         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5945         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5946         struct hwrm_ring_aggint_qcaps_input req = {0};
5947         int rc;
5948
5949         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
5950         coal_cap->num_cmpl_dma_aggr_max = 63;
5951         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
5952         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
5953         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
5954         coal_cap->int_lat_tmr_min_max = 65535;
5955         coal_cap->int_lat_tmr_max_max = 65535;
5956         coal_cap->num_cmpl_aggr_int_max = 65535;
5957         coal_cap->timer_units = 80;
5958
5959         if (bp->hwrm_spec_code < 0x10902)
5960                 return;
5961
5962         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
5963         mutex_lock(&bp->hwrm_cmd_lock);
5964         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5965         if (!rc) {
5966                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
5967                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
5968                 coal_cap->num_cmpl_dma_aggr_max =
5969                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
5970                 coal_cap->num_cmpl_dma_aggr_during_int_max =
5971                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
5972                 coal_cap->cmpl_aggr_dma_tmr_max =
5973                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
5974                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
5975                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
5976                 coal_cap->int_lat_tmr_min_max =
5977                         le16_to_cpu(resp->int_lat_tmr_min_max);
5978                 coal_cap->int_lat_tmr_max_max =
5979                         le16_to_cpu(resp->int_lat_tmr_max_max);
5980                 coal_cap->num_cmpl_aggr_int_max =
5981                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
5982                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
5983         }
5984         mutex_unlock(&bp->hwrm_cmd_lock);
5985 }
5986
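/* Convert a coalescing time in microseconds to device timer ticks.
 * timer_units is presumably the tick period in nanoseconds (80 by
 * default, see bnxt_hwrm_coal_params_qcaps), so e.g. 12 usec works out
 * to 12000 / 80 = 150 ticks.
 */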
5987 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
5988 {
5989         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5990
5991         return usec * 1000 / coal_cap->timer_units;
5992 }
5993
5994 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
5995         struct bnxt_coal *hw_coal,
5996         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5997 {
5998         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5999         u32 cmpl_params = coal_cap->cmpl_params;
6000         u16 val, tmr, max, flags = 0;
6001
6002         max = hw_coal->bufs_per_record * 128;
6003         if (hw_coal->budget)
6004                 max = hw_coal->bufs_per_record * hw_coal->budget;
6005         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6006
6007         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6008         req->num_cmpl_aggr_int = cpu_to_le16(val);
6009
6010         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6011         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6012
6013         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6014                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6015         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6016
6017         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6018         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6019         req->int_lat_tmr_max = cpu_to_le16(tmr);
6020
6021         /* min timer set to 1/2 of interrupt timer */
6022         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6023                 val = tmr / 2;
6024                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6025                 req->int_lat_tmr_min = cpu_to_le16(val);
6026                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6027         }
6028
6029         /* buf timer set to 1/4 of interrupt timer */
6030         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6031         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6032
6033         if (cmpl_params &
6034             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6035                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6036                 val = clamp_t(u16, tmr, 1,
6037                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6038                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6039                 req->enables |=
6040                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6041         }
6042
6043         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6044                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6045         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6046             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6047                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6048         req->flags = cpu_to_le16(flags);
6049         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6050 }
6051
6052 /* Caller holds bp->hwrm_cmd_lock */
6053 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6054                                    struct bnxt_coal *hw_coal)
6055 {
6056         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6057         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6058         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6059         u32 nq_params = coal_cap->nq_params;
6060         u16 tmr;
6061
6062         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6063                 return 0;
6064
6065         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6066                                -1, -1);
6067         req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6068         req.flags =
6069                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6070
6071         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6072         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6073         req.int_lat_tmr_min = cpu_to_le16(tmr);
6074         req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6075         return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6076 }
6077
6078 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6079 {
6080         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6081         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6082         struct bnxt_coal coal;
6083
6084         /* Tick values are in microseconds.
6085          * 1 coal_buf x bufs_per_record = 1 completion record.
6086          */
6087         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6088
6089         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6090         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6091
6092         if (!bnapi->rx_ring)
6093                 return -ENODEV;
6094
6095         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6096                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6097
6098         bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6099
6100         req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6101
6102         return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6103                                  HWRM_CMD_TIMEOUT);
6104 }
6105
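/* Program coalescing on every ring.  Two template requests are built up
 * front (one with RX parameters, one with TX) and only ring_id is
 * patched per iteration.  On P5 chips, a NAPI that services both an RX
 * and a TX ring owns two separate completion rings, so the TX template
 * is sent a second time for the TX completion ring, and the associated
 * NQ then gets its own minimum timer via __bnxt_hwrm_set_coal_nq.
 */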
6106 int bnxt_hwrm_set_coal(struct bnxt *bp)
6107 {
6108         int i, rc = 0;
6109         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6110                                                            req_tx = {0}, *req;
6111
6112         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6113                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6114         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6115                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6116
6117         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6118         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6119
6120         mutex_lock(&bp->hwrm_cmd_lock);
6121         for (i = 0; i < bp->cp_nr_rings; i++) {
6122                 struct bnxt_napi *bnapi = bp->bnapi[i];
6123                 struct bnxt_coal *hw_coal;
6124                 u16 ring_id;
6125
6126                 req = &req_rx;
6127                 if (!bnapi->rx_ring) {
6128                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6129                         req = &req_tx;
6130                 } else {
6131                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6132                 }
6133                 req->ring_id = cpu_to_le16(ring_id);
6134
6135                 rc = _hwrm_send_message(bp, req, sizeof(*req),
6136                                         HWRM_CMD_TIMEOUT);
6137                 if (rc)
6138                         break;
6139
6140                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6141                         continue;
6142
6143                 if (bnapi->rx_ring && bnapi->tx_ring) {
6144                         req = &req_tx;
6145                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6146                         req->ring_id = cpu_to_le16(ring_id);
6147                         rc = _hwrm_send_message(bp, req, sizeof(*req),
6148                                                 HWRM_CMD_TIMEOUT);
6149                         if (rc)
6150                                 break;
6151                 }
6152                 if (bnapi->rx_ring)
6153                         hw_coal = &bp->rx_coal;
6154                 else
6155                         hw_coal = &bp->tx_coal;
6156                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6157         }
6158         mutex_unlock(&bp->hwrm_cmd_lock);
6159         return rc;
6160 }
6161
6162 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6163 {
6164         int rc = 0, i;
6165         struct hwrm_stat_ctx_free_input req = {0};
6166
6167         if (!bp->bnapi)
6168                 return 0;
6169
6170         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6171                 return 0;
6172
6173         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6174
6175         mutex_lock(&bp->hwrm_cmd_lock);
6176         for (i = 0; i < bp->cp_nr_rings; i++) {
6177                 struct bnxt_napi *bnapi = bp->bnapi[i];
6178                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6179
6180                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6181                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6182
6183                         rc = _hwrm_send_message(bp, &req, sizeof(req),
6184                                                 HWRM_CMD_TIMEOUT);
6185                         if (rc)
6186                                 break;
6187
6188                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6189                 }
6190         }
6191         mutex_unlock(&bp->hwrm_cmd_lock);
6192         return rc;
6193 }
6194
6195 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6196 {
6197         int rc = 0, i;
6198         struct hwrm_stat_ctx_alloc_input req = {0};
6199         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6200
6201         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6202                 return 0;
6203
6204         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6205
6206         req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6207         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6208
6209         mutex_lock(&bp->hwrm_cmd_lock);
6210         for (i = 0; i < bp->cp_nr_rings; i++) {
6211                 struct bnxt_napi *bnapi = bp->bnapi[i];
6212                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6213
6214                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
6215
6216                 rc = _hwrm_send_message(bp, &req, sizeof(req),
6217                                         HWRM_CMD_TIMEOUT);
6218                 if (rc)
6219                         break;
6220
6221                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6222
6223                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6224         }
6225         mutex_unlock(&bp->hwrm_cmd_lock);
6226         return rc;
6227 }
6228
6229 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6230 {
6231         struct hwrm_func_qcfg_input req = {0};
6232         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6233         u16 flags;
6234         int rc;
6235
6236         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6237         req.fid = cpu_to_le16(0xffff);
6238         mutex_lock(&bp->hwrm_cmd_lock);
6239         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6240         if (rc)
6241                 goto func_qcfg_exit;
6242
6243 #ifdef CONFIG_BNXT_SRIOV
6244         if (BNXT_VF(bp)) {
6245                 struct bnxt_vf_info *vf = &bp->vf;
6246
6247                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6248         }
6249 #endif
6250         flags = le16_to_cpu(resp->flags);
6251         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6252                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6253                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6254                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6255                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6256         }
6257         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6258                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6259
6260         switch (resp->port_partition_type) {
6261         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6262         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6263         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6264                 bp->port_partition_type = resp->port_partition_type;
6265                 break;
6266         }
6267         if (bp->hwrm_spec_code < 0x10707 ||
6268             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6269                 bp->br_mode = BRIDGE_MODE_VEB;
6270         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6271                 bp->br_mode = BRIDGE_MODE_VEPA;
6272         else
6273                 bp->br_mode = BRIDGE_MODE_UNDEF;
6274
6275         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6276         if (!bp->max_mtu)
6277                 bp->max_mtu = BNXT_MAX_MTU;
6278
6279 func_qcfg_exit:
6280         mutex_unlock(&bp->hwrm_cmd_lock);
6281         return rc;
6282 }
6283
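/* On chips that keep their context structures in host memory (P5 and
 * later), query how much backing store the firmware needs: entry sizes
 * and entry-count limits for the QP, SRQ, CQ, VNIC, stats, MR/AV, TIM
 * and TQM contexts.  Firmware that does not implement this command is
 * treated as needing no backing store (rc is forced back to 0).
 */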
6284 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6285 {
6286         struct hwrm_func_backing_store_qcaps_input req = {0};
6287         struct hwrm_func_backing_store_qcaps_output *resp =
6288                 bp->hwrm_cmd_resp_addr;
6289         int rc;
6290
6291         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6292                 return 0;
6293
6294         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6295         mutex_lock(&bp->hwrm_cmd_lock);
6296         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6297         if (!rc) {
6298                 struct bnxt_ctx_pg_info *ctx_pg;
6299                 struct bnxt_ctx_mem_info *ctx;
6300                 int i;
6301
6302                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6303                 if (!ctx) {
6304                         rc = -ENOMEM;
6305                         goto ctx_err;
6306                 }
6307                 ctx_pg = kcalloc(bp->max_q + 1, sizeof(*ctx_pg), GFP_KERNEL);
6308                 if (!ctx_pg) {
6309                         kfree(ctx);
6310                         rc = -ENOMEM;
6311                         goto ctx_err;
6312                 }
6313                 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6314                         ctx->tqm_mem[i] = ctx_pg;
6315
6316                 bp->ctx = ctx;
6317                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6318                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6319                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6320                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6321                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6322                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6323                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6324                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6325                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6326                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6327                 ctx->vnic_max_vnic_entries =
6328                         le16_to_cpu(resp->vnic_max_vnic_entries);
6329                 ctx->vnic_max_ring_table_entries =
6330                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6331                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6332                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6333                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6334                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6335                 ctx->tqm_min_entries_per_ring =
6336                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6337                 ctx->tqm_max_entries_per_ring =
6338                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6339                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6340                 if (!ctx->tqm_entries_multiple)
6341                         ctx->tqm_entries_multiple = 1;
6342                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6343                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6344                 ctx->mrav_num_entries_units =
6345                         le16_to_cpu(resp->mrav_num_entries_units);
6346                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6347                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6348         } else {
6349                 rc = 0;
6350         }
6351 ctx_err:
6352         mutex_unlock(&bp->hwrm_cmd_lock);
6353         return rc;
6354 }
6355
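/* Encode the page size and indirection depth of a ring memory block
 * into the HWRM "pg_size_lvl" byte: bits 0-3 hold the level (0 = flat
 * page array, 1 or 2 = levels of page tables) and bits 4-7 hold the
 * page size code (0 = 4K default, 1 = 8K at BNXT_PAGE_SHIFT 13,
 * 2 = 64K).  *pg_dir gets the DMA address of either the first data
 * page or the top-level page table.
 */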
6356 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6357                                   __le64 *pg_dir)
6358 {
6359         u8 pg_size = 0;
6360
6361         if (BNXT_PAGE_SHIFT == 13)
6362                 pg_size = 1 << 4;
6363         else if (BNXT_PAGE_SHIFT == 16)
6364                 pg_size = 2 << 4;
6365
6366         *pg_attr = pg_size;
6367         if (rmem->depth >= 1) {
6368                 if (rmem->depth == 2)
6369                         *pg_attr |= 2;
6370                 else
6371                         *pg_attr |= 1;
6372                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6373         } else {
6374                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6375         }
6376 }
6377
6378 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6379         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6380          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6381          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6382          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6383          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6384
6385 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6386 {
6387         struct hwrm_func_backing_store_cfg_input req = {0};
6388         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6389         struct bnxt_ctx_pg_info *ctx_pg;
6390         __le32 *num_entries;
6391         __le64 *pg_dir;
6392         u32 flags = 0;
6393         u8 *pg_attr;
6394         int i, rc;
6395         u32 ena;
6396
6397         if (!ctx)
6398                 return 0;
6399
6400         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6401         req.enables = cpu_to_le32(enables);
6402
6403         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6404                 ctx_pg = &ctx->qp_mem;
6405                 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6406                 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6407                 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6408                 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6409                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6410                                       &req.qpc_pg_size_qpc_lvl,
6411                                       &req.qpc_page_dir);
6412         }
6413         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6414                 ctx_pg = &ctx->srq_mem;
6415                 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6416                 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6417                 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6418                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6419                                       &req.srq_pg_size_srq_lvl,
6420                                       &req.srq_page_dir);
6421         }
6422         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6423                 ctx_pg = &ctx->cq_mem;
6424                 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6425                 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6426                 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6427                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6428                                       &req.cq_page_dir);
6429         }
6430         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6431                 ctx_pg = &ctx->vnic_mem;
6432                 req.vnic_num_vnic_entries =
6433                         cpu_to_le16(ctx->vnic_max_vnic_entries);
6434                 req.vnic_num_ring_table_entries =
6435                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
6436                 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6437                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6438                                       &req.vnic_pg_size_vnic_lvl,
6439                                       &req.vnic_page_dir);
6440         }
6441         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6442                 ctx_pg = &ctx->stat_mem;
6443                 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6444                 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6445                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6446                                       &req.stat_pg_size_stat_lvl,
6447                                       &req.stat_page_dir);
6448         }
6449         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6450                 ctx_pg = &ctx->mrav_mem;
6451                 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6452                 if (ctx->mrav_num_entries_units)
6453                         flags |=
6454                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
6455                 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6456                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6457                                       &req.mrav_pg_size_mrav_lvl,
6458                                       &req.mrav_page_dir);
6459         }
6460         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6461                 ctx_pg = &ctx->tim_mem;
6462                 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6463                 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6464                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6465                                       &req.tim_pg_size_tim_lvl,
6466                                       &req.tim_page_dir);
6467         }
6468         for (i = 0, num_entries = &req.tqm_sp_num_entries,
6469              pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6470              pg_dir = &req.tqm_sp_page_dir,
6471              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6472              i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6473                 if (!(enables & ena))
6474                         continue;
6475
6476                 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6477                 ctx_pg = ctx->tqm_mem[i];
6478                 *num_entries = cpu_to_le32(ctx_pg->entries);
6479                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6480         }
6481         req.flags = cpu_to_le32(flags);
6482         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6483         if (rc)
6484                 rc = -EIO;
6485         return rc;
6486 }
6487
6488 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6489                                   struct bnxt_ctx_pg_info *ctx_pg)
6490 {
6491         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6492
6493         rmem->page_size = BNXT_PAGE_SIZE;
6494         rmem->pg_arr = ctx_pg->ctx_pg_arr;
6495         rmem->dma_arr = ctx_pg->ctx_dma_arr;
6496         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6497         if (rmem->depth >= 1)
6498                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6499         return bnxt_alloc_ring(bp, rmem);
6500 }
6501
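/* Allocate the pages backing one context region, using up to two levels
 * of page tables for large regions.  A rough sizing sketch, assuming
 * MAX_CTX_PAGES is one page's worth of 64-bit PTEs (512 with 4K pages):
 * a 16 MB region needs 4096 data pages, more than MAX_CTX_PAGES, so
 * eight level-1 tables are allocated and one level-2 table points at
 * them (rmem->depth = 2).
 */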
6502 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6503                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6504                                   u8 depth)
6505 {
6506         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6507         int rc;
6508
6509         if (!mem_size)
6510                 return 0;
6511
6512         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6513         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6514                 ctx_pg->nr_pages = 0;
6515                 return -EINVAL;
6516         }
6517         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6518                 int nr_tbls, i;
6519
6520                 rmem->depth = 2;
6521                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6522                                              GFP_KERNEL);
6523                 if (!ctx_pg->ctx_pg_tbl)
6524                         return -ENOMEM;
6525                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6526                 rmem->nr_pages = nr_tbls;
6527                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6528                 if (rc)
6529                         return rc;
6530                 for (i = 0; i < nr_tbls; i++) {
6531                         struct bnxt_ctx_pg_info *pg_tbl;
6532
6533                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6534                         if (!pg_tbl)
6535                                 return -ENOMEM;
6536                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6537                         rmem = &pg_tbl->ring_mem;
6538                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6539                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6540                         rmem->depth = 1;
6541                         rmem->nr_pages = MAX_CTX_PAGES;
6542                         if (i == (nr_tbls - 1)) {
6543                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6544
6545                                 if (rem)
6546                                         rmem->nr_pages = rem;
6547                         }
6548                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6549                         if (rc)
6550                                 break;
6551                 }
6552         } else {
6553                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6554                 if (rmem->nr_pages > 1 || depth)
6555                         rmem->depth = 1;
6556                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6557         }
6558         return rc;
6559 }
6560
6561 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6562                                   struct bnxt_ctx_pg_info *ctx_pg)
6563 {
6564         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6565
6566         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6567             ctx_pg->ctx_pg_tbl) {
6568                 int i, nr_tbls = rmem->nr_pages;
6569
6570                 for (i = 0; i < nr_tbls; i++) {
6571                         struct bnxt_ctx_pg_info *pg_tbl;
6572                         struct bnxt_ring_mem_info *rmem2;
6573
6574                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
6575                         if (!pg_tbl)
6576                                 continue;
6577                         rmem2 = &pg_tbl->ring_mem;
6578                         bnxt_free_ring(bp, rmem2);
6579                         ctx_pg->ctx_pg_arr[i] = NULL;
6580                         kfree(pg_tbl);
6581                         ctx_pg->ctx_pg_tbl[i] = NULL;
6582                 }
6583                 kfree(ctx_pg->ctx_pg_tbl);
6584                 ctx_pg->ctx_pg_tbl = NULL;
6585         }
6586         bnxt_free_ring(bp, rmem);
6587         ctx_pg->nr_pages = 0;
6588 }
6589
6590 static void bnxt_free_ctx_mem(struct bnxt *bp)
6591 {
6592         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6593         int i;
6594
6595         if (!ctx)
6596                 return;
6597
6598         if (ctx->tqm_mem[0]) {
6599                 for (i = 0; i < bp->max_q + 1; i++)
6600                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6601                 kfree(ctx->tqm_mem[0]);
6602                 ctx->tqm_mem[0] = NULL;
6603         }
6604
6605         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6606         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6607         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6608         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6609         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6610         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6611         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6612         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6613 }
6614
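/* Size and allocate every context backing store region, then hand the
 * layout to the firmware.  When the RoCE ULP may be used (and this is
 * not a kdump kernel), extra QP and SRQ entries are budgeted on top of
 * the L2 requirements so RDMA resources are not starved later.
 */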
6615 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6616 {
6617         struct bnxt_ctx_pg_info *ctx_pg;
6618         struct bnxt_ctx_mem_info *ctx;
6619         u32 mem_size, ena, entries;
6620         u32 num_mr, num_ah;
6621         u32 extra_srqs = 0;
6622         u32 extra_qps = 0;
6623         u8 pg_lvl = 1;
6624         int i, rc;
6625
6626         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6627         if (rc) {
6628                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6629                            rc);
6630                 return rc;
6631         }
6632         ctx = bp->ctx;
6633         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6634                 return 0;
6635
6636         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
6637                 pg_lvl = 2;
6638                 extra_qps = 65536;
6639                 extra_srqs = 8192;
6640         }
6641
6642         ctx_pg = &ctx->qp_mem;
6643         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6644                           extra_qps;
6645         mem_size = ctx->qp_entry_size * ctx_pg->entries;
6646         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6647         if (rc)
6648                 return rc;
6649
6650         ctx_pg = &ctx->srq_mem;
6651         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
6652         mem_size = ctx->srq_entry_size * ctx_pg->entries;
6653         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6654         if (rc)
6655                 return rc;
6656
6657         ctx_pg = &ctx->cq_mem;
6658         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
6659         mem_size = ctx->cq_entry_size * ctx_pg->entries;
6660         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6661         if (rc)
6662                 return rc;
6663
6664         ctx_pg = &ctx->vnic_mem;
6665         ctx_pg->entries = ctx->vnic_max_vnic_entries +
6666                           ctx->vnic_max_ring_table_entries;
6667         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6668         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6669         if (rc)
6670                 return rc;
6671
6672         ctx_pg = &ctx->stat_mem;
6673         ctx_pg->entries = ctx->stat_max_entries;
6674         mem_size = ctx->stat_entry_size * ctx_pg->entries;
6675         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6676         if (rc)
6677                 return rc;
6678
6679         ena = 0;
6680         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6681                 goto skip_rdma;
6682
6683         ctx_pg = &ctx->mrav_mem;
6684         /* 128K extra AH entries are needed to accommodate the static
6685          * AH contexts allocated by the firmware.
6686          */
6687         num_mr = 1024 * 256;
6688         num_ah = 1024 * 128;
6689         ctx_pg->entries = num_mr + num_ah;
6690         mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6691         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
6692         if (rc)
6693                 return rc;
6694         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
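        /* When mrav_num_entries_units is nonzero, the entry count is a
         * packed field: MR entries in the high 16 bits and AH entries in
         * the low 16 bits, each expressed in multiples of
         * mrav_num_entries_units.
         */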
6695         if (ctx->mrav_num_entries_units)
6696                 ctx_pg->entries =
6697                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
6698                          (num_ah / ctx->mrav_num_entries_units);
6699
6700         ctx_pg = &ctx->tim_mem;
6701         ctx_pg->entries = ctx->qp_mem.entries;
6702         mem_size = ctx->tim_entry_size * ctx_pg->entries;
6703         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6704         if (rc)
6705                 return rc;
6706         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6707
6708 skip_rdma:
6709         entries = ctx->qp_max_l2_entries + extra_qps;
6710         entries = roundup(entries, ctx->tqm_entries_multiple);
6711         entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6712                           ctx->tqm_max_entries_per_ring);
6713         for (i = 0; i < bp->max_q + 1; i++) {
6714                 ctx_pg = ctx->tqm_mem[i];
6715                 ctx_pg->entries = entries;
6716                 mem_size = ctx->tqm_entry_size * entries;
6717                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6718                 if (rc)
6719                         return rc;
6720                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
6721         }
6722         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6723         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6724         if (rc)
6725                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6726                            rc);
6727         else
6728                 ctx->flags |= BNXT_CTX_FLAG_INITED;
6729
6730         return 0;
6731 }
6732
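     /* Query the f/w for min/max resource limits (rings, ring groups,
      * VNICs, stat contexts, etc.).  When @all is false, only
      * max_tx_sch_inputs is refreshed.
      */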
6733 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
6734 {
6735         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6736         struct hwrm_func_resource_qcaps_input req = {0};
6737         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6738         int rc;
6739
6740         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6741         req.fid = cpu_to_le16(0xffff);
6742
6743         mutex_lock(&bp->hwrm_cmd_lock);
6744         rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6745                                        HWRM_CMD_TIMEOUT);
6746         if (rc) {
6747                 rc = -EIO;
6748                 goto hwrm_func_resc_qcaps_exit;
6749         }
6750
6751         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6752         if (!all)
6753                 goto hwrm_func_resc_qcaps_exit;
6754
6755         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6756         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6757         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6758         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6759         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6760         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6761         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6762         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6763         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6764         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6765         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6766         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6767         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6768         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6769         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6770         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6771
6772         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6773                 u16 max_msix = le16_to_cpu(resp->max_msix);
6774
6775                 hw_resc->max_nqs = max_msix;
6776                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6777         }
6778
6779         if (BNXT_PF(bp)) {
6780                 struct bnxt_pf_info *pf = &bp->pf;
6781
6782                 pf->vf_resv_strategy =
6783                         le16_to_cpu(resp->vf_reservation_strategy);
6784                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
6785                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6786         }
6787 hwrm_func_resc_qcaps_exit:
6788         mutex_unlock(&bp->hwrm_cmd_lock);
6789         return rc;
6790 }
6791
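     /* Query basic function capabilities into bp->flags and bp->fw_cap,
      * record the max resource counts, and capture the PF/VF identity
      * (fid, port and MAC address).
      */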
6792 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
6793 {
6794         int rc = 0;
6795         struct hwrm_func_qcaps_input req = {0};
6796         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6797         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6798         u32 flags;
6799
6800         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6801         req.fid = cpu_to_le16(0xffff);
6802
6803         mutex_lock(&bp->hwrm_cmd_lock);
6804         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6805         if (rc)
6806                 goto hwrm_func_qcaps_exit;
6807
6808         flags = le32_to_cpu(resp->flags);
6809         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
6810                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6811         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
6812                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6813         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
6814                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
6815         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
6816                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
6817
6818         bp->tx_push_thresh = 0;
6819         if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
6820                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6821
6822         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6823         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6824         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6825         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6826         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6827         if (!hw_resc->max_hw_ring_grps)
6828                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6829         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6830         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6831         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6832
6833         if (BNXT_PF(bp)) {
6834                 struct bnxt_pf_info *pf = &bp->pf;
6835
6836                 pf->fw_fid = le16_to_cpu(resp->fid);
6837                 pf->port_id = le16_to_cpu(resp->port_id);
6838                 bp->dev->dev_port = pf->port_id;
6839                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
6840                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6841                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6842                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6843                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6844                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6845                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6846                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6847                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
6848                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
6849                         bp->flags |= BNXT_FLAG_WOL_CAP;
6850         } else {
6851 #ifdef CONFIG_BNXT_SRIOV
6852                 struct bnxt_vf_info *vf = &bp->vf;
6853
6854                 vf->fw_fid = le16_to_cpu(resp->fid);
6855                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
6856 #endif
6857         }
6858
6859 hwrm_func_qcaps_exit:
6860         mutex_unlock(&bp->hwrm_cmd_lock);
6861         return rc;
6862 }
6863
6864 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
6865
6866 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
6867 {
6868         int rc;
6869
6870         rc = __bnxt_hwrm_func_qcaps(bp);
6871         if (rc)
6872                 return rc;
6873         rc = bnxt_hwrm_queue_qportcfg(bp);
6874         if (rc) {
6875                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
6876                 return rc;
6877         }
6878         if (bp->hwrm_spec_code >= 0x10803) {
6879                 rc = bnxt_alloc_ctx_mem(bp);
6880                 if (rc)
6881                         return rc;
6882                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
6883                 if (!rc)
6884                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
6885         }
6886         return 0;
6887 }
6888
6889 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
6890 {
6891         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
6892         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
6893         int rc = 0;
6894         u32 flags;
6895
6896         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
6897                 return 0;
6898
6899         resp = bp->hwrm_cmd_resp_addr;
6900         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
6901
6902         mutex_lock(&bp->hwrm_cmd_lock);
6903         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6904         if (rc)
6905                 goto hwrm_cfa_adv_qcaps_exit;
6906
6907         flags = le32_to_cpu(resp->flags);
6908         if (flags &
6909             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
6910                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
6911
6912 hwrm_cfa_adv_qcaps_exit:
6913         mutex_unlock(&bp->hwrm_cmd_lock);
6914         return rc;
6915 }
6916
6917 static int bnxt_hwrm_func_reset(struct bnxt *bp)
6918 {
6919         struct hwrm_func_reset_input req = {0};
6920
6921         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
6922         req.enables = 0;
6923
6924         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
6925 }
6926
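     /* Discover the CoS queue IDs and profiles for this port and derive
      * bp->max_q/bp->max_tc.  CNP-profile queues are skipped for L2 use
      * unless this is a PF without RoCE capability.
      */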
6927 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
6928 {
6929         int rc = 0;
6930         struct hwrm_queue_qportcfg_input req = {0};
6931         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
6932         u8 i, j, *qptr;
6933         bool no_rdma;
6934
6935         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
6936
6937         mutex_lock(&bp->hwrm_cmd_lock);
6938         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6939         if (rc)
6940                 goto qportcfg_exit;
6941
6942         if (!resp->max_configurable_queues) {
6943                 rc = -EINVAL;
6944                 goto qportcfg_exit;
6945         }
6946         bp->max_tc = resp->max_configurable_queues;
6947         bp->max_lltc = resp->max_configurable_lossless_queues;
6948         if (bp->max_tc > BNXT_MAX_QUEUE)
6949                 bp->max_tc = BNXT_MAX_QUEUE;
6950
6951         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
6952         qptr = &resp->queue_id0;
6953         for (i = 0, j = 0; i < bp->max_tc; i++) {
6954                 bp->q_info[j].queue_id = *qptr;
6955                 bp->q_ids[i] = *qptr++;
6956                 bp->q_info[j].queue_profile = *qptr++;
6957                 bp->tc_to_qidx[j] = j;
6958                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
6959                     (no_rdma && BNXT_PF(bp)))
6960                         j++;
6961         }
6962         bp->max_q = bp->max_tc;
6963         bp->max_tc = max_t(u8, j, 1);
6964
6965         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
6966                 bp->max_tc = 1;
6967
6968         if (bp->max_lltc > bp->max_tc)
6969                 bp->max_lltc = bp->max_tc;
6970
6971 qportcfg_exit:
6972         mutex_unlock(&bp->hwrm_cmd_lock);
6973         return rc;
6974 }
6975
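     /* Negotiate the HWRM interface version with the f/w and cache the
      * f/w version string, default command timeout, maximum request
      * lengths and the dev_caps_cfg capability flags.
      */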
6976 static int bnxt_hwrm_ver_get(struct bnxt *bp)
6977 {
6978         int rc;
6979         struct hwrm_ver_get_input req = {0};
6980         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
6981         u32 dev_caps_cfg;
6982
6983         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
6984         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
6985         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6986         req.hwrm_intf_min = HWRM_VERSION_MINOR;
6987         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6988         mutex_lock(&bp->hwrm_cmd_lock);
6989         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6990         if (rc)
6991                 goto hwrm_ver_get_exit;
6992
6993         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
6994
6995         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
6996                              resp->hwrm_intf_min_8b << 8 |
6997                              resp->hwrm_intf_upd_8b;
6998         if (resp->hwrm_intf_maj_8b < 1) {
6999                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7000                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7001                             resp->hwrm_intf_upd_8b);
7002                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7003         }
7004         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
7005                  resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
7006                  resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
7007
7008         if (strlen(resp->active_pkg_name)) {
7009                 int fw_ver_len = strlen(bp->fw_ver_str);
7010
7011                 snprintf(bp->fw_ver_str + fw_ver_len,
7012                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7013                          resp->active_pkg_name);
7014                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7015         }
7016
7017         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7018         if (!bp->hwrm_cmd_timeout)
7019                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7020
7021         if (resp->hwrm_intf_maj_8b >= 1) {
7022                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7023                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7024         }
7025         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7026                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7027
7028         bp->chip_num = le16_to_cpu(resp->chip_num);
7029         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7030             !resp->chip_metal)
7031                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7032
7033         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7034         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7035             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7036                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7037
7038         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7039                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7040
7041         if (dev_caps_cfg &
7042             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7043                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7044
7045         if (dev_caps_cfg &
7046             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7047                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7048
7049         if (dev_caps_cfg &
7050             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7051                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7052
7053 hwrm_ver_get_exit:
7054         mutex_unlock(&bp->hwrm_cmd_lock);
7055         return rc;
7056 }
7057
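     /* Push the host's wall-clock time (UTC) to the f/w, e.g. so that
      * f/w timestamps are meaningful.  Requires HWRM spec 1.4.0+, or
      * 1.9.1+ when called on a VF.
      */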
7058 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7059 {
7060         struct hwrm_fw_set_time_input req = {0};
7061         struct tm tm;
7062         time64_t now = ktime_get_real_seconds();
7063
7064         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7065             bp->hwrm_spec_code < 0x10400)
7066                 return -EOPNOTSUPP;
7067
7068         time64_to_tm(now, 0, &tm);
7069         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7070         req.year = cpu_to_le16(1900 + tm.tm_year);
7071         req.month = 1 + tm.tm_mon;
7072         req.day = tm.tm_mday;
7073         req.hour = tm.tm_hour;
7074         req.minute = tm.tm_min;
7075         req.second = tm.tm_sec;
7076         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7077 }
7078
7079 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
7080 {
7081         int rc;
7082         struct bnxt_pf_info *pf = &bp->pf;
7083         struct hwrm_port_qstats_input req = {0};
7084
7085         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7086                 return 0;
7087
7088         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7089         req.port_id = cpu_to_le16(pf->port_id);
7090         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
7091         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
7092         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7093         return rc;
7094 }
7095
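     /* Fetch the extended port statistics, then query the priority-to-CoS
      * mapping so bp->pri2cos[] can translate each priority to a queue
      * index.  pri2cos is only marked valid if the f/w returned enough TX
      * stats to cover the per-priority counters.
      */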
7096 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
7097 {
7098         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7099         struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
7100         struct hwrm_port_qstats_ext_input req = {0};
7101         struct bnxt_pf_info *pf = &bp->pf;
7102         u32 tx_stat_size;
7103         int rc;
7104
7105         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7106                 return 0;
7107
7108         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7109         req.port_id = cpu_to_le16(pf->port_id);
7110         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7111         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
7112         tx_stat_size = bp->hw_tx_port_stats_ext ?
7113                        sizeof(*bp->hw_tx_port_stats_ext) : 0;
7114         req.tx_stat_size = cpu_to_le16(tx_stat_size);
7115         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
7116         mutex_lock(&bp->hwrm_cmd_lock);
7117         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7118         if (!rc) {
7119                 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7120                 bp->fw_tx_stats_ext_size = tx_stat_size ?
7121                         le16_to_cpu(resp->tx_stat_size) / 8 : 0;
7122         } else {
7123                 bp->fw_rx_stats_ext_size = 0;
7124                 bp->fw_tx_stats_ext_size = 0;
7125         }
7126         if (bp->fw_tx_stats_ext_size <=
7127             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7128                 mutex_unlock(&bp->hwrm_cmd_lock);
7129                 bp->pri2cos_valid = 0;
7130                 return rc;
7131         }
7132
7133         bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7134         req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7135
7136         rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7137         if (!rc) {
7138                 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7139                 u8 *pri2cos;
7140                 int i, j;
7141
7142                 resp2 = bp->hwrm_cmd_resp_addr;
7143                 pri2cos = &resp2->pri0_cos_queue_id;
7144                 for (i = 0; i < 8; i++) {
7145                         u8 queue_id = pri2cos[i];
7146
7147                         for (j = 0; j < bp->max_q; j++) {
7148                                 if (bp->q_ids[j] == queue_id)
7149                                         bp->pri2cos[i] = j;
7150                         }
7151                 }
7152                 bp->pri2cos_valid = 1;
7153         }
7154         mutex_unlock(&bp->hwrm_cmd_lock);
7155         return rc;
7156 }
7157
7158 static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
7159 {
7160         struct hwrm_pcie_qstats_input req = {0};
7161
7162         if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
7163                 return 0;
7164
7165         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
7166         req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
7167         req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
7168         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7169 }
7170
7171 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7172 {
7173         if (bp->vxlan_port_cnt) {
7174                 bnxt_hwrm_tunnel_dst_port_free(
7175                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7176         }
7177         bp->vxlan_port_cnt = 0;
7178         if (bp->nge_port_cnt) {
7179                 bnxt_hwrm_tunnel_dst_port_free(
7180                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7181         }
7182         bp->nge_port_cnt = 0;
7183 }
7184
7185 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7186 {
7187         int rc, i;
7188         u32 tpa_flags = 0;
7189
7190         if (set_tpa)
7191                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7192         for (i = 0; i < bp->nr_vnics; i++) {
7193                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7194                 if (rc) {
7195                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
7196                                    i, rc);
7197                         return rc;
7198                 }
7199         }
7200         return 0;
7201 }
7202
7203 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7204 {
7205         int i;
7206
7207         for (i = 0; i < bp->nr_vnics; i++)
7208                 bnxt_hwrm_vnic_set_rss(bp, i, false);
7209 }
7210
7211 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7212                                     bool irq_re_init)
7213 {
7214         if (bp->vnic_info) {
7215                 bnxt_hwrm_clear_vnic_filter(bp);
7216         /* clear all RSS settings before freeing the vnic ctx */
7217                 bnxt_hwrm_clear_vnic_rss(bp);
7218                 bnxt_hwrm_vnic_ctx_free(bp);
7219         /* before freeing the vnic, undo the vnic TPA settings */
7220                 if (bp->flags & BNXT_FLAG_TPA)
7221                         bnxt_set_tpa(bp, false);
7222                 bnxt_hwrm_vnic_free(bp);
7223         }
7224         bnxt_hwrm_ring_free(bp, close_path);
7225         bnxt_hwrm_ring_grp_free(bp);
7226         if (irq_re_init) {
7227                 bnxt_hwrm_stat_ctx_free(bp);
7228                 bnxt_hwrm_free_tunnel_ports(bp);
7229         }
7230 }
7231
7232 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7233 {
7234         struct hwrm_func_cfg_input req = {0};
7235         int rc;
7236
7237         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7238         req.fid = cpu_to_le16(0xffff);
7239         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7240         if (br_mode == BRIDGE_MODE_VEB)
7241                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7242         else if (br_mode == BRIDGE_MODE_VEPA)
7243                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7244         else
7245                 return -EINVAL;
7246         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7247         if (rc)
7248                 rc = -EIO;
7249         return rc;
7250 }
7251
7252 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7253 {
7254         struct hwrm_func_cfg_input req = {0};
7255         int rc;
7256
7257         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7258                 return 0;
7259
7260         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7261         req.fid = cpu_to_le16(0xffff);
7262         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
7263         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
7264         if (size == 128)
7265                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
7266
7267         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7268         if (rc)
7269                 rc = -EIO;
7270         return rc;
7271 }
7272
7273 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7274 {
7275         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
7276         int rc;
7277
7278         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
7279                 goto skip_rss_ctx;
7280
7281         /* allocate context for vnic */
7282         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
7283         if (rc) {
7284                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7285                            vnic_id, rc);
7286                 goto vnic_setup_err;
7287         }
7288         bp->rsscos_nr_ctxs++;
7289
7290         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7291                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
7292                 if (rc) {
7293                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
7294                                    vnic_id, rc);
7295                         goto vnic_setup_err;
7296                 }
7297                 bp->rsscos_nr_ctxs++;
7298         }
7299
7300 skip_rss_ctx:
7301         /* configure default vnic, ring grp */
7302         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7303         if (rc) {
7304                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7305                            vnic_id, rc);
7306                 goto vnic_setup_err;
7307         }
7308
7309         /* Enable RSS hashing on vnic */
7310         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
7311         if (rc) {
7312                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
7313                            vnic_id, rc);
7314                 goto vnic_setup_err;
7315         }
7316
7317         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7318                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7319                 if (rc) {
7320                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7321                                    vnic_id, rc);
7322                 }
7323         }
7324
7325 vnic_setup_err:
7326         return rc;
7327 }
7328
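     /* P5 chips need one RSS context per 64 RX rings, so allocate
      * DIV_ROUND_UP(rx_nr_rings, 64) contexts before configuring RSS and
      * the VNIC itself.
      */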
7329 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7330 {
7331         int rc, i, nr_ctxs;
7332
7333         nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
7334         for (i = 0; i < nr_ctxs; i++) {
7335                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
7336                 if (rc) {
7337                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
7338                                    vnic_id, i, rc);
7339                         break;
7340                 }
7341                 bp->rsscos_nr_ctxs++;
7342         }
7343         if (i < nr_ctxs)
7344                 return -ENOMEM;
7345
7346         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
7347         if (rc) {
7348                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
7349                            vnic_id, rc);
7350                 return rc;
7351         }
7352         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7353         if (rc) {
7354                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7355                            vnic_id, rc);
7356                 return rc;
7357         }
7358         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7359                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7360                 if (rc) {
7361                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7362                                    vnic_id, rc);
7363                 }
7364         }
7365         return rc;
7366 }
7367
7368 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7369 {
7370         if (bp->flags & BNXT_FLAG_CHIP_P5)
7371                 return __bnxt_setup_vnic_p5(bp, vnic_id);
7372         else
7373                 return __bnxt_setup_vnic(bp, vnic_id);
7374 }
7375
7376 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7377 {
7378 #ifdef CONFIG_RFS_ACCEL
7379         int i, rc = 0;
7380
7381         if (bp->flags & BNXT_FLAG_CHIP_P5)
7382                 return 0;
7383
7384         for (i = 0; i < bp->rx_nr_rings; i++) {
7385                 struct bnxt_vnic_info *vnic;
7386                 u16 vnic_id = i + 1;
7387                 u16 ring_id = i;
7388
7389                 if (vnic_id >= bp->nr_vnics)
7390                         break;
7391
7392                 vnic = &bp->vnic_info[vnic_id];
7393                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7394                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7395                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
7396                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
7397                 if (rc) {
7398                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7399                                    vnic_id, rc);
7400                         break;
7401                 }
7402                 rc = bnxt_setup_vnic(bp, vnic_id);
7403                 if (rc)
7404                         break;
7405         }
7406         return rc;
7407 #else
7408         return 0;
7409 #endif
7410 }
7411
7412 /* Allow PF and VF with default VLAN to be in promiscuous mode */
7413 static bool bnxt_promisc_ok(struct bnxt *bp)
7414 {
7415 #ifdef CONFIG_BNXT_SRIOV
7416         if (BNXT_VF(bp) && !bp->vf.vlan)
7417                 return false;
7418 #endif
7419         return true;
7420 }
7421
7422 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7423 {
7424         int rc;
7425
7426         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7427         if (rc) {
7428                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7429                            rc);
7430                 return rc;
7431         }
7432
7433         rc = bnxt_hwrm_vnic_cfg(bp, 1);
7434         if (rc) {
7435                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
7436                            rc);
7437                 return rc;
7438         }
7439         return rc;
7440 }
7441
7442 static int bnxt_cfg_rx_mode(struct bnxt *);
7443 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
7444
7445 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7446 {
7447         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7448         int rc = 0;
7449         unsigned int rx_nr_rings = bp->rx_nr_rings;
7450
7451         if (irq_re_init) {
7452                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7453                 if (rc) {
7454                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7455                                    rc);
7456                         goto err_out;
7457                 }
7458         }
7459
7460         rc = bnxt_hwrm_ring_alloc(bp);
7461         if (rc) {
7462                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7463                 goto err_out;
7464         }
7465
7466         rc = bnxt_hwrm_ring_grp_alloc(bp);
7467         if (rc) {
7468                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7469                 goto err_out;
7470         }
7471
7472         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7473                 rx_nr_rings--;
7474
7475         /* default vnic 0 */
7476         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
7477         if (rc) {
7478                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7479                 goto err_out;
7480         }
7481
7482         rc = bnxt_setup_vnic(bp, 0);
7483         if (rc)
7484                 goto err_out;
7485
7486         if (bp->flags & BNXT_FLAG_RFS) {
7487                 rc = bnxt_alloc_rfs_vnics(bp);
7488                 if (rc)
7489                         goto err_out;
7490         }
7491
7492         if (bp->flags & BNXT_FLAG_TPA) {
7493                 rc = bnxt_set_tpa(bp, true);
7494                 if (rc)
7495                         goto err_out;
7496         }
7497
7498         if (BNXT_VF(bp))
7499                 bnxt_update_vf_mac(bp);
7500
7501         /* Filter for default vnic 0 */
7502         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7503         if (rc) {
7504                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7505                 goto err_out;
7506         }
7507         vnic->uc_filter_count = 1;
7508
7509         vnic->rx_mask = 0;
7510         if (bp->dev->flags & IFF_BROADCAST)
7511                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7512
7513         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7514                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7515
7516         if (bp->dev->flags & IFF_ALLMULTI) {
7517                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7518                 vnic->mc_list_count = 0;
7519         } else {
7520                 u32 mask = 0;
7521
7522                 bnxt_mc_list_updated(bp, &mask);
7523                 vnic->rx_mask |= mask;
7524         }
7525
7526         rc = bnxt_cfg_rx_mode(bp);
7527         if (rc)
7528                 goto err_out;
7529
7530         rc = bnxt_hwrm_set_coal(bp);
7531         if (rc)
7532                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
7533                                 rc);
7534
7535         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7536                 rc = bnxt_setup_nitroa0_vnic(bp);
7537                 if (rc)
7538                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7539                                    rc);
7540         }
7541
7542         if (BNXT_VF(bp)) {
7543                 bnxt_hwrm_func_qcfg(bp);
7544                 netdev_update_features(bp->dev);
7545         }
7546
7547         return 0;
7548
7549 err_out:
7550         bnxt_hwrm_resource_free(bp, 0, true);
7551
7552         return rc;
7553 }
7554
7555 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7556 {
7557         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7558         return 0;
7559 }
7560
7561 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7562 {
7563         bnxt_init_cp_rings(bp);
7564         bnxt_init_rx_rings(bp);
7565         bnxt_init_tx_rings(bp);
7566         bnxt_init_ring_grps(bp, irq_re_init);
7567         bnxt_init_vnics(bp);
7568
7569         return bnxt_init_chip(bp, irq_re_init);
7570 }
7571
7572 static int bnxt_set_real_num_queues(struct bnxt *bp)
7573 {
7574         int rc;
7575         struct net_device *dev = bp->dev;
7576
7577         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7578                                           bp->tx_nr_rings_xdp);
7579         if (rc)
7580                 return rc;
7581
7582         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7583         if (rc)
7584                 return rc;
7585
7586 #ifdef CONFIG_RFS_ACCEL
7587         if (bp->flags & BNXT_FLAG_RFS)
7588                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
7589 #endif
7590
7591         return rc;
7592 }
7593
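     /* Fit the requested RX and TX ring counts into @max completion rings.
      * Shared rings are simply capped at @max; otherwise the larger count
      * is decremented until rx + tx <= max.  E.g. rx = 4, tx = 4, max = 6
      * (non-shared) trims to rx = 3, tx = 3.
      */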
7594 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7595                            bool shared)
7596 {
7597         int _rx = *rx, _tx = *tx;
7598
7599         if (shared) {
7600                 *rx = min_t(int, _rx, max);
7601                 *tx = min_t(int, _tx, max);
7602         } else {
7603                 if (max < 2)
7604                         return -ENOMEM;
7605
7606                 while (_rx + _tx > max) {
7607                         if (_rx > _tx && _rx > 1)
7608                                 _rx--;
7609                         else if (_tx > 1)
7610                                 _tx--;
7611                 }
7612                 *rx = _rx;
7613                 *tx = _tx;
7614         }
7615         return 0;
7616 }
7617
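     /* Assign IRQ names of the form "<dev>-<TxRx|rx|tx>-<n>", e.g.
      * "eth0-TxRx-0", and install the MSI-X handler for each completion
      * ring.
      */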
7618 static void bnxt_setup_msix(struct bnxt *bp)
7619 {
7620         const int len = sizeof(bp->irq_tbl[0].name);
7621         struct net_device *dev = bp->dev;
7622         int tcs, i;
7623
7624         tcs = netdev_get_num_tc(dev);
7625         if (tcs > 1) {
7626                 int i, off, count;
7627
7628                 for (i = 0; i < tcs; i++) {
7629                         count = bp->tx_nr_rings_per_tc;
7630                         off = i * count;
7631                         netdev_set_tc_queue(dev, i, count, off);
7632                 }
7633         }
7634
7635         for (i = 0; i < bp->cp_nr_rings; i++) {
7636                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7637                 char *attr;
7638
7639                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7640                         attr = "TxRx";
7641                 else if (i < bp->rx_nr_rings)
7642                         attr = "rx";
7643                 else
7644                         attr = "tx";
7645
7646                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7647                          attr, i);
7648                 bp->irq_tbl[map_idx].handler = bnxt_msix;
7649         }
7650 }
7651
7652 static void bnxt_setup_inta(struct bnxt *bp)
7653 {
7654         const int len = sizeof(bp->irq_tbl[0].name);
7655
7656         if (netdev_get_num_tc(bp->dev))
7657                 netdev_reset_tc(bp->dev);
7658
7659         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7660                  0);
7661         bp->irq_tbl[0].handler = bnxt_inta;
7662 }
7663
7664 static int bnxt_setup_int_mode(struct bnxt *bp)
7665 {
7666         int rc;
7667
7668         if (bp->flags & BNXT_FLAG_USING_MSIX)
7669                 bnxt_setup_msix(bp);
7670         else
7671                 bnxt_setup_inta(bp);
7672
7673         rc = bnxt_set_real_num_queues(bp);
7674         return rc;
7675 }
7676
7677 #ifdef CONFIG_RFS_ACCEL
7678 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7679 {
7680         return bp->hw_resc.max_rsscos_ctxs;
7681 }
7682
7683 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7684 {
7685         return bp->hw_resc.max_vnics;
7686 }
7687 #endif
7688
7689 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7690 {
7691         return bp->hw_resc.max_stat_ctxs;
7692 }
7693
7694 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7695 {
7696         return bp->hw_resc.max_cp_rings;
7697 }
7698
7699 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
7700 {
7701         unsigned int cp = bp->hw_resc.max_cp_rings;
7702
7703         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7704                 cp -= bnxt_get_ulp_msix_num(bp);
7705
7706         return cp;
7707 }
7708
7709 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7710 {
7711         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7712
7713         if (bp->flags & BNXT_FLAG_CHIP_P5)
7714                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7715
7716         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7717 }
7718
7719 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
7720 {
7721         bp->hw_resc.max_irqs = max_irqs;
7722 }
7723
7724 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7725 {
7726         unsigned int cp;
7727
7728         cp = bnxt_get_max_func_cp_rings_for_en(bp);
7729         if (bp->flags & BNXT_FLAG_CHIP_P5)
7730                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7731         else
7732                 return cp - bp->cp_nr_rings;
7733 }
7734
7735 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7736 {
7737         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
7738 }
7739
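     /* Return how many MSI-X vectors beyond those used by the L2
      * completion rings can still be handed out (e.g. to the RoCE ULP),
      * bounded by the allocated vectors and the f/w IRQ limits.
      */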
7740 int bnxt_get_avail_msix(struct bnxt *bp, int num)
7741 {
7742         int max_cp = bnxt_get_max_func_cp_rings(bp);
7743         int max_irq = bnxt_get_max_func_irqs(bp);
7744         int total_req = bp->cp_nr_rings + num;
7745         int max_idx, avail_msix;
7746
7747         max_idx = bp->total_irqs;
7748         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7749                 max_idx = min_t(int, bp->total_irqs, max_cp);
7750         avail_msix = max_idx - bp->cp_nr_rings;
7751         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
7752                 return avail_msix;
7753
7754         if (max_irq < total_req) {
7755                 num = max_irq - bp->cp_nr_rings;
7756                 if (num <= 0)
7757                         return 0;
7758         }
7759         return num;
7760 }
7761
7762 static int bnxt_get_num_msix(struct bnxt *bp)
7763 {
7764         if (!BNXT_NEW_RM(bp))
7765                 return bnxt_get_max_func_irqs(bp);
7766
7767         return bnxt_nq_rings_in_use(bp);
7768 }
7769
7770 static int bnxt_init_msix(struct bnxt *bp)
7771 {
7772         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7773         struct msix_entry *msix_ent;
7774
7775         total_vecs = bnxt_get_num_msix(bp);
7776         max = bnxt_get_max_func_irqs(bp);
7777         if (total_vecs > max)
7778                 total_vecs = max;
7779
7780         if (!total_vecs)
7781                 return 0;
7782
7783         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
7784         if (!msix_ent)
7785                 return -ENOMEM;
7786
7787         for (i = 0; i < total_vecs; i++) {
7788                 msix_ent[i].entry = i;
7789                 msix_ent[i].vector = 0;
7790         }
7791
7792         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
7793                 min = 2;
7794
7795         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
7796         ulp_msix = bnxt_get_ulp_msix_num(bp);
7797         if (total_vecs < 0 || total_vecs < ulp_msix) {
7798                 rc = -ENODEV;
7799                 goto msix_setup_exit;
7800         }
7801
7802         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
7803         if (bp->irq_tbl) {
7804                 for (i = 0; i < total_vecs; i++)
7805                         bp->irq_tbl[i].vector = msix_ent[i].vector;
7806
7807                 bp->total_irqs = total_vecs;
7808                 /* Trim rings based on the number of vectors allocated */
7809                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
7810                                      total_vecs - ulp_msix, min == 1);
7811                 if (rc)
7812                         goto msix_setup_exit;
7813
7814                 bp->cp_nr_rings = (min == 1) ?
7815                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7816                                   bp->tx_nr_rings + bp->rx_nr_rings;
7817
7818         } else {
7819                 rc = -ENOMEM;
7820                 goto msix_setup_exit;
7821         }
7822         bp->flags |= BNXT_FLAG_USING_MSIX;
7823         kfree(msix_ent);
7824         return 0;
7825
7826 msix_setup_exit:
7827         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
7828         kfree(bp->irq_tbl);
7829         bp->irq_tbl = NULL;
7830         pci_disable_msix(bp->pdev);
7831         kfree(msix_ent);
7832         return rc;
7833 }
7834
7835 static int bnxt_init_inta(struct bnxt *bp)
7836 {
7837         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7838         if (!bp->irq_tbl)
7839                 return -ENOMEM;
7840
7841         bp->total_irqs = 1;
7842         bp->rx_nr_rings = 1;
7843         bp->tx_nr_rings = 1;
7844         bp->cp_nr_rings = 1;
7845         bp->flags |= BNXT_FLAG_SHARED_RINGS;
7846         bp->irq_tbl[0].vector = bp->pdev->irq;
7847         return 0;
7848 }
7849
7850 static int bnxt_init_int_mode(struct bnxt *bp)
7851 {
7852         int rc = 0;
7853
7854         if (bp->flags & BNXT_FLAG_MSIX_CAP)
7855                 rc = bnxt_init_msix(bp);
7856
7857         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
7858                 /* fallback to INTA */
7859                 rc = bnxt_init_inta(bp);
7860         }
7861         return rc;
7862 }
7863
7864 static void bnxt_clear_int_mode(struct bnxt *bp)
7865 {
7866         if (bp->flags & BNXT_FLAG_USING_MSIX)
7867                 pci_disable_msix(bp->pdev);
7868
7869         kfree(bp->irq_tbl);
7870         bp->irq_tbl = NULL;
7871         bp->flags &= ~BNXT_FLAG_USING_MSIX;
7872 }
7873
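     /* Re-reserve rings with the f/w.  Under the new resource manager,
      * if the required vector count has changed, quiesce the ULP and
      * tear down and re-initialize the IRQ mode around the reservation.
      */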
7874 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
7875 {
7876         int tcs = netdev_get_num_tc(bp->dev);
7877         bool irq_cleared = false;
7878         int rc;
7879
7880         if (!bnxt_need_reserve_rings(bp))
7881                 return 0;
7882
7883         if (irq_re_init && BNXT_NEW_RM(bp) &&
7884             bnxt_get_num_msix(bp) != bp->total_irqs) {
7885                 bnxt_ulp_irq_stop(bp);
7886                 bnxt_clear_int_mode(bp);
7887                 irq_cleared = true;
7888         }
7889         rc = __bnxt_reserve_rings(bp);
7890         if (irq_cleared) {
7891                 if (!rc)
7892                         rc = bnxt_init_int_mode(bp);
7893                 bnxt_ulp_irq_restart(bp, rc);
7894         }
7895         if (rc) {
7896                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
7897                 return rc;
7898         }
7899         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
7900                 netdev_err(bp->dev, "tx ring reservation failure\n");
7901                 netdev_reset_tc(bp->dev);
7902                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7903                 return -ENOMEM;
7904         }
7905         return 0;
7906 }
7907
7908 static void bnxt_free_irq(struct bnxt *bp)
7909 {
7910         struct bnxt_irq *irq;
7911         int i;
7912
7913 #ifdef CONFIG_RFS_ACCEL
7914         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
7915         bp->dev->rx_cpu_rmap = NULL;
7916 #endif
7917         if (!bp->irq_tbl || !bp->bnapi)
7918                 return;
7919
7920         for (i = 0; i < bp->cp_nr_rings; i++) {
7921                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7922
7923                 irq = &bp->irq_tbl[map_idx];
7924                 if (irq->requested) {
7925                         if (irq->have_cpumask) {
7926                                 irq_set_affinity_hint(irq->vector, NULL);
7927                                 free_cpumask_var(irq->cpu_mask);
7928                                 irq->have_cpumask = 0;
7929                         }
7930                         free_irq(irq->vector, bp->bnapi[i]);
7931                 }
7932
7933                 irq->requested = 0;
7934         }
7935 }
7936
7937 static int bnxt_request_irq(struct bnxt *bp)
7938 {
7939         int i, j, rc = 0;
7940         unsigned long flags = 0;
7941 #ifdef CONFIG_RFS_ACCEL
7942         struct cpu_rmap *rmap;
7943 #endif
7944
7945         rc = bnxt_setup_int_mode(bp);
7946         if (rc) {
7947                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
7948                            rc);
7949                 return rc;
7950         }
7951 #ifdef CONFIG_RFS_ACCEL
7952         rmap = bp->dev->rx_cpu_rmap;
7953 #endif
7954         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
7955                 flags = IRQF_SHARED;
7956
7957         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
7958                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7959                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
7960
7961 #ifdef CONFIG_RFS_ACCEL
7962                 if (rmap && bp->bnapi[i]->rx_ring) {
7963                         rc = irq_cpu_rmap_add(rmap, irq->vector);
7964                         if (rc)
7965                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
7966                                             j);
7967                         j++;
7968                 }
7969 #endif
7970                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
7971                                  bp->bnapi[i]);
7972                 if (rc)
7973                         break;
7974
7975                 irq->requested = 1;
7976
7977                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
7978                         int numa_node = dev_to_node(&bp->pdev->dev);
7979
7980                         irq->have_cpumask = 1;
7981                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
7982                                         irq->cpu_mask);
7983                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
7984                         if (rc) {
7985                                 netdev_warn(bp->dev,
7986                                             "Set affinity failed, IRQ = %d\n",
7987                                             irq->vector);
7988                                 break;
7989                         }
7990                 }
7991         }
7992         return rc;
7993 }
7994
7995 static void bnxt_del_napi(struct bnxt *bp)
7996 {
7997         int i;
7998
7999         if (!bp->bnapi)
8000                 return;
8001
8002         for (i = 0; i < bp->cp_nr_rings; i++) {
8003                 struct bnxt_napi *bnapi = bp->bnapi[i];
8004
8005                 napi_hash_del(&bnapi->napi);
8006                 netif_napi_del(&bnapi->napi);
8007         }
8008         /* Since we called napi_hash_del() before netif_napi_del(), we need
8009          * to respect an RCU grace period before freeing the napi structures.
8010          */
8011         synchronize_net();
8012 }
8013
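     /* Register NAPI contexts: P5 chips poll with bnxt_poll_p5, Nitro A0
      * dedicates the last completion ring to bnxt_poll_nitroa0, and
      * everything else (including INTA mode) uses bnxt_poll.
      */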
8014 static void bnxt_init_napi(struct bnxt *bp)
8015 {
8016         int i;
8017         unsigned int cp_nr_rings = bp->cp_nr_rings;
8018         struct bnxt_napi *bnapi;
8019
8020         if (bp->flags & BNXT_FLAG_USING_MSIX) {
8021                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8022
8023                 if (bp->flags & BNXT_FLAG_CHIP_P5)
8024                         poll_fn = bnxt_poll_p5;
8025                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8026                         cp_nr_rings--;
8027                 for (i = 0; i < cp_nr_rings; i++) {
8028                         bnapi = bp->bnapi[i];
8029                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8030                 }
8031                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8032                         bnapi = bp->bnapi[cp_nr_rings];
8033                         netif_napi_add(bp->dev, &bnapi->napi,
8034                                        bnxt_poll_nitroa0, 64);
8035                 }
8036         } else {
8037                 bnapi = bp->bnapi[0];
8038                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8039         }
8040 }
8041
8042 static void bnxt_disable_napi(struct bnxt *bp)
8043 {
8044         int i;
8045
8046         if (!bp->bnapi)
8047                 return;
8048
8049         for (i = 0; i < bp->cp_nr_rings; i++) {
8050                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8051
8052                 if (bp->bnapi[i]->rx_ring)
8053                         cancel_work_sync(&cpr->dim.work);
8054
8055                 napi_disable(&bp->bnapi[i]->napi);
8056         }
8057 }
8058
8059 static void bnxt_enable_napi(struct bnxt *bp)
8060 {
8061         int i;
8062
8063         for (i = 0; i < bp->cp_nr_rings; i++) {
8064                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8065                 bp->bnapi[i]->in_reset = false;
8066
8067                 if (bp->bnapi[i]->rx_ring) {
8068                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8069                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8070                 }
8071                 napi_enable(&bp->bnapi[i]->napi);
8072         }
8073 }
8074
8075 void bnxt_tx_disable(struct bnxt *bp)
8076 {
8077         int i;
8078         struct bnxt_tx_ring_info *txr;
8079
8080         if (bp->tx_ring) {
8081                 for (i = 0; i < bp->tx_nr_rings; i++) {
8082                         txr = &bp->tx_ring[i];
8083                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
8084                 }
8085         }
8086         /* Stop all TX queues */
8087         netif_tx_disable(bp->dev);
8088         netif_carrier_off(bp->dev);
8089 }
8090
8091 void bnxt_tx_enable(struct bnxt *bp)
8092 {
8093         int i;
8094         struct bnxt_tx_ring_info *txr;
8095
8096         for (i = 0; i < bp->tx_nr_rings; i++) {
8097                 txr = &bp->tx_ring[i];
8098                 txr->dev_state = 0;
8099         }
8100         netif_tx_wake_all_queues(bp->dev);
8101         if (bp->link_info.link_up)
8102                 netif_carrier_on(bp->dev);
8103 }
8104
8105 static void bnxt_report_link(struct bnxt *bp)
8106 {
8107         if (bp->link_info.link_up) {
8108                 const char *duplex;
8109                 const char *flow_ctrl;
8110                 u32 speed;
8111                 u16 fec;
8112
8113                 netif_carrier_on(bp->dev);
8114                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8115                         duplex = "full";
8116                 else
8117                         duplex = "half";
8118                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8119                         flow_ctrl = "ON - receive & transmit";
8120                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8121                         flow_ctrl = "ON - transmit";
8122                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8123                         flow_ctrl = "ON - receive";
8124                 else
8125                         flow_ctrl = "none";
8126                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
8127                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
8128                             speed, duplex, flow_ctrl);
8129                 if (bp->flags & BNXT_FLAG_EEE_CAP)
8130                         netdev_info(bp->dev, "EEE is %s\n",
8131                                     bp->eee.eee_active ? "active" :
8132                                                          "not active");
8133                 fec = bp->link_info.fec_cfg;
8134                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8135                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8136                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8137                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8138                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
8139         } else {
8140                 netif_carrier_off(bp->dev);
8141                 netdev_err(bp->dev, "NIC Link is Down\n");
8142         }
8143 }
8144
8145 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8146 {
8147         int rc = 0;
8148         struct hwrm_port_phy_qcaps_input req = {0};
8149         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8150         struct bnxt_link_info *link_info = &bp->link_info;
8151
8152         if (bp->hwrm_spec_code < 0x10201)
8153                 return 0;
8154
8155         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8156
8157         mutex_lock(&bp->hwrm_cmd_lock);
8158         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8159         if (rc)
8160                 goto hwrm_phy_qcaps_exit;
8161
8162         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
8163                 struct ethtool_eee *eee = &bp->eee;
8164                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8165
8166                 bp->flags |= BNXT_FLAG_EEE_CAP;
8167                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8168                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8169                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8170                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8171                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8172         }
8173         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8174                 if (bp->test_info)
8175                         bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8176         }
8177         if (resp->supported_speeds_auto_mode)
8178                 link_info->support_auto_speeds =
8179                         le16_to_cpu(resp->supported_speeds_auto_mode);
8180
8181         bp->port_count = resp->port_cnt;
8182
8183 hwrm_phy_qcaps_exit:
8184         mutex_unlock(&bp->hwrm_cmd_lock);
8185         return rc;
8186 }
8187
8188 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8189 {
8190         int rc = 0;
8191         struct bnxt_link_info *link_info = &bp->link_info;
8192         struct hwrm_port_phy_qcfg_input req = {0};
8193         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8194         u8 link_up = link_info->link_up;
8195         u16 diff;
8196
8197         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8198
8199         mutex_lock(&bp->hwrm_cmd_lock);
8200         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8201         if (rc) {
8202                 mutex_unlock(&bp->hwrm_cmd_lock);
8203                 return rc;
8204         }
8205
8206         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8207         link_info->phy_link_status = resp->link;
8208         link_info->duplex = resp->duplex_cfg;
8209         if (bp->hwrm_spec_code >= 0x10800)
8210                 link_info->duplex = resp->duplex_state;
8211         link_info->pause = resp->pause;
8212         link_info->auto_mode = resp->auto_mode;
8213         link_info->auto_pause_setting = resp->auto_pause;
8214         link_info->lp_pause = resp->link_partner_adv_pause;
8215         link_info->force_pause_setting = resp->force_pause;
8216         link_info->duplex_setting = resp->duplex_cfg;
8217         if (link_info->phy_link_status == BNXT_LINK_LINK)
8218                 link_info->link_speed = le16_to_cpu(resp->link_speed);
8219         else
8220                 link_info->link_speed = 0;
8221         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
8222         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8223         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
8224         link_info->lp_auto_link_speeds =
8225                 le16_to_cpu(resp->link_partner_adv_speeds);
8226         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8227         link_info->phy_ver[0] = resp->phy_maj;
8228         link_info->phy_ver[1] = resp->phy_min;
8229         link_info->phy_ver[2] = resp->phy_bld;
8230         link_info->media_type = resp->media_type;
8231         link_info->phy_type = resp->phy_type;
8232         link_info->transceiver = resp->xcvr_pkg_type;
8233         link_info->phy_addr = resp->eee_config_phy_addr &
8234                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
8235         link_info->module_status = resp->module_status;
8236
8237         if (bp->flags & BNXT_FLAG_EEE_CAP) {
8238                 struct ethtool_eee *eee = &bp->eee;
8239                 u16 fw_speeds;
8240
8241                 eee->eee_active = 0;
8242                 if (resp->eee_config_phy_addr &
8243                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8244                         eee->eee_active = 1;
8245                         fw_speeds = le16_to_cpu(
8246                                 resp->link_partner_adv_eee_link_speed_mask);
8247                         eee->lp_advertised =
8248                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8249                 }
8250
8251                 /* Pull initial EEE config */
8252                 if (!chng_link_state) {
8253                         if (resp->eee_config_phy_addr &
8254                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8255                                 eee->eee_enabled = 1;
8256
8257                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
8258                         eee->advertised =
8259                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8260
8261                         if (resp->eee_config_phy_addr &
8262                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8263                                 __le32 tmr;
8264
8265                                 eee->tx_lpi_enabled = 1;
8266                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8267                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8268                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8269                         }
8270                 }
8271         }
8272
8273         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8274         if (bp->hwrm_spec_code >= 0x10504)
8275                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8276
8277         /* TODO: need to add more logic to report VF link */
8278         if (chng_link_state) {
8279                 if (link_info->phy_link_status == BNXT_LINK_LINK)
8280                         link_info->link_up = 1;
8281                 else
8282                         link_info->link_up = 0;
8283                 if (link_up != link_info->link_up)
8284                         bnxt_report_link(bp);
8285         } else {
8286                 /* always link down if not required to update link state */
8287                 link_info->link_up = 0;
8288         }
8289         mutex_unlock(&bp->hwrm_cmd_lock);
8290
8291         if (!BNXT_SINGLE_PF(bp))
8292                 return 0;
8293
8294         diff = link_info->support_auto_speeds ^ link_info->advertising;
8295         if ((link_info->support_auto_speeds | diff) !=
8296             link_info->support_auto_speeds) {
8297                 /* An advertised speed is no longer supported, so we need to
8298                  * update the advertisement settings.  Caller holds RTNL
8299                  * so we can modify link settings.
8300                  */
8301                 link_info->advertising = link_info->support_auto_speeds;
8302                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
8303                         bnxt_hwrm_set_link_setting(bp, true, false);
8304         }
8305         return 0;
8306 }
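
/* Worked example for the advertised-speed check above (editorial,
 * hypothetical bitmask values): if firmware now reports
 * support_auto_speeds = 0x007f while the driver is still advertising
 * 0x01ff, then diff = 0x007f ^ 0x01ff = 0x0180 and
 * (0x007f | 0x0180) != 0x007f, i.e. the 0x0180 speeds are advertised
 * but no longer supported.  Advertising is therefore clamped to
 * support_auto_speeds and, with BNXT_AUTONEG_SPEED set, the link
 * settings are re-applied.
 */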
8307
8308 static void bnxt_get_port_module_status(struct bnxt *bp)
8309 {
8310         struct bnxt_link_info *link_info = &bp->link_info;
8311         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
8312         u8 module_status;
8313
8314         if (bnxt_update_link(bp, true))
8315                 return;
8316
8317         module_status = link_info->module_status;
8318         switch (module_status) {
8319         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8320         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8321         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
8322                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
8323                             bp->pf.port_id);
8324                 if (bp->hwrm_spec_code >= 0x10201) {
8325                         netdev_warn(bp->dev, "Module part number %s\n",
8326                                     resp->phy_vendor_partnumber);
8327                 }
8328                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8329                         netdev_warn(bp->dev, "TX is disabled\n");
8330                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8331                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8332         }
8333 }
8334
8335 static void
8336 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8337 {
8338         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
8339                 if (bp->hwrm_spec_code >= 0x10201)
8340                         req->auto_pause =
8341                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
8342                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8343                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8344                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8345                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
8346                 req->enables |=
8347                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8348         } else {
8349                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8350                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8351                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8352                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
8353                 req->enables |=
8354                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
8355                 if (bp->hwrm_spec_code >= 0x10201) {
8356                         req->auto_pause = req->force_pause;
8357                         req->enables |= cpu_to_le32(
8358                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8359                 }
8360         }
8361 }
8362
8363 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8364                                       struct hwrm_port_phy_cfg_input *req)
8365 {
8366         u8 autoneg = bp->link_info.autoneg;
8367         u16 fw_link_speed = bp->link_info.req_link_speed;
8368         u16 advertising = bp->link_info.advertising;
8369
8370         if (autoneg & BNXT_AUTONEG_SPEED) {
8371                 req->auto_mode |=
8372                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
8373
8374                 req->enables |= cpu_to_le32(
8375                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8376                 req->auto_link_speed_mask = cpu_to_le16(advertising);
8377
8378                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
8379                 req->flags |=
8380                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8381         } else {
8382                 req->force_link_speed = cpu_to_le16(fw_link_speed);
8383                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8384         }
8385
8386         /* tell chimp that the setting takes effect immediately */
8387         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8388 }
8389
8390 int bnxt_hwrm_set_pause(struct bnxt *bp)
8391 {
8392         struct hwrm_port_phy_cfg_input req = {0};
8393         int rc;
8394
8395         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8396         bnxt_hwrm_set_pause_common(bp, &req);
8397
8398         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8399             bp->link_info.force_link_chng)
8400                 bnxt_hwrm_set_link_common(bp, &req);
8401
8402         mutex_lock(&bp->hwrm_cmd_lock);
8403         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8404         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8405                 /* since changing the pause setting doesn't trigger any link
8406                  * change event, the driver needs to update the current pause
8407                  * result upon successful return of the phy_cfg command
8408                  */
8409                 bp->link_info.pause =
8410                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8411                 bp->link_info.auto_pause_setting = 0;
8412                 if (!bp->link_info.force_link_chng)
8413                         bnxt_report_link(bp);
8414         }
8415         bp->link_info.force_link_chng = false;
8416         mutex_unlock(&bp->hwrm_cmd_lock);
8417         return rc;
8418 }
8419
8420 static void bnxt_hwrm_set_eee(struct bnxt *bp,
8421                               struct hwrm_port_phy_cfg_input *req)
8422 {
8423         struct ethtool_eee *eee = &bp->eee;
8424
8425         if (eee->eee_enabled) {
8426                 u16 eee_speeds;
8427                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8428
8429                 if (eee->tx_lpi_enabled)
8430                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8431                 else
8432                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8433
8434                 req->flags |= cpu_to_le32(flags);
8435                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8436                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8437                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8438         } else {
8439                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8440         }
8441 }
8442
8443 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
8444 {
8445         struct hwrm_port_phy_cfg_input req = {0};
8446
8447         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8448         if (set_pause)
8449                 bnxt_hwrm_set_pause_common(bp, &req);
8450
8451         bnxt_hwrm_set_link_common(bp, &req);
8452
8453         if (set_eee)
8454                 bnxt_hwrm_set_eee(bp, &req);
8455         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8456 }
8457
8458 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8459 {
8460         struct hwrm_port_phy_cfg_input req = {0};
8461
8462         if (!BNXT_SINGLE_PF(bp))
8463                 return 0;
8464
8465         if (pci_num_vf(bp->pdev))
8466                 return 0;
8467
8468         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8469         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
8470         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8471 }
8472
8473 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8474 {
8475         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8476         struct hwrm_func_drv_if_change_input req = {0};
8477         bool resc_reinit = false;
8478         int rc;
8479
8480         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8481                 return 0;
8482
8483         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8484         if (up)
8485                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8486         mutex_lock(&bp->hwrm_cmd_lock);
8487         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8488         if (!rc && (resp->flags &
8489                     cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
8490                 resc_reinit = true;
8491         mutex_unlock(&bp->hwrm_cmd_lock);
8492
8493         if (up && resc_reinit && BNXT_NEW_RM(bp)) {
8494                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8495
8496                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8497                 hw_resc->resv_cp_rings = 0;
8498                 hw_resc->resv_stat_ctxs = 0;
8499                 hw_resc->resv_irqs = 0;
8500                 hw_resc->resv_tx_rings = 0;
8501                 hw_resc->resv_rx_rings = 0;
8502                 hw_resc->resv_hw_ring_grps = 0;
8503                 hw_resc->resv_vnics = 0;
8504                 bp->tx_nr_rings = 0;
8505                 bp->rx_nr_rings = 0;
8506         }
8507         return rc;
8508 }
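
/* Editorial note: when firmware sets FLAGS_RESC_CHANGE on the
 * up-transition (presumably after a firmware reset or reconfiguration
 * that happened while the interface was down), every cached reservation
 * and ring count is zeroed above so that the subsequent
 * bnxt_reserve_rings() call re-negotiates resources from scratch.
 */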
8509
8510 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8511 {
8512         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8513         struct hwrm_port_led_qcaps_input req = {0};
8514         struct bnxt_pf_info *pf = &bp->pf;
8515         int rc;
8516
8517         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8518                 return 0;
8519
8520         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8521         req.port_id = cpu_to_le16(pf->port_id);
8522         mutex_lock(&bp->hwrm_cmd_lock);
8523         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8524         if (rc) {
8525                 mutex_unlock(&bp->hwrm_cmd_lock);
8526                 return rc;
8527         }
8528         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8529                 int i;
8530
8531                 bp->num_leds = resp->num_leds;
8532                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8533                                                  bp->num_leds);
8534                 for (i = 0; i < bp->num_leds; i++) {
8535                         struct bnxt_led_info *led = &bp->leds[i];
8536                         __le16 caps = led->led_state_caps;
8537
8538                         if (!led->led_group_id ||
8539                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
8540                                 bp->num_leds = 0;
8541                                 break;
8542                         }
8543                 }
8544         }
8545         mutex_unlock(&bp->hwrm_cmd_lock);
8546         return 0;
8547 }
8548
8549 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8550 {
8551         struct hwrm_wol_filter_alloc_input req = {0};
8552         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8553         int rc;
8554
8555         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8556         req.port_id = cpu_to_le16(bp->pf.port_id);
8557         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8558         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8559         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8560         mutex_lock(&bp->hwrm_cmd_lock);
8561         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8562         if (!rc)
8563                 bp->wol_filter_id = resp->wol_filter_id;
8564         mutex_unlock(&bp->hwrm_cmd_lock);
8565         return rc;
8566 }
8567
8568 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8569 {
8570         struct hwrm_wol_filter_free_input req = {0};
8571         int rc;
8572
8573         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8574         req.port_id = cpu_to_le16(bp->pf.port_id);
8575         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8576         req.wol_filter_id = bp->wol_filter_id;
8577         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8578         return rc;
8579 }
8580
8581 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8582 {
8583         struct hwrm_wol_filter_qcfg_input req = {0};
8584         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8585         u16 next_handle = 0;
8586         int rc;
8587
8588         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8589         req.port_id = cpu_to_le16(bp->pf.port_id);
8590         req.handle = cpu_to_le16(handle);
8591         mutex_lock(&bp->hwrm_cmd_lock);
8592         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8593         if (!rc) {
8594                 next_handle = le16_to_cpu(resp->next_handle);
8595                 if (next_handle != 0) {
8596                         if (resp->wol_type ==
8597                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8598                                 bp->wol = 1;
8599                                 bp->wol_filter_id = resp->wol_filter_id;
8600                         }
8601                 }
8602         }
8603         mutex_unlock(&bp->hwrm_cmd_lock);
8604         return next_handle;
8605 }
8606
8607 static void bnxt_get_wol_settings(struct bnxt *bp)
8608 {
8609         u16 handle = 0;
8610
8611         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8612                 return;
8613
8614         do {
8615                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8616         } while (handle && handle != 0xffff);
8617 }
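
/* Editorial sketch of the walk above: firmware hands out WoL filters as
 * a chain keyed by handle.  Starting from handle 0, each
 * HWRM_WOL_FILTER_QCFG reply carries next_handle, and a next_handle of
 * 0 or 0xffff terminates the loop.  A configured device might
 * (hypothetically) yield next_handle values 5, 9, then 0xffff across
 * three queries, latching wol/wol_filter_id for any magic-packet filter
 * seen along the way.
 */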
8618
8619 #ifdef CONFIG_BNXT_HWMON
8620 static ssize_t bnxt_show_temp(struct device *dev,
8621                               struct device_attribute *devattr, char *buf)
8622 {
8623         struct hwrm_temp_monitor_query_input req = {0};
8624         struct hwrm_temp_monitor_query_output *resp;
8625         struct bnxt *bp = dev_get_drvdata(dev);
8626         u32 temp = 0;
8627
8628         resp = bp->hwrm_cmd_resp_addr;
8629         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8630         mutex_lock(&bp->hwrm_cmd_lock);
8631         if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8632                 temp = resp->temp * 1000; /* display millidegree */
8633         mutex_unlock(&bp->hwrm_cmd_lock);
8634
8635         return sprintf(buf, "%u\n", temp);
8636 }
8637 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8638
8639 static struct attribute *bnxt_attrs[] = {
8640         &sensor_dev_attr_temp1_input.dev_attr.attr,
8641         NULL
8642 };
8643 ATTRIBUTE_GROUPS(bnxt);
8644
8645 static void bnxt_hwmon_close(struct bnxt *bp)
8646 {
8647         if (bp->hwmon_dev) {
8648                 hwmon_device_unregister(bp->hwmon_dev);
8649                 bp->hwmon_dev = NULL;
8650         }
8651 }
8652
8653 static void bnxt_hwmon_open(struct bnxt *bp)
8654 {
8655         struct pci_dev *pdev = bp->pdev;
8656
8657         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8658                                                           DRV_MODULE_NAME, bp,
8659                                                           bnxt_groups);
8660         if (IS_ERR(bp->hwmon_dev)) {
8661                 bp->hwmon_dev = NULL;
8662                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8663         }
8664 }
8665 #else
8666 static void bnxt_hwmon_close(struct bnxt *bp)
8667 {
8668 }
8669
8670 static void bnxt_hwmon_open(struct bnxt *bp)
8671 {
8672 }
8673 #endif
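
/* Editorial usage note: with CONFIG_BNXT_HWMON enabled, the temperature
 * read above is exposed through the standard hwmon sysfs interface in
 * millidegrees Celsius, e.g. (hwmon index hypothetical):
 *
 *	$ cat /sys/class/hwmon/hwmon0/temp1_input
 *	45000
 *
 * i.e. 45 degrees C, matching the "resp->temp * 1000" conversion in
 * bnxt_show_temp().
 */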
8674
8675 static bool bnxt_eee_config_ok(struct bnxt *bp)
8676 {
8677         struct ethtool_eee *eee = &bp->eee;
8678         struct bnxt_link_info *link_info = &bp->link_info;
8679
8680         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8681                 return true;
8682
8683         if (eee->eee_enabled) {
8684                 u32 advertising =
8685                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8686
8687                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8688                         eee->eee_enabled = 0;
8689                         return false;
8690                 }
8691                 if (eee->advertised & ~advertising) {
8692                         eee->advertised = advertising & eee->supported;
8693                         return false;
8694                 }
8695         }
8696         return true;
8697 }
8698
8699 static int bnxt_update_phy_setting(struct bnxt *bp)
8700 {
8701         int rc;
8702         bool update_link = false;
8703         bool update_pause = false;
8704         bool update_eee = false;
8705         struct bnxt_link_info *link_info = &bp->link_info;
8706
8707         rc = bnxt_update_link(bp, true);
8708         if (rc) {
8709                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8710                            rc);
8711                 return rc;
8712         }
8713         if (!BNXT_SINGLE_PF(bp))
8714                 return 0;
8715
8716         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8717             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8718             link_info->req_flow_ctrl)
8719                 update_pause = true;
8720         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8721             link_info->force_pause_setting != link_info->req_flow_ctrl)
8722                 update_pause = true;
8723         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8724                 if (BNXT_AUTO_MODE(link_info->auto_mode))
8725                         update_link = true;
8726                 if (link_info->req_link_speed != link_info->force_link_speed)
8727                         update_link = true;
8728                 if (link_info->req_duplex != link_info->duplex_setting)
8729                         update_link = true;
8730         } else {
8731                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8732                         update_link = true;
8733                 if (link_info->advertising != link_info->auto_link_speeds)
8734                         update_link = true;
8735         }
8736
8737         /* The last close may have shut down the link, so we need to call
8738          * PHY_CFG to bring it back up.
8739          */
8740         if (!netif_carrier_ok(bp->dev))
8741                 update_link = true;
8742
8743         if (!bnxt_eee_config_ok(bp))
8744                 update_eee = true;
8745
8746         if (update_link)
8747                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
8748         else if (update_pause)
8749                 rc = bnxt_hwrm_set_pause(bp);
8750         if (rc) {
8751                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
8752                            rc);
8753                 return rc;
8754         }
8755
8756         return rc;
8757 }
8758
8759 /* Common routine to pre-map certain register blocks to different GRC windows.
8760  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15
8761  * windows in the PF and 3 in the VF can be customized to map different
8762  * register blocks.
8763  */
8764 static void bnxt_preset_reg_win(struct bnxt *bp)
8765 {
8766         if (BNXT_PF(bp)) {
8767                 /* CAG registers map to GRC window #4 */
8768                 writel(BNXT_CAG_REG_BASE,
8769                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
8770         }
8771 }
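
/* Editorial note on the "+ 12" above: assuming the window-select
 * registers are consecutive 32-bit registers starting at
 * BNXT_GRCPF_REG_WINDOW_BASE_OUT, window N (1-based) is programmed at
 * offset (N - 1) * 4, so + 12 selects window #4, as the comment in the
 * function states.
 */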
8772
8773 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
8774
8775 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8776 {
8777         int rc = 0;
8778
8779         bnxt_preset_reg_win(bp);
8780         netif_carrier_off(bp->dev);
8781         if (irq_re_init) {
8782                 /* Reserve rings now if none were reserved at driver probe. */
8783                 rc = bnxt_init_dflt_ring_mode(bp);
8784                 if (rc) {
8785                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
8786                         return rc;
8787                 }
8788         }
8789         rc = bnxt_reserve_rings(bp, irq_re_init);
8790         if (rc)
8791                 return rc;
8792         if ((bp->flags & BNXT_FLAG_RFS) &&
8793             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
8794                 /* disable RFS if falling back to INTA */
8795                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
8796                 bp->flags &= ~BNXT_FLAG_RFS;
8797         }
8798
8799         rc = bnxt_alloc_mem(bp, irq_re_init);
8800         if (rc) {
8801                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8802                 goto open_err_free_mem;
8803         }
8804
8805         if (irq_re_init) {
8806                 bnxt_init_napi(bp);
8807                 rc = bnxt_request_irq(bp);
8808                 if (rc) {
8809                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
8810                         goto open_err_irq;
8811                 }
8812         }
8813
8814         bnxt_enable_napi(bp);
8815         bnxt_debug_dev_init(bp);
8816
8817         rc = bnxt_init_nic(bp, irq_re_init);
8818         if (rc) {
8819                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8820                 goto open_err;
8821         }
8822
8823         if (link_re_init) {
8824                 mutex_lock(&bp->link_lock);
8825                 rc = bnxt_update_phy_setting(bp);
8826                 mutex_unlock(&bp->link_lock);
8827                 if (rc) {
8828                         netdev_warn(bp->dev, "failed to update phy settings\n");
8829                         if (BNXT_SINGLE_PF(bp)) {
8830                                 bp->link_info.phy_retry = true;
8831                                 bp->link_info.phy_retry_expires =
8832                                         jiffies + 5 * HZ;
8833                         }
8834                 }
8835         }
8836
8837         if (irq_re_init)
8838                 udp_tunnel_get_rx_info(bp->dev);
8839
8840         set_bit(BNXT_STATE_OPEN, &bp->state);
8841         bnxt_enable_int(bp);
8842         /* Enable TX queues */
8843         bnxt_tx_enable(bp);
8844         mod_timer(&bp->timer, jiffies + bp->current_interval);
8845         /* Poll link status and check for SFP+ module status */
8846         bnxt_get_port_module_status(bp);
8847
8848         /* VF-reps may need to be re-opened after the PF is re-opened */
8849         if (BNXT_PF(bp))
8850                 bnxt_vf_reps_open(bp);
8851         return 0;
8852
8853 open_err:
8854         bnxt_debug_dev_exit(bp);
8855         bnxt_disable_napi(bp);
8856
8857 open_err_irq:
8858         bnxt_del_napi(bp);
8859
8860 open_err_free_mem:
8861         bnxt_free_skbs(bp);
8862         bnxt_free_irq(bp);
8863         bnxt_free_mem(bp, true);
8864         return rc;
8865 }
8866
8867 /* rtnl_lock held */
8868 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8869 {
8870         int rc = 0;
8871
8872         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
8873         if (rc) {
8874                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
8875                 dev_close(bp->dev);
8876         }
8877         return rc;
8878 }
8879
8880 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
8881  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
8882  * self tests.
8883  */
8884 int bnxt_half_open_nic(struct bnxt *bp)
8885 {
8886         int rc = 0;
8887
8888         rc = bnxt_alloc_mem(bp, false);
8889         if (rc) {
8890                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8891                 goto half_open_err;
8892         }
8893         rc = bnxt_init_nic(bp, false);
8894         if (rc) {
8895                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8896                 goto half_open_err;
8897         }
8898         return 0;
8899
8900 half_open_err:
8901         bnxt_free_skbs(bp);
8902         bnxt_free_mem(bp, false);
8903         dev_close(bp->dev);
8904         return rc;
8905 }
8906
8907 /* rtnl_lock held, this call can only be made after a previous successful
8908  * call to bnxt_half_open_nic().
8909  */
8910 void bnxt_half_close_nic(struct bnxt *bp)
8911 {
8912         bnxt_hwrm_resource_free(bp, false, false);
8913         bnxt_free_skbs(bp);
8914         bnxt_free_mem(bp, false);
8915 }
8916
8917 static int bnxt_open(struct net_device *dev)
8918 {
8919         struct bnxt *bp = netdev_priv(dev);
8920         int rc;
8921
8922         bnxt_hwrm_if_change(bp, true);
8923         rc = __bnxt_open_nic(bp, true, true);
8924         if (rc)
8925                 bnxt_hwrm_if_change(bp, false);
8926
8927         bnxt_hwmon_open(bp);
8928
8929         return rc;
8930 }
8931
8932 static bool bnxt_drv_busy(struct bnxt *bp)
8933 {
8934         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
8935                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
8936 }
8937
8938 static void bnxt_get_ring_stats(struct bnxt *bp,
8939                                 struct rtnl_link_stats64 *stats);
8940
8941 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
8942                              bool link_re_init)
8943 {
8944         /* Close the VF-reps before closing PF */
8945         if (BNXT_PF(bp))
8946                 bnxt_vf_reps_close(bp);
8947
8948         /* Change device state to avoid TX queue wake-ups */
8949         bnxt_tx_disable(bp);
8950
8951         clear_bit(BNXT_STATE_OPEN, &bp->state);
8952         smp_mb__after_atomic();
8953         while (bnxt_drv_busy(bp))
8954                 msleep(20);
8955
8956         /* Flush rings and disable interrupts */
8957         bnxt_shutdown_nic(bp, irq_re_init);
8958
8959         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
8960
8961         bnxt_debug_dev_exit(bp);
8962         bnxt_disable_napi(bp);
8963         del_timer_sync(&bp->timer);
8964         bnxt_free_skbs(bp);
8965
8966         /* Save ring stats before shutdown */
8967         if (bp->bnapi)
8968                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
8969         if (irq_re_init) {
8970                 bnxt_free_irq(bp);
8971                 bnxt_del_napi(bp);
8972         }
8973         bnxt_free_mem(bp, irq_re_init);
8974 }
8975
8976 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8977 {
8978         int rc = 0;
8979
8980 #ifdef CONFIG_BNXT_SRIOV
8981         if (bp->sriov_cfg) {
8982                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
8983                                                       !bp->sriov_cfg,
8984                                                       BNXT_SRIOV_CFG_WAIT_TMO);
8985                 if (rc)
8986                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
8987         }
8988 #endif
8989         __bnxt_close_nic(bp, irq_re_init, link_re_init);
8990         return rc;
8991 }
8992
8993 static int bnxt_close(struct net_device *dev)
8994 {
8995         struct bnxt *bp = netdev_priv(dev);
8996
8997         bnxt_hwmon_close(bp);
8998         bnxt_close_nic(bp, true, true);
8999         bnxt_hwrm_shutdown_link(bp);
9000         bnxt_hwrm_if_change(bp, false);
9001         return 0;
9002 }
9003
9004 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9005                                    u16 *val)
9006 {
9007         struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9008         struct hwrm_port_phy_mdio_read_input req = {0};
9009         int rc;
9010
9011         if (bp->hwrm_spec_code < 0x10a00)
9012                 return -EOPNOTSUPP;
9013
9014         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9015         req.port_id = cpu_to_le16(bp->pf.port_id);
9016         req.phy_addr = phy_addr;
9017         req.reg_addr = cpu_to_le16(reg & 0x1f);
9018         if (mdio_phy_id_is_c45(phy_addr)) {
9019                 req.cl45_mdio = 1;
9020                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9021                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9022                 req.reg_addr = cpu_to_le16(reg);
9023         }
9024
9025         mutex_lock(&bp->hwrm_cmd_lock);
9026         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9027         if (!rc)
9028                 *val = le16_to_cpu(resp->reg_data);
9029         mutex_unlock(&bp->hwrm_cmd_lock);
9030         return rc;
9031 }
9032
9033 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9034                                     u16 val)
9035 {
9036         struct hwrm_port_phy_mdio_write_input req = {0};
9037
9038         if (bp->hwrm_spec_code < 0x10a00)
9039                 return -EOPNOTSUPP;
9040
9041         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9042         req.port_id = cpu_to_le16(bp->pf.port_id);
9043         req.phy_addr = phy_addr;
9044         req.reg_addr = cpu_to_le16(reg & 0x1f);
9045         if (mdio_phy_id_is_c45(phy_addr)) {
9046                 req.cl45_mdio = 1;
9047                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9048                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9049                 req.reg_addr = cpu_to_le16(reg);
9050         }
9051         req.reg_data = cpu_to_le16(val);
9052
9053         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9054 }
9055
9056 /* rtnl_lock held */
9057 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9058 {
9059         struct mii_ioctl_data *mdio = if_mii(ifr);
9060         struct bnxt *bp = netdev_priv(dev);
9061         int rc;
9062
9063         switch (cmd) {
9064         case SIOCGMIIPHY:
9065                 mdio->phy_id = bp->link_info.phy_addr;
9066
9067                 /* fallthru */
9068         case SIOCGMIIREG: {
9069                 u16 mii_regval = 0;
9070
9071                 if (!netif_running(dev))
9072                         return -EAGAIN;
9073
9074                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9075                                              &mii_regval);
9076                 mdio->val_out = mii_regval;
9077                 return rc;
9078         }
9079
9080         case SIOCSMIIREG:
9081                 if (!netif_running(dev))
9082                         return -EAGAIN;
9083
9084                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9085                                                 mdio->val_in);
9086
9087         default:
9088                 /* do nothing */
9089                 break;
9090         }
9091         return -EOPNOTSUPP;
9092 }
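
/* Editorial usage note: the ioctls above back the legacy MII interface,
 * so a userspace tool issuing SIOCGMIIPHY followed by SIOCGMIIREG (for
 * example mii-tool, or a small program filling struct mii_ioctl_data
 * from <linux/mii.h>) can read PHY registers via firmware while the
 * interface is running.
 */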
9093
9094 static void bnxt_get_ring_stats(struct bnxt *bp,
9095                                 struct rtnl_link_stats64 *stats)
9096 {
9097         int i;
9098
9100         for (i = 0; i < bp->cp_nr_rings; i++) {
9101                 struct bnxt_napi *bnapi = bp->bnapi[i];
9102                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9103                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
9104
9105                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
9106                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
9107                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
9108
9109                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
9110                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
9111                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
9112
9113                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
9114                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
9115                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
9116
9117                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
9118                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
9119                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
9120
9121                 stats->rx_missed_errors +=
9122                         le64_to_cpu(hw_stats->rx_discard_pkts);
9123
9124                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
9125
9126                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
9127         }
9128 }
9129
9130 static void bnxt_add_prev_stats(struct bnxt *bp,
9131                                 struct rtnl_link_stats64 *stats)
9132 {
9133         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9134
9135         stats->rx_packets += prev_stats->rx_packets;
9136         stats->tx_packets += prev_stats->tx_packets;
9137         stats->rx_bytes += prev_stats->rx_bytes;
9138         stats->tx_bytes += prev_stats->tx_bytes;
9139         stats->rx_missed_errors += prev_stats->rx_missed_errors;
9140         stats->multicast += prev_stats->multicast;
9141         stats->tx_dropped += prev_stats->tx_dropped;
9142 }
9143
9144 static void
9145 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9146 {
9147         struct bnxt *bp = netdev_priv(dev);
9148
9149         set_bit(BNXT_STATE_READ_STATS, &bp->state);
9150         /* Make sure bnxt_close_nic() sees that we are reading stats before
9151          * we check the BNXT_STATE_OPEN flag.
9152          */
9153         smp_mb__after_atomic();
9154         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9155                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9156                 *stats = bp->net_stats_prev;
9157                 return;
9158         }
9159
9160         bnxt_get_ring_stats(bp, stats);
9161         bnxt_add_prev_stats(bp, stats);
9162
9163         if (bp->flags & BNXT_FLAG_PORT_STATS) {
9164                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
9165                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
9166
9167                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
9168                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
9169                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
9170                                           le64_to_cpu(rx->rx_ovrsz_frames) +
9171                                           le64_to_cpu(rx->rx_runt_frames);
9172                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
9173                                    le64_to_cpu(rx->rx_jbr_frames);
9174                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
9175                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
9176                 stats->tx_errors = le64_to_cpu(tx->tx_err);
9177         }
9178         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9179 }
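
/* Editorial note on the BNXT_STATE_READ_STATS handshake above: the
 * reader sets READ_STATS, executes a full barrier, then tests OPEN;
 * __bnxt_close_nic() clears OPEN, executes a full barrier, then waits in
 * bnxt_drv_busy() until READ_STATS drops.  Whichever side wins, either
 * the reader sees OPEN cleared and returns the saved stats, or the
 * closer sees READ_STATS set and waits, so ring statistics memory is
 * never freed under an in-flight reader.
 */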
9180
9181 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9182 {
9183         struct net_device *dev = bp->dev;
9184         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9185         struct netdev_hw_addr *ha;
9186         u8 *haddr;
9187         int mc_count = 0;
9188         bool update = false;
9189         int off = 0;
9190
9191         netdev_for_each_mc_addr(ha, dev) {
9192                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9193                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9194                         vnic->mc_list_count = 0;
9195                         return false;
9196                 }
9197                 haddr = ha->addr;
9198                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9199                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9200                         update = true;
9201                 }
9202                 off += ETH_ALEN;
9203                 mc_count++;
9204         }
9205         if (mc_count)
9206                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9207
9208         if (mc_count != vnic->mc_list_count) {
9209                 vnic->mc_list_count = mc_count;
9210                 update = true;
9211         }
9212         return update;
9213 }
9214
9215 static bool bnxt_uc_list_updated(struct bnxt *bp)
9216 {
9217         struct net_device *dev = bp->dev;
9218         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9219         struct netdev_hw_addr *ha;
9220         int off = 0;
9221
9222         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9223                 return true;
9224
9225         netdev_for_each_uc_addr(ha, dev) {
9226                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9227                         return true;
9228
9229                 off += ETH_ALEN;
9230         }
9231         return false;
9232 }
9233
9234 static void bnxt_set_rx_mode(struct net_device *dev)
9235 {
9236         struct bnxt *bp = netdev_priv(dev);
9237         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9238         u32 mask = vnic->rx_mask;
9239         bool mc_update = false;
9240         bool uc_update;
9241
9242         if (!netif_running(dev))
9243                 return;
9244
9245         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9246                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
9247                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9248                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
9249
9250         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
9251                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9252
9253         uc_update = bnxt_uc_list_updated(bp);
9254
9255         if (dev->flags & IFF_BROADCAST)
9256                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
9257         if (dev->flags & IFF_ALLMULTI) {
9258                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9259                 vnic->mc_list_count = 0;
9260         } else {
9261                 mc_update = bnxt_mc_list_updated(bp, &mask);
9262         }
9263
9264         if (mask != vnic->rx_mask || uc_update || mc_update) {
9265                 vnic->rx_mask = mask;
9266
9267                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
9268                 bnxt_queue_sp_work(bp);
9269         }
9270 }
9271
9272 static int bnxt_cfg_rx_mode(struct bnxt *bp)
9273 {
9274         struct net_device *dev = bp->dev;
9275         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9276         struct netdev_hw_addr *ha;
9277         int i, off = 0, rc;
9278         bool uc_update;
9279
9280         netif_addr_lock_bh(dev);
9281         uc_update = bnxt_uc_list_updated(bp);
9282         netif_addr_unlock_bh(dev);
9283
9284         if (!uc_update)
9285                 goto skip_uc;
9286
9287         mutex_lock(&bp->hwrm_cmd_lock);
9288         for (i = 1; i < vnic->uc_filter_count; i++) {
9289                 struct hwrm_cfa_l2_filter_free_input req = {0};
9290
9291                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
9292                                        -1);
9293
9294                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
9295
9296                 rc = _hwrm_send_message(bp, &req, sizeof(req),
9297                                         HWRM_CMD_TIMEOUT);
9298         }
9299         mutex_unlock(&bp->hwrm_cmd_lock);
9300
9301         vnic->uc_filter_count = 1;
9302
9303         netif_addr_lock_bh(dev);
9304         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
9305                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9306         } else {
9307                 netdev_for_each_uc_addr(ha, dev) {
9308                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
9309                         off += ETH_ALEN;
9310                         vnic->uc_filter_count++;
9311                 }
9312         }
9313         netif_addr_unlock_bh(dev);
9314
9315         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
9316                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
9317                 if (rc) {
9318                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
9319                                    rc);
9320                         vnic->uc_filter_count = i;
9321                         return rc;
9322                 }
9323         }
9324
9325 skip_uc:
9326         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9327         if (rc && vnic->mc_list_count) {
9328                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
9329                             rc);
9330                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9331                 vnic->mc_list_count = 0;
9332                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9333         }
9334         if (rc)
9335                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
9336                            rc);
9337
9338         return rc;
9339 }
9340
9341 static bool bnxt_can_reserve_rings(struct bnxt *bp)
9342 {
9343 #ifdef CONFIG_BNXT_SRIOV
9344         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
9345                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9346
9347                 /* No minimum rings were provisioned by the PF.  Don't
9348                  * reserve rings by default when device is down.
9349                  */
9350                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
9351                         return true;
9352
9353                 if (!netif_running(bp->dev))
9354                         return false;
9355         }
9356 #endif
9357         return true;
9358 }
9359
9360 /* If the chip and firmware support RFS */
9361 static bool bnxt_rfs_supported(struct bnxt *bp)
9362 {
9363         if (bp->flags & BNXT_FLAG_CHIP_P5) {
9364                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
9365                         return true;
9366                 return false;
9367         }
9368         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9369                 return true;
9370         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9371                 return true;
9372         return false;
9373 }
9374
9375 /* If runtime conditions support RFS */
9376 static bool bnxt_rfs_capable(struct bnxt *bp)
9377 {
9378 #ifdef CONFIG_RFS_ACCEL
9379         int vnics, max_vnics, max_rss_ctxs;
9380
9381         if (bp->flags & BNXT_FLAG_CHIP_P5)
9382                 return bnxt_rfs_supported(bp);
9383         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
9384                 return false;
9385
9386         vnics = 1 + bp->rx_nr_rings;
9387         max_vnics = bnxt_get_max_func_vnics(bp);
9388         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
9389
9390         /* RSS contexts not a limiting factor */
9391         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9392                 max_rss_ctxs = max_vnics;
9393         if (vnics > max_vnics || vnics > max_rss_ctxs) {
9394                 if (bp->rx_nr_rings > 1)
9395                         netdev_warn(bp->dev,
9396                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9397                                     min(max_rss_ctxs - 1, max_vnics - 1));
9398                 return false;
9399         }
9400
9401         if (!BNXT_NEW_RM(bp))
9402                 return true;
9403
9404         if (vnics == bp->hw_resc.resv_vnics)
9405                 return true;
9406
9407         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
9408         if (vnics <= bp->hw_resc.resv_vnics)
9409                 return true;
9410
9411         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
9412         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
9413         return false;
9414 #else
9415         return false;
9416 #endif
9417 }
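
/* Worked example (editorial, hypothetical numbers): aRFS needs one VNIC
 * per RX ring plus the default VNIC, so 8 RX rings require
 * vnics = 1 + 8 = 9.  If only 8 VNICs or 8 RSS contexts are available,
 * the function returns false and the warning suggests at most
 * min(8 - 1, 8 - 1) = 7 RX rings.
 */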
9418
9419 static netdev_features_t bnxt_fix_features(struct net_device *dev,
9420                                            netdev_features_t features)
9421 {
9422         struct bnxt *bp = netdev_priv(dev);
9423
9424         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
9425                 features &= ~NETIF_F_NTUPLE;
9426
9427         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9428                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9429
9430         if (!(features & NETIF_F_GRO))
9431                 features &= ~NETIF_F_GRO_HW;
9432
9433         if (features & NETIF_F_GRO_HW)
9434                 features &= ~NETIF_F_LRO;
9435
9436         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
9437          * turned on or off together.
9438          */
9439         if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9440             (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9441                 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9442                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9443                                       NETIF_F_HW_VLAN_STAG_RX);
9444                 else
9445                         features |= NETIF_F_HW_VLAN_CTAG_RX |
9446                                     NETIF_F_HW_VLAN_STAG_RX;
9447         }
9448 #ifdef CONFIG_BNXT_SRIOV
9449         if (BNXT_VF(bp)) {
9450                 if (bp->vf.vlan) {
9451                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9452                                       NETIF_F_HW_VLAN_STAG_RX);
9453                 }
9454         }
9455 #endif
9456         return features;
9457 }
9458
9459 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9460 {
9461         struct bnxt *bp = netdev_priv(dev);
9462         u32 flags = bp->flags;
9463         u32 changes;
9464         int rc = 0;
9465         bool re_init = false;
9466         bool update_tpa = false;
9467
9468         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
9469         if (features & NETIF_F_GRO_HW)
9470                 flags |= BNXT_FLAG_GRO;
9471         else if (features & NETIF_F_LRO)
9472                 flags |= BNXT_FLAG_LRO;
9473
9474         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9475                 flags &= ~BNXT_FLAG_TPA;
9476
9477         if (features & NETIF_F_HW_VLAN_CTAG_RX)
9478                 flags |= BNXT_FLAG_STRIP_VLAN;
9479
9480         if (features & NETIF_F_NTUPLE)
9481                 flags |= BNXT_FLAG_RFS;
9482
9483         changes = flags ^ bp->flags;
9484         if (changes & BNXT_FLAG_TPA) {
9485                 update_tpa = true;
9486                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9487                     (flags & BNXT_FLAG_TPA) == 0 ||
9488                     (bp->flags & BNXT_FLAG_CHIP_P5))
9489                         re_init = true;
9490         }
9491
9492         if (changes & ~BNXT_FLAG_TPA)
9493                 re_init = true;
9494
9495         if (flags != bp->flags) {
9496                 u32 old_flags = bp->flags;
9497
9498                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9499                         bp->flags = flags;
9500                         if (update_tpa)
9501                                 bnxt_set_ring_params(bp);
9502                         return rc;
9503                 }
9504
9505                 if (re_init) {
9506                         bnxt_close_nic(bp, false, false);
9507                         bp->flags = flags;
9508                         if (update_tpa)
9509                                 bnxt_set_ring_params(bp);
9510
9511                         return bnxt_open_nic(bp, false, false);
9512                 }
9513                 if (update_tpa) {
9514                         bp->flags = flags;
9515                         rc = bnxt_set_tpa(bp, !!(flags & BNXT_FLAG_TPA));
9518                         if (rc)
9519                                 bp->flags = old_flags;
9520                 }
9521         }
9522         return rc;
9523 }
9524
9525 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9526                                        u32 ring_id, u32 *prod, u32 *cons)
9527 {
9528         struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9529         struct hwrm_dbg_ring_info_get_input req = {0};
9530         int rc;
9531
9532         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9533         req.ring_type = ring_type;
9534         req.fw_ring_id = cpu_to_le32(ring_id);
9535         mutex_lock(&bp->hwrm_cmd_lock);
9536         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9537         if (!rc) {
9538                 *prod = le32_to_cpu(resp->producer_index);
9539                 *cons = le32_to_cpu(resp->consumer_index);
9540         }
9541         mutex_unlock(&bp->hwrm_cmd_lock);
9542         return rc;
9543 }
9544
9545 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9546 {
9547         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9548         int i = bnapi->index;
9549
9550         if (!txr)
9551                 return;
9552
9553         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9554                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9555                     txr->tx_cons);
9556 }
9557
9558 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9559 {
9560         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9561         int i = bnapi->index;
9562
9563         if (!rxr)
9564                 return;
9565
9566         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9567                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9568                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9569                     rxr->rx_sw_agg_prod);
9570 }
9571
9572 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9573 {
9574         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9575         int i = bnapi->index;
9576
9577         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9578                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9579 }
9580
9581 static void bnxt_dbg_dump_states(struct bnxt *bp)
9582 {
9583         int i;
9584         struct bnxt_napi *bnapi;
9585
9586         for (i = 0; i < bp->cp_nr_rings; i++) {
9587                 bnapi = bp->bnapi[i];
9588                 if (netif_msg_drv(bp)) {
9589                         bnxt_dump_tx_sw_state(bnapi);
9590                         bnxt_dump_rx_sw_state(bnapi);
9591                         bnxt_dump_cp_sw_state(bnapi);
9592                 }
9593         }
9594 }
9595
9596 static void bnxt_reset_task(struct bnxt *bp, bool silent)
9597 {
9598         if (!silent)
9599                 bnxt_dbg_dump_states(bp);
9600         if (netif_running(bp->dev)) {
9601                 int rc;
9602
9603                 if (!silent)
9604                         bnxt_ulp_stop(bp);
9605                 bnxt_close_nic(bp, false, false);
9606                 rc = bnxt_open_nic(bp, false, false);
9607                 if (!silent && !rc)
9608                         bnxt_ulp_start(bp);
9609         }
9610 }
9611
9612 static void bnxt_tx_timeout(struct net_device *dev)
9613 {
9614         struct bnxt *bp = netdev_priv(dev);
9615
9616         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9617         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
9618         bnxt_queue_sp_work(bp);
9619 }
9620
9621 static void bnxt_timer(struct timer_list *t)
9622 {
9623         struct bnxt *bp = from_timer(bp, t, timer);
9624         struct net_device *dev = bp->dev;
9625
9626         if (!netif_running(dev))
9627                 return;
9628
9629         if (atomic_read(&bp->intr_sem) != 0)
9630                 goto bnxt_restart_timer;
9631
9632         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
9633             bp->stats_coal_ticks) {
9634                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
9635                 bnxt_queue_sp_work(bp);
9636         }
9637
9638         if (bnxt_tc_flower_enabled(bp)) {
9639                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
9640                 bnxt_queue_sp_work(bp);
9641         }
9642
9643         if (bp->link_info.phy_retry) {
9644                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
9645                         bp->link_info.phy_retry = 0;
9646                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
9647                 } else {
9648                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
9649                         bnxt_queue_sp_work(bp);
9650                 }
9651         }
9652
9653         if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
9654                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
9655                 bnxt_queue_sp_work(bp);
9656         }
9657 bnxt_restart_timer:
9658         mod_timer(&bp->timer, jiffies + bp->current_interval);
9659 }
9660
9661 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
9662 {
9663         /* We are called from bnxt_sp_task(), which has BNXT_STATE_IN_SP_TASK
9664          * set.  If the device is being closed, bnxt_close() may be holding
9665          * the rtnl lock while waiting for BNXT_STATE_IN_SP_TASK to clear, so
9666          * we must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
9667          */
9668         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9669         rtnl_lock();
9670 }
9671
9672 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
9673 {
9674         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9675         rtnl_unlock();
9676 }
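
/* A sketch of the ordering the two helpers above enforce (illustrative,
 * not driver code):
 *
 *   bnxt_sp_task()                    bnxt_close() path
 *   --------------                    -----------------
 *   set_bit(IN_SP_TASK)               rtnl_lock()
 *   ...                               wait for IN_SP_TASK to clear
 *   clear_bit(IN_SP_TASK)
 *   rtnl_lock()  <-- safe only because the bit was cleared first
 *
 * bnxt_rtnl_unlock_sp() sets the bit again before dropping the lock so
 * that the final clear_bit() at the end of bnxt_sp_task() stays balanced.
 */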
9677
9678 /* Only called from bnxt_sp_task() */
9679 static void bnxt_reset(struct bnxt *bp, bool silent)
9680 {
9681         bnxt_rtnl_lock_sp(bp);
9682         if (test_bit(BNXT_STATE_OPEN, &bp->state))
9683                 bnxt_reset_task(bp, silent);
9684         bnxt_rtnl_unlock_sp(bp);
9685 }
9686
9687 static void bnxt_chk_missed_irq(struct bnxt *bp)
9688 {
9689         int i;
9690
9691         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9692                 return;
9693
9694         for (i = 0; i < bp->cp_nr_rings; i++) {
9695                 struct bnxt_napi *bnapi = bp->bnapi[i];
9696                 struct bnxt_cp_ring_info *cpr;
9697                 u32 fw_ring_id;
9698                 int j;
9699
9700                 if (!bnapi)
9701                         continue;
9702
9703                 cpr = &bnapi->cp_ring;
9704                 for (j = 0; j < 2; j++) {
9705                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
9706                         u32 val[2];
9707
9708                         if (!cpr2 || cpr2->has_more_work ||
9709                             !bnxt_has_work(bp, cpr2))
9710                                 continue;
9711
9712                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
9713                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
9714                                 continue;
9715                         }
9716                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
9717                         bnxt_dbg_hwrm_ring_info_get(bp,
9718                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
9719                                 fw_ring_id, &val[0], &val[1]);
9720                         cpr->missed_irqs++;
9721                 }
9722         }
9723 }
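
/* The loop above is a heuristic for P5 chips: a completion ring that
 * still has work (bnxt_has_work()) but whose raw consumer index has not
 * advanced since the previous scan is assumed to have missed an
 * interrupt, so its firmware ring state is queried for debugging and
 * cpr->missed_irqs is bumped.  It runs from bnxt_sp_task() on
 * BNXT_RING_COAL_NOW_SP_EVENT, raised once per timer tick while the
 * carrier is up.
 */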
9724
9725 static void bnxt_cfg_ntp_filters(struct bnxt *);
9726
9727 static void bnxt_sp_task(struct work_struct *work)
9728 {
9729         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
9730
9731         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9732         smp_mb__after_atomic();
9733         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9734                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9735                 return;
9736         }
9737
9738         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
9739                 bnxt_cfg_rx_mode(bp);
9740
9741         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
9742                 bnxt_cfg_ntp_filters(bp);
9743         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
9744                 bnxt_hwrm_exec_fwd_req(bp);
9745         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9746                 bnxt_hwrm_tunnel_dst_port_alloc(
9747                         bp, bp->vxlan_port,
9748                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9749         }
9750         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9751                 bnxt_hwrm_tunnel_dst_port_free(
9752                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9753         }
9754         if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9755                 bnxt_hwrm_tunnel_dst_port_alloc(
9756                         bp, bp->nge_port,
9757                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9758         }
9759         if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9760                 bnxt_hwrm_tunnel_dst_port_free(
9761                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9762         }
9763         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
9764                 bnxt_hwrm_port_qstats(bp);
9765                 bnxt_hwrm_port_qstats_ext(bp);
9766                 bnxt_hwrm_pcie_qstats(bp);
9767         }
9768
9769         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
9770                 int rc;
9771
9772                 mutex_lock(&bp->link_lock);
9773                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
9774                                        &bp->sp_event))
9775                         bnxt_hwrm_phy_qcaps(bp);
9776
9777                 rc = bnxt_update_link(bp, true);
9778                 mutex_unlock(&bp->link_lock);
9779                 if (rc)
9780                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
9781                                    rc);
9782         }
9783         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
9784                 int rc;
9785
9786                 mutex_lock(&bp->link_lock);
9787                 rc = bnxt_update_phy_setting(bp);
9788                 mutex_unlock(&bp->link_lock);
9789                 if (rc) {
9790                         netdev_warn(bp->dev, "update phy settings retry failed\n");
9791                 } else {
9792                         bp->link_info.phy_retry = false;
9793                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
9794                 }
9795         }
9796         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
9797                 mutex_lock(&bp->link_lock);
9798                 bnxt_get_port_module_status(bp);
9799                 mutex_unlock(&bp->link_lock);
9800         }
9801
9802         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
9803                 bnxt_tc_flow_stats_work(bp);
9804
9805         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
9806                 bnxt_chk_missed_irq(bp);
9807
9808         /* The reset functions below will clear BNXT_STATE_IN_SP_TASK.  They
9809          * must be the last functions called before exiting.
9810          */
9811         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
9812                 bnxt_reset(bp, false);
9813
9814         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
9815                 bnxt_reset(bp, true);
9816
9817         smp_mb__before_atomic();
9818         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9819 }
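
/* Producer side of the event dispatch handled above (a sketch; this is
 * the same pattern bnxt_timer() and the interrupt paths already use):
 *
 *   set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
 *   bnxt_queue_sp_work(bp);
 *
 * Because the handlers use test_and_clear_bit(), an event raised many
 * times before the work item runs is still serviced only once per pass.
 */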
9820
9821 /* Under rtnl_lock */
9822 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
9823                      int tx_xdp)
9824 {
9825         int max_rx, max_tx, tx_sets = 1;
9826         int tx_rings_needed, stats;
9827         int rx_rings = rx;
9828         int cp, vnics, rc;
9829
9830         if (tcs)
9831                 tx_sets = tcs;
9832
9833         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
9834         if (rc)
9835                 return rc;
9836
9837         if (max_rx < rx)
9838                 return -ENOMEM;
9839
9840         tx_rings_needed = tx * tx_sets + tx_xdp;
9841         if (max_tx < tx_rings_needed)
9842                 return -ENOMEM;
9843
9844         vnics = 1;
9845         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
9846                 vnics += rx_rings;
9847
9848         if (bp->flags & BNXT_FLAG_AGG_RINGS)
9849                 rx_rings <<= 1;
9850         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
9851         stats = cp;
9852         if (BNXT_NEW_RM(bp)) {
9853                 cp += bnxt_get_ulp_msix_num(bp);
9854                 stats += bnxt_get_ulp_stat_ctxs(bp);
9855         }
9856         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
9857                                      stats, vnics);
9858 }
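
/* Worked example for bnxt_check_rings() (hypothetical numbers): with
 * tx = 4, rx = 4, sh = true, tcs = 2, tx_xdp = 0 on a non-P5 NIC with
 * RFS and aggregation rings enabled:
 *
 *   tx_rings_needed = 4 * 2 + 0        = 8
 *   vnics           = 1 + 4 (RFS)      = 5
 *   rx_rings        = 4 << 1 (agg)     = 8
 *   cp              = max(8, 4)        = 8
 *
 * plus ULP MSI-X vectors and stat contexts when BNXT_NEW_RM() is true.
 */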
9859
9860 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
9861 {
9862         if (bp->bar2) {
9863                 pci_iounmap(pdev, bp->bar2);
9864                 bp->bar2 = NULL;
9865         }
9866
9867         if (bp->bar1) {
9868                 pci_iounmap(pdev, bp->bar1);
9869                 bp->bar1 = NULL;
9870         }
9871
9872         if (bp->bar0) {
9873                 pci_iounmap(pdev, bp->bar0);
9874                 bp->bar0 = NULL;
9875         }
9876 }
9877
9878 static void bnxt_cleanup_pci(struct bnxt *bp)
9879 {
9880         bnxt_unmap_bars(bp, bp->pdev);
9881         pci_release_regions(bp->pdev);
9882         pci_disable_device(bp->pdev);
9883 }
9884
9885 static void bnxt_init_dflt_coal(struct bnxt *bp)
9886 {
9887         struct bnxt_coal *coal;
9888
9889         /* Tick values are in microseconds.  One completion record is
9890          * counted as bufs_per_record coalescing buffer units (coal_bufs).
9891          */
9892         coal = &bp->rx_coal;
9893         coal->coal_ticks = 10;
9894         coal->coal_bufs = 30;
9895         coal->coal_ticks_irq = 1;
9896         coal->coal_bufs_irq = 2;
9897         coal->idle_thresh = 50;
9898         coal->bufs_per_record = 2;
9899         coal->budget = 64;              /* NAPI budget */
9900
9901         coal = &bp->tx_coal;
9902         coal->coal_ticks = 28;
9903         coal->coal_bufs = 30;
9904         coal->coal_ticks_irq = 2;
9905         coal->coal_bufs_irq = 2;
9906         coal->bufs_per_record = 1;
9907
9908         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
9909 }
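
/* Worked example of the units above (RX defaults): bufs_per_record = 2
 * and coal_bufs = 30, so an interrupt is coalesced over roughly
 * 30 / 2 = 15 completion records or coal_ticks = 10 usec, whichever is
 * reached first; the *_irq variants apply while interrupt servicing is
 * still in progress.
 */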
9910
9911 static int bnxt_fw_init_one_p1(struct bnxt *bp)
9912 {
9913         int rc;
9914
9915         bp->fw_cap = 0;
9916         rc = bnxt_hwrm_ver_get(bp);
9917         if (rc)
9918                 return rc;
9919
9920         if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
9921                 rc = bnxt_alloc_kong_hwrm_resources(bp);
9922                 if (rc)
9923                         bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
9924         }
9925
9926         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
9927             bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
9928                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
9929                 if (rc)
9930                         return rc;
9931         }
9932         rc = bnxt_hwrm_func_reset(bp);
9933         if (rc)
9934                 return -ENODEV;
9935
9936         bnxt_hwrm_fw_set_time(bp);
9937         return 0;
9938 }
9939
9940 static int bnxt_fw_init_one_p2(struct bnxt *bp)
9941 {
9942         int rc;
9943
9944         /* Get the MAX capabilities for this function */
9945         rc = bnxt_hwrm_func_qcaps(bp);
9946         if (rc) {
9947                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
9948                            rc);
9949                 return -ENODEV;
9950         }
9951
9952         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
9953         if (rc)
9954                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
9955                             rc);
9956
9957         rc = bnxt_hwrm_func_drv_rgtr(bp);
9958         if (rc)
9959                 return -ENODEV;
9960
9961         rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
9962         if (rc)
9963                 return -ENODEV;
9964
9965         bnxt_hwrm_func_qcfg(bp);
9966         bnxt_hwrm_vnic_qcaps(bp);
9967         bnxt_hwrm_port_led_qcaps(bp);
9968         bnxt_ethtool_init(bp);
9969         bnxt_dcb_init(bp);
9970         return 0;
9971 }
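
/* Taken together, the two phases above split firmware bring-up: p1
 * establishes the HWRM channel (version query, optional KONG mailbox
 * and short-command buffers, function reset, firmware time), while p2
 * queries capabilities and registers the driver and its async events
 * with the firmware.  Both must succeed before bnxt_init_one()
 * provisions rings and interrupts.
 */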
9972
9973 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
9974 {
9975         int rc;
9976         struct bnxt *bp = netdev_priv(dev);
9977
9978         SET_NETDEV_DEV(dev, &pdev->dev);
9979
9980         /* enable device (incl. PCI PM wakeup), and bus-mastering */
9981         rc = pci_enable_device(pdev);
9982         if (rc) {
9983                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9984                 goto init_err;
9985         }
9986
9987         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9988                 dev_err(&pdev->dev,
9989                         "Cannot find PCI device base address, aborting\n");
9990                 rc = -ENODEV;
9991                 goto init_err_disable;
9992         }
9993
9994         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9995         if (rc) {
9996                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9997                 goto init_err_disable;
9998         }
9999
10000         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
10001             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
10002                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                      rc = -EIO;
10003                 goto init_err_disable;
10004         }
10005
10006         pci_set_master(pdev);
10007
10008         bp->dev = dev;
10009         bp->pdev = pdev;
10010
10011         bp->bar0 = pci_ioremap_bar(pdev, 0);
10012         if (!bp->bar0) {
10013                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
10014                 rc = -ENOMEM;
10015                 goto init_err_release;
10016         }
10017
10018         bp->bar1 = pci_ioremap_bar(pdev, 2);
10019         if (!bp->bar1) {
10020                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
10021                 rc = -ENOMEM;
10022                 goto init_err_release;
10023         }
10024
10025         bp->bar2 = pci_ioremap_bar(pdev, 4);
10026         if (!bp->bar2) {
10027                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
10028                 rc = -ENOMEM;
10029                 goto init_err_release;
10030         }
10031
10032         pci_enable_pcie_error_reporting(pdev);
10033
10034         INIT_WORK(&bp->sp_task, bnxt_sp_task);
10035
10036         spin_lock_init(&bp->ntp_fltr_lock);
10037 #if BITS_PER_LONG == 32
10038         spin_lock_init(&bp->db_lock);
10039 #endif
10040
10041         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
10042         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
10043
10044         bnxt_init_dflt_coal(bp);
10045
10046         timer_setup(&bp->timer, bnxt_timer, 0);
10047         bp->current_interval = BNXT_TIMER_INTERVAL;
10048
10049         clear_bit(BNXT_STATE_OPEN, &bp->state);
10050         return 0;
10051
10052 init_err_release:
10053         bnxt_unmap_bars(bp, pdev);
10054         pci_release_regions(pdev);
10055
10056 init_err_disable:
10057         pci_disable_device(pdev);
10058
10059 init_err:
10060         return rc;
10061 }
10062
10063 /* rtnl_lock held */
10064 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
10065 {
10066         struct sockaddr *addr = p;
10067         struct bnxt *bp = netdev_priv(dev);
10068         int rc = 0;
10069
10070         if (!is_valid_ether_addr(addr->sa_data))
10071                 return -EADDRNOTAVAIL;
10072
10073         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
10074                 return 0;
10075
10076         rc = bnxt_approve_mac(bp, addr->sa_data, true);
10077         if (rc)
10078                 return rc;
10079
10080         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10081         if (netif_running(dev)) {
10082                 bnxt_close_nic(bp, false, false);
10083                 rc = bnxt_open_nic(bp, false, false);
10084         }
10085
10086         return rc;
10087 }
10088
10089 /* rtnl_lock held */
10090 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
10091 {
10092         struct bnxt *bp = netdev_priv(dev);
10093
10094         if (netif_running(dev))
10095                 bnxt_close_nic(bp, false, false);
10096
10097         dev->mtu = new_mtu;
10098         bnxt_set_ring_params(bp);
10099
10100         if (netif_running(dev))
10101                 return bnxt_open_nic(bp, false, false);
10102
10103         return 0;
10104 }
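
/* bnxt_change_mtu() is reached via e.g. "ip link set dev eth0 mtu 9000"
 * (eth0 is a placeholder).  The networking core has already validated
 * new_mtu against dev->min_mtu/dev->max_mtu set in bnxt_init_one(), so
 * only the ring geometry has to be recomputed here.
 */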
10105
10106 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
10107 {
10108         struct bnxt *bp = netdev_priv(dev);
10109         bool sh = false;
10110         int rc;
10111
10112         if (tc > bp->max_tc) {
10113                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
10114                            tc, bp->max_tc);
10115                 return -EINVAL;
10116         }
10117
10118         if (netdev_get_num_tc(dev) == tc)
10119                 return 0;
10120
10121         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10122                 sh = true;
10123
10124         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
10125                               sh, tc, bp->tx_nr_rings_xdp);
10126         if (rc)
10127                 return rc;
10128
10129         /* Need to close the device and redo hw resource allocations */
10130         if (netif_running(bp->dev))
10131                 bnxt_close_nic(bp, true, false);
10132
10133         if (tc) {
10134                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
10135                 netdev_set_num_tc(dev, tc);
10136         } else {
10137                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10138                 netdev_reset_tc(dev);
10139         }
10140         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
10141         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
10142                                bp->tx_nr_rings + bp->rx_nr_rings;
10143
10144         if (netif_running(bp->dev))
10145                 return bnxt_open_nic(bp, true, false);
10146
10147         return 0;
10148 }
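
/* Worked example (hypothetical numbers): with tx_nr_rings_per_tc = 4,
 * rx_nr_rings = 4, shared rings and tc = 2, the result is
 * tx_nr_rings = 4 * 2 = 8 (plus any XDP rings) and
 * cp_nr_rings = max(8, 4) = 8.  One way to reach this path from
 * userspace (a sketch; eth0 is a placeholder):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1
 */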
10149
10150 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
10151                                   void *cb_priv)
10152 {
10153         struct bnxt *bp = cb_priv;
10154
10155         if (!bnxt_tc_flower_enabled(bp) ||
10156             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
10157                 return -EOPNOTSUPP;
10158
10159         switch (type) {
10160         case TC_SETUP_CLSFLOWER:
10161                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
10162         default:
10163                 return -EOPNOTSUPP;
10164         }
10165 }
10166
10167 static LIST_HEAD(bnxt_block_cb_list);
10168
10169 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
10170                          void *type_data)
10171 {
10172         struct bnxt *bp = netdev_priv(dev);
10173
10174         switch (type) {
10175         case TC_SETUP_BLOCK:
10176                 return flow_block_cb_setup_simple(type_data,
10177                                                   &bnxt_block_cb_list,
10178                                                   bnxt_setup_tc_block_cb,
10179                                                   bp, bp, true);
10180         case TC_SETUP_QDISC_MQPRIO: {
10181                 struct tc_mqprio_qopt *mqprio = type_data;
10182
10183                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
10184
10185                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
10186         }
10187         default:
10188                 return -EOPNOTSUPP;
10189         }
10190 }
10191
10192 #ifdef CONFIG_RFS_ACCEL
10193 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
10194                             struct bnxt_ntuple_filter *f2)
10195 {
10196         struct flow_keys *keys1 = &f1->fkeys;
10197         struct flow_keys *keys2 = &f2->fkeys;
10198
10199         if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
10200             keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
10201             keys1->ports.ports == keys2->ports.ports &&
10202             keys1->basic.ip_proto == keys2->basic.ip_proto &&
10203             keys1->basic.n_proto == keys2->basic.n_proto &&
10204             keys1->control.flags == keys2->control.flags &&
10205             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
10206             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
10207                 return true;
10208
10209         return false;
10210 }
10211
10212 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
10213                               u16 rxq_index, u32 flow_id)
10214 {
10215         struct bnxt *bp = netdev_priv(dev);
10216         struct bnxt_ntuple_filter *fltr, *new_fltr;
10217         struct flow_keys *fkeys;
10218         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
10219         int rc = 0, idx, bit_id, l2_idx = 0;
10220         struct hlist_head *head;
10221
10222         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
10223                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10224                 int off = 0, j;
10225
10226                 netif_addr_lock_bh(dev);
10227                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
10228                         if (ether_addr_equal(eth->h_dest,
10229                                              vnic->uc_list + off)) {
10230                                 l2_idx = j + 1;
10231                                 break;
10232                         }
10233                 }
10234                 netif_addr_unlock_bh(dev);
10235                 if (!l2_idx)
10236                         return -EINVAL;
10237         }
10238         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
10239         if (!new_fltr)
10240                 return -ENOMEM;
10241
10242         fkeys = &new_fltr->fkeys;
10243         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
10244                 rc = -EPROTONOSUPPORT;
10245                 goto err_free;
10246         }
10247
10248         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
10249              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
10250             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
10251              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
10252                 rc = -EPROTONOSUPPORT;
10253                 goto err_free;
10254         }
10255         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
10256             bp->hwrm_spec_code < 0x10601) {
10257                 rc = -EPROTONOSUPPORT;
10258                 goto err_free;
10259         }
10260         if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
10261             bp->hwrm_spec_code < 0x10601) {
10262                 rc = -EPROTONOSUPPORT;
10263                 goto err_free;
10264         }
10265
10266         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
10267         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
10268
10269         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
10270         head = &bp->ntp_fltr_hash_tbl[idx];
10271         rcu_read_lock();
10272         hlist_for_each_entry_rcu(fltr, head, hash) {
10273                 if (bnxt_fltr_match(fltr, new_fltr)) {
10274                         rcu_read_unlock();
10275                         rc = 0;
10276                         goto err_free;
10277                 }
10278         }
10279         rcu_read_unlock();
10280
10281         spin_lock_bh(&bp->ntp_fltr_lock);
10282         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
10283                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
10284         if (bit_id < 0) {
10285                 spin_unlock_bh(&bp->ntp_fltr_lock);
10286                 rc = -ENOMEM;
10287                 goto err_free;
10288         }
10289
10290         new_fltr->sw_id = (u16)bit_id;
10291         new_fltr->flow_id = flow_id;
10292         new_fltr->l2_fltr_idx = l2_idx;
10293         new_fltr->rxq = rxq_index;
10294         hlist_add_head_rcu(&new_fltr->hash, head);
10295         bp->ntp_fltr_count++;
10296         spin_unlock_bh(&bp->ntp_fltr_lock);
10297
10298         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
10299         bnxt_queue_sp_work(bp);
10300
10301         return new_fltr->sw_id;
10302
10303 err_free:
10304         kfree(new_fltr);
10305         return rc;
10306 }
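
/* bnxt_rx_flow_steer() is the aRFS (.ndo_rx_flow_steer) hook.  A usage
 * sketch for enabling it (eth0 and the table sizes are placeholders):
 *
 *   echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *   echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *   ethtool -K eth0 ntuple on
 *
 * Note the filter is only queued here; the HWRM call that programs the
 * NIC runs later from bnxt_sp_task() via BNXT_RX_NTP_FLTR_SP_EVENT.
 */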
10307
10308 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
10309 {
10310         int i;
10311
10312         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
10313                 struct hlist_head *head;
10314                 struct hlist_node *tmp;
10315                 struct bnxt_ntuple_filter *fltr;
10316                 int rc;
10317
10318                 head = &bp->ntp_fltr_hash_tbl[i];
10319                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
10320                         bool del = false;
10321
10322                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
10323                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
10324                                                         fltr->flow_id,
10325                                                         fltr->sw_id)) {
10326                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
10327                                                                          fltr);
10328                                         del = true;
10329                                 }
10330                         } else {
10331                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
10332                                                                        fltr);
10333                                 if (rc)
10334                                         del = true;
10335                                 else
10336                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
10337                         }
10338
10339                         if (del) {
10340                                 spin_lock_bh(&bp->ntp_fltr_lock);
10341                                 hlist_del_rcu(&fltr->hash);
10342                                 bp->ntp_fltr_count--;
10343                                 spin_unlock_bh(&bp->ntp_fltr_lock);
10344                                 synchronize_rcu();
10345                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
10346                                 kfree(fltr);
10347                         }
10348                 }
10349         }
10350         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
10351                 netdev_info(bp->dev, "Received PF driver unload event!\n");
10352 }
10353
10354 #else
10355
10356 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
10357 {
10358 }
10359
10360 #endif /* CONFIG_RFS_ACCEL */
10361
10362 static void bnxt_udp_tunnel_add(struct net_device *dev,
10363                                 struct udp_tunnel_info *ti)
10364 {
10365         struct bnxt *bp = netdev_priv(dev);
10366
10367         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
10368                 return;
10369
10370         if (!netif_running(dev))
10371                 return;
10372
10373         switch (ti->type) {
10374         case UDP_TUNNEL_TYPE_VXLAN:
10375                 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
10376                         return;
10377
10378                 bp->vxlan_port_cnt++;
10379                 if (bp->vxlan_port_cnt == 1) {
10380                         bp->vxlan_port = ti->port;
10381                         set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
10382                         bnxt_queue_sp_work(bp);
10383                 }
10384                 break;
10385         case UDP_TUNNEL_TYPE_GENEVE:
10386                 if (bp->nge_port_cnt && bp->nge_port != ti->port)
10387                         return;
10388
10389                 bp->nge_port_cnt++;
10390                 if (bp->nge_port_cnt == 1) {
10391                         bp->nge_port = ti->port;
10392                         set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
10393                 }
10394                 break;
10395         default:
10396                 return;
10397         }
10398
10399         bnxt_queue_sp_work(bp);
10400 }
10401
10402 static void bnxt_udp_tunnel_del(struct net_device *dev,
10403                                 struct udp_tunnel_info *ti)
10404 {
10405         struct bnxt *bp = netdev_priv(dev);
10406
10407         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
10408                 return;
10409
10410         if (!netif_running(dev))
10411                 return;
10412
10413         switch (ti->type) {
10414         case UDP_TUNNEL_TYPE_VXLAN:
10415                 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
10416                         return;
10417                 bp->vxlan_port_cnt--;
10418
10419                 if (bp->vxlan_port_cnt != 0)
10420                         return;
10421
10422                 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
10423                 break;
10424         case UDP_TUNNEL_TYPE_GENEVE:
10425                 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
10426                         return;
10427                 bp->nge_port_cnt--;
10428
10429                 if (bp->nge_port_cnt != 0)
10430                         return;
10431
10432                 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
10433                 break;
10434         default:
10435                 return;
10436         }
10437
10438         bnxt_queue_sp_work(bp);
10439 }
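
/* Both tunnel hooks above refcount a single VXLAN port and a single
 * GENEVE port: only the first add (port_cnt reaching 1) and the last
 * delete (port_cnt dropping back to 0) are forwarded to firmware, and
 * an add for a second, different port is ignored while the first is
 * still in use.
 */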
10440
10441 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
10442                                struct net_device *dev, u32 filter_mask,
10443                                int nlflags)
10444 {
10445         struct bnxt *bp = netdev_priv(dev);
10446
10447         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
10448                                        nlflags, filter_mask, NULL);
10449 }
10450
10451 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
10452                                u16 flags, struct netlink_ext_ack *extack)
10453 {
10454         struct bnxt *bp = netdev_priv(dev);
10455         struct nlattr *attr, *br_spec;
10456         int rem, rc = 0;
10457
10458         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
10459                 return -EOPNOTSUPP;
10460
10461         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10462         if (!br_spec)
10463                 return -EINVAL;
10464
10465         nla_for_each_nested(attr, br_spec, rem) {
10466                 u16 mode;
10467
10468                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
10469                         continue;
10470
10471                 if (nla_len(attr) < sizeof(mode))
10472                         return -EINVAL;
10473
10474                 mode = nla_get_u16(attr);
10475                 if (mode == bp->br_mode)
10476                         break;
10477
10478                 rc = bnxt_hwrm_set_br_mode(bp, mode);
10479                 if (!rc)
10480                         bp->br_mode = mode;
10481                 break;
10482         }
10483         return rc;
10484 }
10485
10486 int bnxt_get_port_parent_id(struct net_device *dev,
10487                             struct netdev_phys_item_id *ppid)
10488 {
10489         struct bnxt *bp = netdev_priv(dev);
10490
10491         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
10492                 return -EOPNOTSUPP;
10493
10494         /* The PF and its VF-reps only support the switchdev framework */
10495         if (!BNXT_PF(bp))
10496                 return -EOPNOTSUPP;
10497
10498         ppid->id_len = sizeof(bp->switch_id);
10499         memcpy(ppid->id, bp->switch_id, ppid->id_len);
10500
10501         return 0;
10502 }
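
/* The parent ID exported above surfaces to userspace as the port's
 * switch ID, e.g. (a sketch; eth0 is a placeholder):
 *
 *   ip -d link show eth0    # look for "switchid" in the output
 *
 * which lets tools group the PF and its VF representors as ports of
 * one logical switch.
 */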
10503
10504 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
10505 {
10506         struct bnxt *bp = netdev_priv(dev);
10507
10508         return &bp->dl_port;
10509 }
10510
10511 static const struct net_device_ops bnxt_netdev_ops = {
10512         .ndo_open               = bnxt_open,
10513         .ndo_start_xmit         = bnxt_start_xmit,
10514         .ndo_stop               = bnxt_close,
10515         .ndo_get_stats64        = bnxt_get_stats64,
10516         .ndo_set_rx_mode        = bnxt_set_rx_mode,
10517         .ndo_do_ioctl           = bnxt_ioctl,
10518         .ndo_validate_addr      = eth_validate_addr,
10519         .ndo_set_mac_address    = bnxt_change_mac_addr,
10520         .ndo_change_mtu         = bnxt_change_mtu,
10521         .ndo_fix_features       = bnxt_fix_features,
10522         .ndo_set_features       = bnxt_set_features,
10523         .ndo_tx_timeout         = bnxt_tx_timeout,
10524 #ifdef CONFIG_BNXT_SRIOV
10525         .ndo_get_vf_config      = bnxt_get_vf_config,
10526         .ndo_set_vf_mac         = bnxt_set_vf_mac,
10527         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
10528         .ndo_set_vf_rate        = bnxt_set_vf_bw,
10529         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
10530         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
10531         .ndo_set_vf_trust       = bnxt_set_vf_trust,
10532 #endif
10533         .ndo_setup_tc           = bnxt_setup_tc,
10534 #ifdef CONFIG_RFS_ACCEL
10535         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
10536 #endif
10537         .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
10538         .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
10539         .ndo_bpf                = bnxt_xdp,
10540         .ndo_xdp_xmit           = bnxt_xdp_xmit,
10541         .ndo_bridge_getlink     = bnxt_bridge_getlink,
10542         .ndo_bridge_setlink     = bnxt_bridge_setlink,
10543         .ndo_get_devlink_port   = bnxt_get_devlink_port,
10544 };
10545
10546 static void bnxt_remove_one(struct pci_dev *pdev)
10547 {
10548         struct net_device *dev = pci_get_drvdata(pdev);
10549         struct bnxt *bp = netdev_priv(dev);
10550
10551         if (BNXT_PF(bp)) {
10552                 bnxt_sriov_disable(bp);
10553                 bnxt_dl_unregister(bp);
10554         }
10555
10556         pci_disable_pcie_error_reporting(pdev);
10557         unregister_netdev(dev);
10558         bnxt_shutdown_tc(bp);
10559         bnxt_cancel_sp_work(bp);
10560         bp->sp_event = 0;
10561
10562         bnxt_clear_int_mode(bp);
10563         bnxt_hwrm_func_drv_unrgtr(bp);
10564         bnxt_free_hwrm_resources(bp);
10565         bnxt_free_hwrm_short_cmd_req(bp);
10566         bnxt_ethtool_free(bp);
10567         bnxt_dcb_free(bp);
10568         kfree(bp->edev);
10569         bp->edev = NULL;
10570         bnxt_cleanup_pci(bp);
10571         bnxt_free_ctx_mem(bp);
10572         kfree(bp->ctx);
10573         bp->ctx = NULL;
10574         bnxt_free_port_stats(bp);
10575         free_netdev(dev);
10576 }
10577
10578 static int bnxt_probe_phy(struct bnxt *bp)
10579 {
10580         int rc = 0;
10581         struct bnxt_link_info *link_info = &bp->link_info;
10582
10583         rc = bnxt_hwrm_phy_qcaps(bp);
10584         if (rc) {
10585                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
10586                            rc);
10587                 return rc;
10588         }
10589         mutex_init(&bp->link_lock);
10590
10591         rc = bnxt_update_link(bp, false);
10592         if (rc) {
10593                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
10594                            rc);
10595                 return rc;
10596         }
10597
10598         /* Older firmware does not have supported_auto_speeds, so assume
10599          * that all supported speeds can be autonegotiated.
10600          */
10601         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
10602                 link_info->support_auto_speeds = link_info->support_speeds;
10603
10604         /* Initialize the ethtool settings copy with NVM settings */
10605         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10606                 link_info->autoneg = BNXT_AUTONEG_SPEED;
10607                 if (bp->hwrm_spec_code >= 0x10201) {
10608                         if (link_info->auto_pause_setting &
10609                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10610                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10611                 } else {
10612                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10613                 }
10614                 link_info->advertising = link_info->auto_link_speeds;
10615         } else {
10616                 link_info->req_link_speed = link_info->force_link_speed;
10617                 link_info->req_duplex = link_info->duplex_setting;
10618         }
10619         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10620                 link_info->req_flow_ctrl =
10621                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10622         else
10623                 link_info->req_flow_ctrl = link_info->force_pause_setting;
10624         return rc;
10625 }
10626
10627 static int bnxt_get_max_irq(struct pci_dev *pdev)
10628 {
10629         u16 ctrl;
10630
10631         if (!pdev->msix_cap)
10632                 return 1;
10633
10634         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
10635         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
10636 }
10637
10638 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10639                                 int *max_cp)
10640 {
10641         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10642         int max_ring_grps = 0, max_irq;
10643
10644         *max_tx = hw_resc->max_tx_rings;
10645         *max_rx = hw_resc->max_rx_rings;
10646         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
10647         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
10648                         bnxt_get_ulp_msix_num(bp),
10649                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
10650         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10651                 *max_cp = min_t(int, *max_cp, max_irq);
10652         max_ring_grps = hw_resc->max_hw_ring_grps;
10653         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
10654                 *max_cp -= 1;
10655                 *max_rx -= 2;
10656         }
10657         if (bp->flags & BNXT_FLAG_AGG_RINGS)
10658                 *max_rx >>= 1;
10659         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10660                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
10661                 /* On P5 chips, the max_cp output param should be the available NQ count */
10662                 *max_cp = max_irq;
10663         }
10664         *max_rx = min_t(int, *max_rx, max_ring_grps);
10665 }
10666
10667 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
10668 {
10669         int rx, tx, cp;
10670
10671         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
10672         *max_rx = rx;
10673         *max_tx = tx;
10674         if (!rx || !tx || !cp)
10675                 return -ENOMEM;
10676
10677         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
10678 }
10679
10680 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10681                                bool shared)
10682 {
10683         int rc;
10684
10685         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
10686         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
10687                 /* Not enough rings, try disabling agg rings. */
10688                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
10689                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
10690                 if (rc) {
10691                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
10692                         bp->flags |= BNXT_FLAG_AGG_RINGS;
10693                         return rc;
10694                 }
10695                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
10696                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10697                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10698                 bnxt_set_ring_params(bp);
10699         }
10700
10701         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
10702                 int max_cp, max_stat, max_irq;
10703
10704                 /* Reserve minimum resources for RoCE */
10705                 max_cp = bnxt_get_max_func_cp_rings(bp);
10706                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
10707                 max_irq = bnxt_get_max_func_irqs(bp);
10708                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
10709                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
10710                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
10711                         return 0;
10712
10713                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
10714                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
10715                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
10716                 max_cp = min_t(int, max_cp, max_irq);
10717                 max_cp = min_t(int, max_cp, max_stat);
10718                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
10719                 if (rc)
10720                         rc = 0;
10721         }
10722         return rc;
10723 }
10724
10725 /* In the initial default shared ring setting, each shared ring must have
10726  * an RX/TX ring pair.
10727  */
10728 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
10729 {
10730         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
10731         bp->rx_nr_rings = bp->cp_nr_rings;
10732         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
10733         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10734 }
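
/* Example (hypothetical numbers): tx_nr_rings_per_tc = 8 but
 * rx_nr_rings = 4 gives cp_nr_rings = min(8, 4) = 4, and both RX and
 * TX are pulled down to 4 so every shared completion ring keeps one
 * RX/TX pair.
 */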
10735
10736 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
10737 {
10738         int dflt_rings, max_rx_rings, max_tx_rings, rc;
10739
10740         if (!bnxt_can_reserve_rings(bp))
10741                 return 0;
10742
10743         if (sh)
10744                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
10745         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
10746         /* Reduce default rings on multi-port cards so that total default
10747          * rings do not exceed CPU count.
10748          */
10749         if (bp->port_count > 1) {
10750                 int max_rings =
10751                         max_t(int, num_online_cpus() / bp->port_count, 1);
10752
10753                 dflt_rings = min_t(int, dflt_rings, max_rings);
10754         }
10755         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
10756         if (rc)
10757                 return rc;
10758         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
10759         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
10760         if (sh)
10761                 bnxt_trim_dflt_sh_rings(bp);
10762         else
10763                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
10764         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10765
10766         rc = __bnxt_reserve_rings(bp);
10767         if (rc)
10768                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
10769         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10770         if (sh)
10771                 bnxt_trim_dflt_sh_rings(bp);
10772
10773         /* Rings may have been trimmed, re-reserve the trimmed rings. */
10774         if (bnxt_need_reserve_rings(bp)) {
10775                 rc = __bnxt_reserve_rings(bp);
10776                 if (rc)
10777                         netdev_warn(bp->dev, "2nd ring reservation failed.\n");
10778                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10779         }
10780         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10781                 bp->rx_nr_rings++;
10782                 bp->cp_nr_rings++;
10783         }
10784         return rc;
10785 }
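
/* Example of the multi-port trimming above (hypothetical numbers): on
 * a 2-port card in a 4-CPU host, netif_get_num_default_rss_queues()
 * yields 4, max_rings = max(4 / 2, 1) = 2, so each port starts with 2
 * rings; on a 16-CPU host the default (typically capped at 8) is left
 * as-is since 16 / 2 = 8.
 */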
10786
10787 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
10788 {
10789         int rc;
10790
10791         if (bp->tx_nr_rings)
10792                 return 0;
10793
10794         bnxt_ulp_irq_stop(bp);
10795         bnxt_clear_int_mode(bp);
10796         rc = bnxt_set_dflt_rings(bp, true);
10797         if (rc) {
10798                 netdev_err(bp->dev, "Not enough rings available.\n");
10799                 goto init_dflt_ring_err;
10800         }
10801         rc = bnxt_init_int_mode(bp);
10802         if (rc)
10803                 goto init_dflt_ring_err;
10804
10805         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10806         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
10807                 bp->flags |= BNXT_FLAG_RFS;
10808                 bp->dev->features |= NETIF_F_NTUPLE;
10809         }
10810 init_dflt_ring_err:
10811         bnxt_ulp_irq_restart(bp, rc);
10812         return rc;
10813 }
10814
10815 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
10816 {
10817         int rc;
10818
10819         ASSERT_RTNL();
10820         bnxt_hwrm_func_qcaps(bp);
10821
10822         if (netif_running(bp->dev))
10823                 __bnxt_close_nic(bp, true, false);
10824
10825         bnxt_ulp_irq_stop(bp);
10826         bnxt_clear_int_mode(bp);
10827         rc = bnxt_init_int_mode(bp);
10828         bnxt_ulp_irq_restart(bp, rc);
10829
10830         if (netif_running(bp->dev)) {
10831                 if (rc)
10832                         dev_close(bp->dev);
10833                 else
10834                         rc = bnxt_open_nic(bp, true, false);
10835         }
10836
10837         return rc;
10838 }
10839
10840 static int bnxt_init_mac_addr(struct bnxt *bp)
10841 {
10842         int rc = 0;
10843
10844         if (BNXT_PF(bp)) {
10845                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
10846         } else {
10847 #ifdef CONFIG_BNXT_SRIOV
10848                 struct bnxt_vf_info *vf = &bp->vf;
10849                 bool strict_approval = true;
10850
10851                 if (is_valid_ether_addr(vf->mac_addr)) {
10852                         /* overwrite netdev dev_addr with admin VF MAC */
10853                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
10854                         /* Older PF driver or firmware may not approve this
10855                          * correctly.
10856                          */
10857                         strict_approval = false;
10858                 } else {
10859                         eth_hw_addr_random(bp->dev);
10860                 }
10861                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
10862 #endif
10863         }
10864         return rc;
10865 }
10866
10867 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
10868 {
10869         struct pci_dev *pdev = bp->pdev;
10870         int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
10871         u32 dw;
10872
10873         if (!pos) {
10874                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
10875                 return -EOPNOTSUPP;
10876         }
10877
10878         /* DSN (two dwords) is at an offset of 4 from the cap position */
10879         pos += 4;
10880         pci_read_config_dword(pdev, pos, &dw);
10881         put_unaligned_le32(dw, &dsn[0]);
10882         pci_read_config_dword(pdev, pos + 4, &dw);
10883         put_unaligned_le32(dw, &dsn[4]);
10884         return 0;
10885 }
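
/* The DSN read above is the standard PCIe Device Serial Number
 * extended capability: a 4-byte header followed by two dwords of
 * serial number.  It can be cross-checked from userspace (a sketch;
 * the BDF is a placeholder):
 *
 *   lspci -s 01:00.0 -vv | grep 'Device Serial'
 *
 * bnxt_init_one() uses the result as the eswitch switch_id.
 */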
10886
10887 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10888 {
10889         static int version_printed;
10890         struct net_device *dev;
10891         struct bnxt *bp;
10892         int rc, max_irqs;
10893
10894         if (pci_is_bridge(pdev))
10895                 return -ENODEV;
10896
10897         if (version_printed++ == 0)
10898                 pr_info("%s", version);
10899
10900         max_irqs = bnxt_get_max_irq(pdev);
10901         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
10902         if (!dev)
10903                 return -ENOMEM;
10904
10905         bp = netdev_priv(dev);
10906         bnxt_set_max_func_irqs(bp, max_irqs);
10907
10908         if (bnxt_vf_pciid(ent->driver_data))
10909                 bp->flags |= BNXT_FLAG_VF;
10910
10911         if (pdev->msix_cap)
10912                 bp->flags |= BNXT_FLAG_MSIX_CAP;
10913
10914         rc = bnxt_init_board(pdev, dev);
10915         if (rc < 0)
10916                 goto init_err_free;
10917
10918         dev->netdev_ops = &bnxt_netdev_ops;
10919         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
10920         dev->ethtool_ops = &bnxt_ethtool_ops;
10921         pci_set_drvdata(pdev, dev);
10922
10923         rc = bnxt_alloc_hwrm_resources(bp);
10924         if (rc)
10925                 goto init_err_pci_clean;
10926
10927         mutex_init(&bp->hwrm_cmd_lock);
10928
10929         rc = bnxt_fw_init_one_p1(bp);
10930         if (rc)
10931                 goto init_err_pci_clean;
10932
10933         if (BNXT_CHIP_P5(bp))
10934                 bp->flags |= BNXT_FLAG_CHIP_P5;
10935
10936         rc = bnxt_fw_init_one_p2(bp);
10937         if (rc)
10938                 goto init_err_pci_clean;
10939
10940         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10941                            NETIF_F_TSO | NETIF_F_TSO6 |
10942                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
10943                            NETIF_F_GSO_IPXIP4 |
10944                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10945                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
10946                            NETIF_F_RXCSUM | NETIF_F_GRO;
10947
10948         if (BNXT_SUPPORTS_TPA(bp))
10949                 dev->hw_features |= NETIF_F_LRO;
10950
10951         dev->hw_enc_features =
10952                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10953                         NETIF_F_TSO | NETIF_F_TSO6 |
10954                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
10955                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10956                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
10957         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
10958                                     NETIF_F_GSO_GRE_CSUM;
10959         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
10960         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
10961                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
10962         if (BNXT_SUPPORTS_TPA(bp))
10963                 dev->hw_features |= NETIF_F_GRO_HW;
10964         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
10965         if (dev->features & NETIF_F_GRO_HW)
10966                 dev->features &= ~NETIF_F_LRO;
10967         dev->priv_flags |= IFF_UNICAST_FLT;
10968
10969 #ifdef CONFIG_BNXT_SRIOV
10970         init_waitqueue_head(&bp->sriov_cfg_wait);
10971         mutex_init(&bp->sriov_lock);
10972 #endif
10973         if (BNXT_SUPPORTS_TPA(bp)) {
10974                 bp->gro_func = bnxt_gro_func_5730x;
10975                 if (BNXT_CHIP_P4(bp))
10976                         bp->gro_func = bnxt_gro_func_5731x;
10977                 else if (BNXT_CHIP_P5(bp))
10978                         bp->gro_func = bnxt_gro_func_5750x;
10979         }
10980         if (!BNXT_CHIP_P4_PLUS(bp))
10981                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
10982
10983         bp->ulp_probe = bnxt_ulp_probe;
10984
10985         rc = bnxt_init_mac_addr(bp);
10986         if (rc) {
10987                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
10988                 rc = -EADDRNOTAVAIL;
10989                 goto init_err_pci_clean;
10990         }
10991
10992         if (BNXT_PF(bp)) {
10993                 /* Read the adapter's DSN to use as the eswitch switch_id */
10994                 rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
10995                 if (rc)
10996                         goto init_err_pci_clean;
10997         }
10998
10999         /* MTU range: 60 - FW defined max */
11000         dev->min_mtu = ETH_ZLEN;
11001         dev->max_mtu = bp->max_mtu;
11002
11003         rc = bnxt_probe_phy(bp);
11004         if (rc)
11005                 goto init_err_pci_clean;
11006
11007         bnxt_set_rx_skb_mode(bp, false);
11008         bnxt_set_tpa_flags(bp);
11009         bnxt_set_ring_params(bp);
11010         rc = bnxt_set_dflt_rings(bp, true);
11011         if (rc) {
11012                 netdev_err(bp->dev, "Not enough rings available.\n");
11013                 rc = -ENOMEM;
11014                 goto init_err_pci_clean;
11015         }
11016
11017         /* Default RSS hash cfg. */
11018         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11019                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11020                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11021                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11022         if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
11023                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11024                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11025                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11026         }
11027
11028         if (bnxt_rfs_supported(bp)) {
11029                 dev->hw_features |= NETIF_F_NTUPLE;
11030                 if (bnxt_rfs_capable(bp)) {
11031                         bp->flags |= BNXT_FLAG_RFS;
11032                         dev->features |= NETIF_F_NTUPLE;
11033                 }
11034         }
11035
11036         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
11037                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
11038
11039         rc = bnxt_init_int_mode(bp);
11040         if (rc)
11041                 goto init_err_pci_clean;
11042
11043         /* No TC has been set yet and rings may have been trimmed due to
11044          * limited MSIX, so we re-initialize the TX rings per TC.
11045          */
11046         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11047
11048         bnxt_get_wol_settings(bp);
11049         if (bp->flags & BNXT_FLAG_WOL_CAP)
11050                 device_set_wakeup_enable(&pdev->dev, bp->wol);
11051         else
11052                 device_set_wakeup_capable(&pdev->dev, false);
11053
11054         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11055
11056         bnxt_hwrm_coal_params_qcaps(bp);
11057
11058         if (BNXT_PF(bp)) {
11059                 if (!bnxt_pf_wq) {
11060                         bnxt_pf_wq =
11061                                 create_singlethread_workqueue("bnxt_pf_wq");
11062                         if (!bnxt_pf_wq) {
11063                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
                                      rc = -ENOMEM;
11064                                 goto init_err_pci_clean;
11065                         }
11066                 }
11067                 bnxt_init_tc(bp);
11068         }
11069
11070         rc = register_netdev(dev);
11071         if (rc)
11072                 goto init_err_cleanup_tc;
11073
11074         if (BNXT_PF(bp))
11075                 bnxt_dl_register(bp);
11076
11077         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
11078                     board_info[ent->driver_data].name,
11079                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
11080         pcie_print_link_status(pdev);
11081
11082         return 0;
11083
11084 init_err_cleanup_tc:
11085         bnxt_shutdown_tc(bp);
11086         bnxt_clear_int_mode(bp);
11087
11088 init_err_pci_clean:
11089         bnxt_free_hwrm_short_cmd_req(bp);
11090         bnxt_free_hwrm_resources(bp);
11091         bnxt_free_ctx_mem(bp);
11092         kfree(bp->ctx);
11093         bp->ctx = NULL;
11094         bnxt_cleanup_pci(bp);
11095
11096 init_err_free:
11097         free_netdev(dev);
11098         return rc;
11099 }
11100
11101 static void bnxt_shutdown(struct pci_dev *pdev)
11102 {
11103         struct net_device *dev = pci_get_drvdata(pdev);
11104         struct bnxt *bp;
11105
11106         if (!dev)
11107                 return;
11108
11109         rtnl_lock();
11110         bp = netdev_priv(dev);
11111         if (!bp)
11112                 goto shutdown_exit;
11113
11114         if (netif_running(dev))
11115                 dev_close(dev);
11116
11117         bnxt_ulp_shutdown(bp);
11118
11119         if (system_state == SYSTEM_POWER_OFF) {
11120                 bnxt_clear_int_mode(bp);
11121                 pci_disable_device(pdev);
11122                 pci_wake_from_d3(pdev, bp->wol);
11123                 pci_set_power_state(pdev, PCI_D3hot);
11124         }
11125
11126 shutdown_exit:
11127         rtnl_unlock();
11128 }
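/* dev_close() above quiesces traffic for every shutdown path,
 * including kexec.  The extra steps run only on a true power-off:
 * release the interrupt setup, arm PCI wakeup according to the
 * negotiated WoL state, and drop the function into D3hot so
 * Wake-on-LAN can operate while the system is powered down.
 */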
11129
11130 #ifdef CONFIG_PM_SLEEP
11131 static int bnxt_suspend(struct device *device)
11132 {
11133         struct net_device *dev = dev_get_drvdata(device);
11134         struct bnxt *bp = netdev_priv(dev);
11135         int rc = 0;
11136
11137         rtnl_lock();
11138         if (netif_running(dev)) {
11139                 netif_device_detach(dev);
11140                 rc = bnxt_close(dev);
11141         }
11142         bnxt_hwrm_func_drv_unrgtr(bp);
11143         rtnl_unlock();
11144         return rc;
11145 }
11146
11147 static int bnxt_resume(struct device *device)
11148 {
11149         struct net_device *dev = dev_get_drvdata(device);
11150         struct bnxt *bp = netdev_priv(dev);
11151         int rc = 0;
11152
11153         rtnl_lock();
11154         if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
11155                 rc = -ENODEV;
11156                 goto resume_exit;
11157         }
11158         rc = bnxt_hwrm_func_reset(bp);
11159         if (rc) {
11160                 rc = -EBUSY;
11161                 goto resume_exit;
11162         }
11163         bnxt_get_wol_settings(bp);
11164         if (netif_running(dev)) {
11165                 rc = bnxt_open(dev);
11166                 if (!rc)
11167                         netif_device_attach(dev);
11168         }
11169
11170 resume_exit:
11171         rtnl_unlock();
11172         return rc;
11173 }
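/* Suspend/resume contract with the firmware: bnxt_suspend() closes
 * the netdev and unregisters this driver instance from the firmware
 * (bnxt_hwrm_func_drv_unrgtr()), so bnxt_resume() must re-query the
 * HWRM version, re-register the driver, and reset the function before
 * the rings can be reopened with bnxt_open().
 */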
11174
11175 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
11176 #define BNXT_PM_OPS (&bnxt_pm_ops)
11177
11178 #else
11179
11180 #define BNXT_PM_OPS NULL
11181
11182 #endif /* CONFIG_PM_SLEEP */
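/* For reference, the SIMPLE_DEV_PM_OPS() line above roughly expands
 * to the following (a sketch of the <linux/pm.h> macro, trimmed to
 * the system-sleep callbacks it populates under CONFIG_PM_SLEEP):
 *
 *	static const struct dev_pm_ops bnxt_pm_ops = {
 *		.suspend  = bnxt_suspend,
 *		.resume   = bnxt_resume,
 *		.freeze   = bnxt_suspend,
 *		.thaw     = bnxt_resume,
 *		.poweroff = bnxt_suspend,
 *		.restore  = bnxt_resume,
 *	};
 *
 * so the same handler pair also serves the hibernation transitions.
 */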
11183
11184 /**
11185  * bnxt_io_error_detected - called when PCI error is detected
11186  * @pdev: Pointer to PCI device
11187  * @state: The current PCI connection state
11188  *
11189  * This function is called after a PCI bus error affecting
11190  * this device has been detected.
11191  */
11192 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
11193                                                pci_channel_state_t state)
11194 {
11195         struct net_device *netdev = pci_get_drvdata(pdev);
11196         struct bnxt *bp = netdev_priv(netdev);
11197
11198         netdev_info(netdev, "PCI I/O error detected\n");
11199
11200         rtnl_lock();
11201         netif_device_detach(netdev);
11202
11203         bnxt_ulp_stop(bp);
11204
11205         if (state == pci_channel_io_perm_failure) {
11206                 rtnl_unlock();
11207                 return PCI_ERS_RESULT_DISCONNECT;
11208         }
11209
11210         if (netif_running(netdev))
11211                 bnxt_close(netdev);
11212
11213         pci_disable_device(pdev);
11214         rtnl_unlock();
11215
11216         /* Request a slot reset. */
11217         return PCI_ERS_RESULT_NEED_RESET;
11218 }
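/* PCI error recovery invokes the bnxt_err_handler callbacks in order:
 * ->error_detected() (this function), then ->slot_reset() once the
 * core has reset the slot in response to PCI_ERS_RESULT_NEED_RESET,
 * then ->resume() after all affected drivers report recovery.
 * PCI_ERS_RESULT_DISCONNECT aborts recovery for a permanently failed
 * link.
 */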
11219
11220 /**
11221  * bnxt_io_slot_reset - called after the PCI bus has been reset.
11222  * @pdev: Pointer to PCI device
11223  *
11224  * Restart the card from scratch, as if from a cold boot.
11225  * At this point, the card has experienced a hard reset,
11226  * followed by fixups by BIOS, and has its config space
11227  * set up identically to what it was at cold boot.
11228  */
11229 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
11230 {
11231         struct net_device *netdev = pci_get_drvdata(pdev);
11232         struct bnxt *bp = netdev_priv(netdev);
11233         int err = 0;
11234         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
11235
11236         netdev_info(bp->dev, "PCI Slot Reset\n");
11237
11238         rtnl_lock();
11239
11240         if (pci_enable_device(pdev)) {
11241                 dev_err(&pdev->dev,
11242                         "Cannot re-enable PCI device after reset.\n");
11243         } else {
11244                 pci_set_master(pdev);
11245
11246                 err = bnxt_hwrm_func_reset(bp);
11247                 if (!err && netif_running(netdev))
11248                         err = bnxt_open(netdev);
11249
11250                 if (!err) {
11251                         result = PCI_ERS_RESULT_RECOVERED;
11252                         bnxt_ulp_start(bp);
11253                 }
11254         }
11255
11256         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
11257                 dev_close(netdev);
11258
11259         rtnl_unlock();
11260
11261         return result;
11262 }
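/* The return value steers the rest of recovery: RECOVERED lets the
 * PCI core go on to ->resume() and re-attach the netdev, while
 * DISCONNECT (left in "result" when re-enable, reset, or reopen
 * failed) tells the core the device cannot be brought back.
 */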
11263
11264 /**
11265  * bnxt_io_resume - called when traffic can start flowing again.
11266  * @pdev: Pointer to PCI device
11267  *
11268  * This callback is called when the error recovery driver tells
11269  * us that it's OK to resume normal operation.
11270  */
11271 static void bnxt_io_resume(struct pci_dev *pdev)
11272 {
11273         struct net_device *netdev = pci_get_drvdata(pdev);
11274
11275         rtnl_lock();
11276
11277         netif_device_attach(netdev);
11278
11279         rtnl_unlock();
11280 }
11281
11282 static const struct pci_error_handlers bnxt_err_handler = {
11283         .error_detected = bnxt_io_error_detected,
11284         .slot_reset     = bnxt_io_slot_reset,
11285         .resume         = bnxt_io_resume
11286 };
11287
11288 static struct pci_driver bnxt_pci_driver = {
11289         .name           = DRV_MODULE_NAME,
11290         .id_table       = bnxt_pci_tbl,
11291         .probe          = bnxt_init_one,
11292         .remove         = bnxt_remove_one,
11293         .shutdown       = bnxt_shutdown,
11294         .driver.pm      = BNXT_PM_OPS,
11295         .err_handler    = &bnxt_err_handler,
11296 #if defined(CONFIG_BNXT_SRIOV)
11297         .sriov_configure = bnxt_sriov_configure,
11298 #endif
11299 };
11300
11301 static int __init bnxt_init(void)
11302 {
11303         bnxt_debug_init();
11304         return pci_register_driver(&bnxt_pci_driver);
11305 }
11306
11307 static void __exit bnxt_exit(void)
11308 {
11309         pci_unregister_driver(&bnxt_pci_driver);
11310         if (bnxt_pf_wq)
11311                 destroy_workqueue(bnxt_pf_wq);
11312         bnxt_debug_exit();
11313 }
11314
11315 module_init(bnxt_init);
11316 module_exit(bnxt_exit);
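/* module_pci_driver(bnxt_pci_driver) would generate this init/exit
 * pair automatically, but it cannot be used here: the driver needs
 * extra work around (un)registration, namely debugfs setup in
 * bnxt_init() and tearing down the PF workqueue in bnxt_exit().
 */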